path: root/drivers
author     Jiri Kosina <jkosina@suse.cz>  2011-09-15 15:08:05 +0200
committer  Jiri Kosina <jkosina@suse.cz>  2011-09-15 15:08:18 +0200
commit     e060c38434b2caa78efe7cedaff4191040b65a15 (patch)
tree       407361230bf6733f63d8e788e4b5e6566ee04818 /drivers
parent     10e4ac572eeffe5317019bd7330b6058a400dfc2 (diff)
parent     cc39c6a9bbdebfcf1a7dee64d83bf302bc38d941 (diff)
download   kernel_samsung_smdk4412-e060c38434b2caa78efe7cedaff4191040b65a15.zip
           kernel_samsung_smdk4412-e060c38434b2caa78efe7cedaff4191040b65a15.tar.gz
           kernel_samsung_smdk4412-e060c38434b2caa78efe7cedaff4191040b65a15.tar.bz2
Merge branch 'master' into for-next
Fast-forward merge with Linus to be able to merge patches based on a more recent version of the tree.
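
For context, a minimal sketch of the git workflow behind such a merge and of how a diffstat like the one below can be regenerated locally. It assumes a local checkout of the maintainer's tree carrying the 'master' and 'for-next' branches named in the subject above:

    git checkout for-next
    git merge master                        # bring the newer upstream history into for-next
    git diff --stat HEAD^ HEAD -- drivers   # diffstat against the first parent, limited to drivers/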
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/acconfig.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h6
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/acpredef.h1
-rw-r--r--drivers/acpi/acpica/nspredef.c19
-rw-r--r--drivers/acpi/acpica/nsrepair2.c15
-rw-r--r--drivers/acpi/acpica/tbinstal.c27
-rw-r--r--drivers/acpi/apei/Kconfig12
-rw-r--r--drivers/acpi/apei/apei-base.c35
-rw-r--r--drivers/acpi/apei/apei-internal.h15
-rw-r--r--drivers/acpi/apei/einj.c43
-rw-r--r--drivers/acpi/apei/erst-dbg.c6
-rw-r--r--drivers/acpi/apei/erst.c32
-rw-r--r--drivers/acpi/apei/ghes.c431
-rw-r--r--drivers/acpi/apei/hest.c17
-rw-r--r--drivers/acpi/battery.c86
-rw-r--r--drivers/acpi/bus.c14
-rw-r--r--drivers/acpi/dock.c4
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/osl.c25
-rw-r--r--drivers/acpi/pci_irq.c58
-rw-r--r--drivers/acpi/pci_root.c3
-rw-r--r--drivers/acpi/processor_thermal.c2
-rw-r--r--drivers/acpi/sbs.c13
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/acpi/video.c9
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/libata-acpi.c4
-rw-r--r--drivers/ata/pata_imx.c253
-rw-r--r--drivers/ata/pata_via.c18
-rw-r--r--drivers/ata/sata_dwc_460ex.c14
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/atmtcp.c2
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/eni.h2
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/horizon.c2
-rw-r--r--drivers/atm/idt77252.c2
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/atm/nicstar.c2
-rw-r--r--drivers/atm/suni.c2
-rw-r--r--drivers/atm/uPD98402.c2
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/base/devres.c1
-rw-r--r--drivers/base/devtmpfs.c4
-rw-r--r--drivers/base/firmware_class.c11
-rw-r--r--drivers/base/memory.c2
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/clock_ops.c40
-rw-r--r--drivers/base/power/domain.c33
-rw-r--r--drivers/base/power/opp.c2
-rw-r--r--drivers/base/power/runtime.c10
-rw-r--r--drivers/base/power/sysfs.c2
-rw-r--r--drivers/base/regmap/regmap-i2c.c1
-rw-r--r--drivers/base/regmap/regmap-spi.c3
-rw-r--r--drivers/base/regmap/regmap.c7
-rw-r--r--drivers/bcma/main.c12
-rw-r--r--drivers/block/Kconfig17
-rw-r--r--drivers/block/cciss_scsi.c2
-rw-r--r--drivers/block/drbd/drbd_nl.c4
-rw-r--r--drivers/block/loop.c297
-rw-r--r--drivers/block/rbd.c46
-rw-r--r--drivers/block/swim3.c1
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/bluetooth/ath3k.c1
-rw-r--r--drivers/bluetooth/btusb.c13
-rw-r--r--drivers/cdrom/cdrom.c8
-rw-r--r--drivers/char/Kconfig11
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/hw_random/n2-drv.c29
-rw-r--r--drivers/char/hw_random/n2rng.h2
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/char/msm_smd_pkt.c5
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/ramoops.c101
-rw-r--r--drivers/char/random.c349
-rw-r--r--drivers/char/tile-srom.c481
-rw-r--r--drivers/char/tpm/tpm.c102
-rw-r--r--drivers/char/tpm/tpm.h7
-rw-r--r--drivers/char/tpm/tpm_nsc.c14
-rw-r--r--drivers/char/tpm/tpm_tis.c179
-rw-r--r--drivers/clk/Kconfig3
-rw-r--r--drivers/clocksource/sh_cmt.c34
-rw-r--r--drivers/connector/cn_proc.c11
-rw-r--r--drivers/cpufreq/cpufreq.c20
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c3
-rw-r--r--drivers/cpuidle/cpuidle.c50
-rw-r--r--drivers/cpuidle/cpuidle.h1
-rw-r--r--drivers/cpuidle/driver.c3
-rw-r--r--drivers/cpuidle/governor.c3
-rw-r--r--drivers/crypto/n2_core.c33
-rw-r--r--drivers/dma/TODO1
-rw-r--r--drivers/dma/amba-pl08x.c247
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/coh901318.c19
-rw-r--r--drivers/dma/dmaengine.c8
-rw-r--r--drivers/dma/ep93xx_dma.c2
-rw-r--r--drivers/dma/imx-sdma.c100
-rw-r--r--drivers/dma/intel_mid_dma.c2
-rw-r--r--drivers/dma/ioat/dma_v3.c8
-rw-r--r--drivers/dma/ioat/pci.c11
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/mv_xor.c3
-rw-r--r--drivers/dma/mxs-dma.c13
-rw-r--r--drivers/dma/pch_dma.c127
-rw-r--r--drivers/dma/pl330.c64
-rw-r--r--drivers/dma/shdma.c88
-rw-r--r--drivers/dma/shdma.h4
-rw-r--r--drivers/dma/ste_dma40.c312
-rw-r--r--drivers/dma/ste_dma40_ll.h3
-rw-r--r--drivers/edac/edac_stub.c2
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c4
-rw-r--r--drivers/eisa/pci_eisa.c4
-rw-r--r--drivers/firewire/core-card.c2
-rw-r--r--drivers/firewire/core-cdev.c24
-rw-r--r--drivers/firewire/core-device.c17
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/core.h2
-rw-r--r--drivers/firewire/nosy.c2
-rw-r--r--drivers/firewire/ohci.c9
-rw-r--r--drivers/firewire/sbp2.c4
-rw-r--r--drivers/firmware/efivars.c243
-rw-r--r--drivers/firmware/google/gsmi.c2
-rw-r--r--drivers/gpio/Kconfig22
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/gpio-ab8500.c2
-rw-r--r--drivers/gpio/gpio-generic.c15
-rw-r--r--drivers/gpio/gpio-msm-v1.c636
-rw-r--r--drivers/gpio/gpio-msm-v2.c433
-rw-r--r--drivers/gpio/gpio-tps65912.c156
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c5
-rw-r--r--drivers/gpu/drm/drm_debugfs.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c37
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c1
-rw-r--r--drivers/gpu/drm/drm_gem.c47
-rw-r--r--drivers/gpu/drm/drm_irq.c26
-rw-r--r--drivers/gpu/drm/drm_modes.c87
-rw-r--r--drivers/gpu/drm/drm_platform.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c232
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c10
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c69
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h51
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c193
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c39
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c22
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h59
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c13
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c142
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1014
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c135
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h35
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c166
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c90
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c16
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c76
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c13
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c232
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c167
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c34
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h78
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c148
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c119
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c114
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c17
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c112
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mpeg.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mpeg.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c41
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c2
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_copy.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fb.c68
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c270
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.fuc400
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c239
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc474
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h483
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc808
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h838
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c14
-rw-r--r--drivers/gpu/drm/radeon/Makefile1
-rw-r--r--drivers/gpu/drm/radeon/atom.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c34
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c37
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c59
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h2
-rw-r--r--drivers/gpu/drm/radeon/ni.c5
-rw-r--r--drivers/gpu/drm/radeon/r600.c15
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c23
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon.h27
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c130
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c70
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c23
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman2
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen3
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c18
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-magicmouse.c66
-rw-r--r--drivers/hid/hid-wacom.c22
-rw-r--r--drivers/hid/hid-wiimote.c277
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hwmon/Kconfig127
-rw-r--r--drivers/hwmon/Makefile13
-rw-r--r--drivers/hwmon/coretemp.c177
-rw-r--r--drivers/hwmon/i5k_amb.c42
-rw-r--r--drivers/hwmon/ibmaem.c15
-rw-r--r--drivers/hwmon/lm90.c65
-rw-r--r--drivers/hwmon/lm95241.c31
-rw-r--r--drivers/hwmon/lm95245.c543
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1668.c502
-rw-r--r--drivers/hwmon/ntc_thermistor.c452
-rw-r--r--drivers/hwmon/pmbus/Kconfig100
-rw-r--r--drivers/hwmon/pmbus/Makefile13
-rw-r--r--drivers/hwmon/pmbus/adm1275.c (renamed from drivers/hwmon/adm1275.c)66
-rw-r--r--drivers/hwmon/pmbus/lm25066.c352
-rw-r--r--drivers/hwmon/pmbus/max16064.c (renamed from drivers/hwmon/max16064.c)57
-rw-r--r--drivers/hwmon/pmbus/max34440.c (renamed from drivers/hwmon/max34440.c)81
-rw-r--r--drivers/hwmon/pmbus/max8688.c (renamed from drivers/hwmon/max8688.c)69
-rw-r--r--drivers/hwmon/pmbus/pmbus.c (renamed from drivers/hwmon/pmbus.c)37
-rw-r--r--drivers/hwmon/pmbus/pmbus.h (renamed from drivers/hwmon/pmbus.h)49
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c (renamed from drivers/hwmon/pmbus_core.c)371
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c (renamed from drivers/hwmon/ucd9000.c)6
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c (renamed from drivers/hwmon/ucd9200.c)6
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c41
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c9
-rw-r--r--drivers/i2c/busses/i2c-omap.c29
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c5
-rw-r--r--drivers/i2c/busses/i2c-tegra.c77
-rw-r--r--drivers/ide/cy82c693.c2
-rw-r--r--drivers/ide/ide_platform.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_tools.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c8
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c4
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c2
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/joystick/xpad.c31
-rw-r--r--drivers/input/keyboard/adp5588-keys.c1
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1
-rw-r--r--drivers/input/keyboard/atkbd.c4
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c1
-rw-r--r--drivers/input/keyboard/gpio_keys.c166
-rw-r--r--drivers/input/keyboard/lm8323.c32
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c16
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c8
-rw-r--r--drivers/input/keyboard/qt1070.c2
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c9
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c2
-rw-r--r--drivers/input/misc/Kconfig51
-rw-r--r--drivers/input/misc/Makefile5
-rw-r--r--drivers/input/misc/ad714x-i2c.c81
-rw-r--r--drivers/input/misc/ad714x-spi.c68
-rw-r--r--drivers/input/misc/ad714x.c116
-rw-r--r--drivers/input/misc/ad714x.h35
-rw-r--r--drivers/input/misc/bfin_rotary.c1
-rw-r--r--drivers/input/misc/kxtj9.c672
-rw-r--r--drivers/input/misc/mma8450.c264
-rw-r--r--drivers/input/misc/mpu3050.c376
-rw-r--r--drivers/input/misc/twl4030-vibra.c12
-rw-r--r--drivers/input/misc/twl6040-vibra.c423
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/bcm5974.c40
-rw-r--r--drivers/input/mouse/gpio_mouse.c2
-rw-r--r--drivers/input/mouse/hgpk.c1
-rw-r--r--drivers/input/mouse/lifebook.c4
-rw-r--r--drivers/input/mouse/pxa930_trkball.c1
-rw-r--r--drivers/input/mouse/sentelic.c1
-rw-r--r--drivers/input/mouse/synaptics.c107
-rw-r--r--drivers/input/mouse/synaptics.h18
-rw-r--r--drivers/input/serio/at32psif.c2
-rw-r--r--drivers/input/serio/hp_sdc.c2
-rw-r--r--drivers/input/serio/xilinx_ps2.c2
-rw-r--r--drivers/input/tablet/aiptek.c1
-rw-r--r--drivers/input/tablet/wacom_sys.c17
-rw-r--r--drivers/input/tablet/wacom_wac.c72
-rw-r--r--drivers/input/touchscreen/ad7879.c4
-rw-r--r--drivers/input/touchscreen/ads7846.c15
-rw-r--r--drivers/input/touchscreen/atmel-wm97xx.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c202
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c8
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c15
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c6
-rw-r--r--drivers/input/touchscreen/max11801_ts.c3
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c3
-rw-r--r--drivers/input/touchscreen/wm9705.c25
-rw-r--r--drivers/input/touchscreen/wm9712.c27
-rw-r--r--drivers/input/touchscreen/wm9713.c25
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c6
-rw-r--r--drivers/iommu/amd_iommu.c18
-rw-r--r--drivers/isdn/gigaset/gigaset.h2
-rw-r--r--drivers/isdn/i4l/isdn_net.c3
-rw-r--r--drivers/leds/leds-ams-delta.c1
-rw-r--r--drivers/leds/leds-bd2802.c5
-rw-r--r--drivers/leds/leds-hp6xx.c1
-rw-r--r--drivers/leds/ledtrig-timer.c2
-rw-r--r--drivers/md/Kconfig5
-rw-r--r--drivers/md/bitmap.c137
-rw-r--r--drivers/md/bitmap.h5
-rw-r--r--drivers/md/dm-crypt.c64
-rw-r--r--drivers/md/dm-flakey.c270
-rw-r--r--drivers/md/dm-io.c29
-rw-r--r--drivers/md/dm-ioctl.c89
-rw-r--r--drivers/md/dm-kcopyd.c44
-rw-r--r--drivers/md/dm-log-userspace-base.c3
-rw-r--r--drivers/md/dm-log.c32
-rw-r--r--drivers/md/dm-mpath.c149
-rw-r--r--drivers/md/dm-queue-length.c2
-rw-r--r--drivers/md/dm-raid.c621
-rw-r--r--drivers/md/dm-snap-persistent.c80
-rw-r--r--drivers/md/dm-snap.c84
-rw-r--r--drivers/md/dm-table.c157
-rw-r--r--drivers/md/dm.c75
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/linear.h2
-rw-r--r--drivers/md/md.c897
-rw-r--r--drivers/md/md.h110
-rw-r--r--drivers/md/raid1.c966
-rw-r--r--drivers/md/raid1.h26
-rw-r--r--drivers/md/raid10.c1204
-rw-r--r--drivers/md/raid10.h21
-rw-r--r--drivers/md/raid5.c1015
-rw-r--r--drivers/md/raid5.h99
-rw-r--r--drivers/media/Kconfig14
-rw-r--r--drivers/media/common/tuners/Kconfig10
-rw-r--r--drivers/media/common/tuners/Makefile1
-rw-r--r--drivers/media/common/tuners/tuner-types.c4
-rw-r--r--drivers/media/common/tuners/xc4000.c1691
-rw-r--r--drivers/media/common/tuners/xc4000.h67
-rw-r--r--drivers/media/dvb/Kconfig4
-rw-r--r--drivers/media/dvb/Makefile3
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.c4
-rw-r--r--drivers/media/dvb/ddbridge/Kconfig18
-rw-r--r--drivers/media/dvb/ddbridge/Makefile14
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-core.c1719
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-regs.h151
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge.h187
-rw-r--r--drivers/media/dvb/dvb-core/Makefile4
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.h21
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig1
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c135
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h1
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c69
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.h16
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c188
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h3
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.h3
-rw-r--r--drivers/media/dvb/dvb-usb/technisat-usb2.c4
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c26
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.h3
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-ci.c34
-rw-r--r--drivers/media/dvb/frontends/Kconfig21
-rw-r--r--drivers/media/dvb/frontends/Makefile3
-rw-r--r--drivers/media/dvb/frontends/au8522_decoder.c2
-rw-r--r--drivers/media/dvb/frontends/cx24113.c20
-rw-r--r--drivers/media/dvb/frontends/cx24116.c6
-rw-r--r--drivers/media/dvb/frontends/cxd2820r.h4
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_core.c22
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_priv.h4
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c5
-rw-r--r--drivers/media/dvb/frontends/drxd_hard.c9
-rw-r--r--drivers/media/dvb/frontends/drxk.h47
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.c6454
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.h348
-rw-r--r--drivers/media/dvb/frontends/drxk_map.h449
-rw-r--r--drivers/media/dvb/frontends/itd1000.c25
-rw-r--r--drivers/media/dvb/frontends/nxt6000.c2
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c12
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd.c1251
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd.h16
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd_maps.h814
-rw-r--r--drivers/media/dvb/ngene/Kconfig2
-rw-r--r--drivers/media/dvb/ngene/ngene-cards.c182
-rw-r--r--drivers/media/dvb/ngene/ngene-core.c26
-rw-r--r--drivers/media/dvb/ngene/ngene-dvb.c46
-rw-r--r--drivers/media/dvb/ngene/ngene.h7
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c2
-rw-r--r--drivers/media/dvb/siano/smscoreapi.h1
-rw-r--r--drivers/media/radio/dsbr100.c7
-rw-r--r--drivers/media/radio/radio-aimslab.c5
-rw-r--r--drivers/media/radio/radio-aztech.c5
-rw-r--r--drivers/media/radio/radio-cadet.c5
-rw-r--r--drivers/media/radio/radio-gemtek.c7
-rw-r--r--drivers/media/radio/radio-maxiradio.c10
-rw-r--r--drivers/media/radio/radio-mr800.c6
-rw-r--r--drivers/media/radio/radio-rtrack2.c5
-rw-r--r--drivers/media/radio/radio-sf16fmi.c5
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c531
-rw-r--r--drivers/media/radio/radio-tea5764.c8
-rw-r--r--drivers/media/radio/radio-terratec.c5
-rw-r--r--drivers/media/radio/radio-timb.c3
-rw-r--r--drivers/media/radio/radio-trust.c5
-rw-r--r--drivers/media/radio/radio-typhoon.c9
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/radio-zoltrix.c5
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c6
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h1
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h5
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c3
-rw-r--r--drivers/media/rc/Kconfig11
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ene_ir.c4
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/ir-lirc-codec.c15
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c449
-rw-r--r--drivers/media/rc/ir-raw.c1
-rw-r--r--drivers/media/rc/ite-cir.c5
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c3
-rw-r--r--drivers/media/rc/mceusb.c10
-rw-r--r--drivers/media/rc/nuvoton-cir.c57
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-core-priv.h18
-rw-r--r--drivers/media/rc/rc-loopback.c13
-rw-r--r--drivers/media/rc/rc-main.c4
-rw-r--r--drivers/media/rc/redrat3.c63
-rw-r--r--drivers/media/rc/winbond-cir.c28
-rw-r--r--drivers/media/video/Kconfig44
-rw-r--r--drivers/media/video/Makefile8
-rw-r--r--drivers/media/video/adp1653.c491
-rw-r--r--drivers/media/video/arv.c5
-rw-r--r--drivers/media/video/atmel-isi.c1048
-rw-r--r--drivers/media/video/au0828/au0828-core.c1
-rw-r--r--drivers/media/video/au0828/au0828-video.c5
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c7
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c14
-rw-r--r--drivers/media/video/bt8xx/bttvp.h3
-rw-r--r--drivers/media/video/bw-qcam.c4
-rw-r--r--drivers/media/video/c-qcam.c4
-rw-r--r--drivers/media/video/cafe_ccic-regs.h166
-rw-r--r--drivers/media/video/cafe_ccic.c2267
-rw-r--r--drivers/media/video/cpia2/cpia2.h5
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c12
-rw-r--r--drivers/media/video/cx18/cx18-alsa-main.c1
-rw-r--r--drivers/media/video/cx18/cx18-driver.h1
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c1
-rw-r--r--drivers/media/video/cx18/cx18-version.h8
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c78
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c29
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h5
-rw-r--r--drivers/media/video/cx23885/altera-ci.c1
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c1
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c70
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c13
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c23
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c6
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c1
-rw-r--r--drivers/media/video/cx23885/cx23885.h4
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c19
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c20
-rw-r--r--drivers/media/video/cx88/cx88-cards.c150
-rw-r--r--drivers/media/video/cx88/cx88-core.c11
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c77
-rw-r--r--drivers/media/video/cx88/cx88-input.c4
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c35
-rw-r--r--drivers/media/video/cx88/cx88-video.c65
-rw-r--r--drivers/media/video/cx88/cx88.h7
-rw-r--r--drivers/media/video/davinci/Kconfig23
-rw-r--r--drivers/media/video/davinci/Makefile2
-rw-r--r--drivers/media/video/davinci/vpbe.c864
-rw-r--r--drivers/media/video/davinci/vpbe_display.c1860
-rw-r--r--drivers/media/video/davinci/vpbe_osd.c1231
-rw-r--r--drivers/media/video/davinci/vpbe_osd_regs.h364
-rw-r--r--drivers/media/video/davinci/vpbe_venc.c566
-rw-r--r--drivers/media/video/davinci/vpbe_venc_regs.h177
-rw-r--r--drivers/media/video/davinci/vpif_capture.c9
-rw-r--r--drivers/media/video/davinci/vpif_capture.h7
-rw-r--r--drivers/media/video/davinci/vpif_display.c9
-rw-r--r--drivers/media/video/davinci/vpif_display.h8
-rw-r--r--drivers/media/video/em28xx/Kconfig12
-rw-r--r--drivers/media/video/em28xx/Makefile6
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c251
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c159
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c84
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c126
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c17
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c1
-rw-r--r--drivers/media/video/em28xx/em28xx-reg.h1
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c14
-rw-r--r--drivers/media/video/em28xx/em28xx.h24
-rw-r--r--drivers/media/video/et61x251/et61x251.h1
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c16
-rw-r--r--drivers/media/video/fsl-viu.c10
-rw-r--r--drivers/media/video/gspca/Kconfig10
-rw-r--r--drivers/media/video/gspca/Makefile2
-rw-r--r--drivers/media/video/gspca/gl860/gl860.h1
-rw-r--r--drivers/media/video/gspca/gspca.c23
-rw-r--r--drivers/media/video/gspca/ov519.c133
-rw-r--r--drivers/media/video/gspca/se401.c774
-rw-r--r--drivers/media/video/gspca/se401.h90
-rw-r--r--drivers/media/video/gspca/sonixj.c6
-rw-r--r--drivers/media/video/gspca/sunplus.c3
-rw-r--r--drivers/media/video/gspca/t613.c2
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c3
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h6
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h1
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c19
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c5
-rw-r--r--drivers/media/video/ivtv/ivtv-version.h7
-rw-r--r--drivers/media/video/m5mols/m5mols_capture.c1
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c1
-rw-r--r--drivers/media/video/marvell-ccic/Kconfig23
-rw-r--r--drivers/media/video/marvell-ccic/Makefile6
-rw-r--r--drivers/media/video/marvell-ccic/cafe-driver.c654
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.c1843
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.h323
-rw-r--r--drivers/media/video/marvell-ccic/mmp-driver.c340
-rw-r--r--drivers/media/video/mem2mem_testdev.c4
-rw-r--r--drivers/media/video/mt9m001.c14
-rw-r--r--drivers/media/video/mt9m111.c359
-rw-r--r--drivers/media/video/mt9t031.c3
-rw-r--r--drivers/media/video/mt9t112.c10
-rw-r--r--drivers/media/video/mt9v011.c85
-rw-r--r--drivers/media/video/mt9v022.c10
-rw-r--r--drivers/media/video/mt9v032.c20
-rw-r--r--drivers/media/video/mx1_camera.c47
-rw-r--r--drivers/media/video/mx2_camera.c66
-rw-r--r--drivers/media/video/mx3_camera.c71
-rw-r--r--drivers/media/video/omap/Kconfig7
-rw-r--r--drivers/media/video/omap/Makefile1
-rw-r--r--drivers/media/video/omap/omap_vout.c645
-rw-r--r--drivers/media/video/omap/omap_vout_vrfb.c390
-rw-r--r--drivers/media/video/omap/omap_vout_vrfb.h40
-rw-r--r--drivers/media/video/omap/omap_voutdef.h78
-rw-r--r--drivers/media/video/omap/omap_voutlib.c46
-rw-r--r--drivers/media/video/omap/omap_voutlib.h12
-rw-r--r--drivers/media/video/omap1_camera.c57
-rw-r--r--drivers/media/video/omap24xxcam.c9
-rw-r--r--drivers/media/video/omap3isp/isp.c1
-rw-r--r--drivers/media/video/omap3isp/isp.h6
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c7
-rw-r--r--drivers/media/video/omap3isp/ispccp2.c27
-rw-r--r--drivers/media/video/omap3isp/ispccp2.h1
-rw-r--r--drivers/media/video/omap3isp/ispstat.c3
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c1
-rw-r--r--drivers/media/video/omap3isp/ispvideo.h3
-rw-r--r--drivers/media/video/ov2640.c13
-rw-r--r--drivers/media/video/ov5642.c1012
-rw-r--r--drivers/media/video/ov7670.c3
-rw-r--r--drivers/media/video/ov7670.h20
-rw-r--r--drivers/media/video/ov772x.c10
-rw-r--r--drivers/media/video/ov9640.c13
-rw-r--r--drivers/media/video/ov9740.c556
-rw-r--r--drivers/media/video/pms.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-main.c1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c9
-rw-r--r--drivers/media/video/pwc/Kconfig1
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c803
-rw-r--r--drivers/media/video/pwc/pwc-dec1.c28
-rw-r--r--drivers/media/video/pwc/pwc-dec1.h8
-rw-r--r--drivers/media/video/pwc/pwc-dec23.c22
-rw-r--r--drivers/media/video/pwc/pwc-dec23.h10
-rw-r--r--drivers/media/video/pwc/pwc-if.c1259
-rw-r--r--drivers/media/video/pwc/pwc-ioctl.h323
-rw-r--r--drivers/media/video/pwc/pwc-kiara.c1
-rw-r--r--drivers/media/video/pwc/pwc-misc.c4
-rw-r--r--drivers/media/video/pwc/pwc-uncompress.c17
-rw-r--r--drivers/media/video/pwc/pwc-uncompress.h40
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c1257
-rw-r--r--drivers/media/video/pwc/pwc.h409
-rw-r--r--drivers/media/video/pxa_camera.c92
-rw-r--r--drivers/media/video/rj54n1cb0c.c7
-rw-r--r--drivers/media/video/s2255drv.c35
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c3
-rw-r--r--drivers/media/video/s5p-mfc/Makefile5
-rw-r--r--drivers/media/video/s5p-mfc/regs-mfc.h413
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc.c1274
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_cmd.c120
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_cmd.h30
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_common.h572
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c343
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h29
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_debug.h48
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c1036
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.h23
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c1829
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.h23
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_intr.c92
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_intr.h26
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.c1397
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.h91
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_pm.c117
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_pm.h24
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_shm.c47
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_shm.h91
-rw-r--r--drivers/media/video/s5p-tv/Kconfig76
-rw-r--r--drivers/media/video/s5p-tv/Makefile17
-rw-r--r--drivers/media/video/s5p-tv/hdmi_drv.c1042
-rw-r--r--drivers/media/video/s5p-tv/hdmiphy_drv.c188
-rw-r--r--drivers/media/video/s5p-tv/mixer.h354
-rw-r--r--drivers/media/video/s5p-tv/mixer_drv.c487
-rw-r--r--drivers/media/video/s5p-tv/mixer_grp_layer.c185
-rw-r--r--drivers/media/video/s5p-tv/mixer_reg.c541
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c1006
-rw-r--r--drivers/media/video/s5p-tv/mixer_vp_layer.c211
-rw-r--r--drivers/media/video/s5p-tv/regs-hdmi.h141
-rw-r--r--drivers/media/video/s5p-tv/regs-mixer.h121
-rw-r--r--drivers/media/video/s5p-tv/regs-sdo.h63
-rw-r--r--drivers/media/video/s5p-tv/regs-vp.h88
-rw-r--r--drivers/media/video/s5p-tv/sdo_drv.c479
-rw-r--r--drivers/media/video/saa7115.c4
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c13
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c12
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c25
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/video/saa7134/saa7134.h3
-rw-r--r--drivers/media/video/saa7164/saa7164-encoder.c6
-rw-r--r--drivers/media/video/saa7164/saa7164-vbi.c6
-rw-r--r--drivers/media/video/saa7164/saa7164.h1
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c207
-rw-r--r--drivers/media/video/sh_mobile_csi2.c135
-rw-r--r--drivers/media/video/sh_vou.c3
-rw-r--r--drivers/media/video/sn9c102/sn9c102.h1
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c16
-rw-r--r--drivers/media/video/soc_camera.c281
-rw-r--r--drivers/media/video/soc_camera_platform.c10
-rw-r--r--drivers/media/video/sr030pc30.c7
-rw-r--r--drivers/media/video/tda7432.c5
-rw-r--r--drivers/media/video/timblogiw.c1
-rw-r--r--drivers/media/video/tlg2300/pd-common.h1
-rw-r--r--drivers/media/video/tlg2300/pd-dvb.c2
-rw-r--r--drivers/media/video/tlg2300/pd-main.c1
-rw-r--r--drivers/media/video/tlg2300/pd-radio.c2
-rw-r--r--drivers/media/video/tuner-core.c18
-rw-r--r--drivers/media/video/tw9910.c21
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c12
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c6
-rw-r--r--drivers/media/video/uvc/uvc_driver.c12
-rw-r--r--drivers/media/video/uvc/uvc_queue.c2
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c6
-rw-r--r--drivers/media/video/uvc/uvc_video.c2
-rw-r--r--drivers/media/video/uvc/uvcvideo.h3
-rw-r--r--drivers/media/video/v4l2-common.c3
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c37
-rw-r--r--drivers/media/video/v4l2-ctrls.c826
-rw-r--r--drivers/media/video/v4l2-device.c1
-rw-r--r--drivers/media/video/v4l2-event.c282
-rw-r--r--drivers/media/video/v4l2-fh.c23
-rw-r--r--drivers/media/video/v4l2-ioctl.c50
-rw-r--r--drivers/media/video/v4l2-subdev.c31
-rw-r--r--drivers/media/video/videobuf-dma-sg.c5
-rw-r--r--drivers/media/video/videobuf2-dma-sg.c8
-rw-r--r--drivers/media/video/videobuf2-memops.c6
-rw-r--r--drivers/media/video/vino.c5
-rw-r--r--drivers/media/video/vivi.c91
-rw-r--r--drivers/media/video/w9966.c4
-rw-r--r--drivers/media/video/zoran/zoran.h4
-rw-r--r--drivers/media/video/zoran/zoran_card.c7
-rw-r--r--drivers/media/video/zoran/zoran_driver.c3
-rw-r--r--drivers/media/video/zr364xx.c6
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/i2o/i2o_scsi.c2
-rw-r--r--drivers/mfd/Kconfig61
-rw-r--r--drivers/mfd/Makefile11
-rw-r--r--drivers/mfd/aat2870-core.c535
-rw-r--r--drivers/mfd/ab3550-core.c41
-rw-r--r--drivers/mfd/ab8500-core.c231
-rw-r--r--drivers/mfd/ab8500-debugfs.c41
-rw-r--r--drivers/mfd/jz4740-adc.c90
-rw-r--r--drivers/mfd/lpc_sch.c49
-rw-r--r--drivers/mfd/max8997-irq.c2
-rw-r--r--drivers/mfd/max8998.c2
-rw-r--r--drivers/mfd/omap-usb-host.c4
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/stmpe.h1
-rw-r--r--drivers/mfd/timberdale.c8
-rw-r--r--drivers/mfd/tps65910.c13
-rw-r--r--drivers/mfd/tps65911-comparator.c2
-rw-r--r--drivers/mfd/tps65912-core.c177
-rw-r--r--drivers/mfd/tps65912-i2c.c139
-rw-r--r--drivers/mfd/tps65912-irq.c224
-rw-r--r--drivers/mfd/tps65912-spi.c142
-rw-r--r--drivers/mfd/twl-core.c15
-rw-r--r--drivers/mfd/twl4030-audio.c277
-rw-r--r--drivers/mfd/twl4030-codec.c277
-rw-r--r--drivers/mfd/twl4030-madc.c8
-rw-r--r--drivers/mfd/twl6030-pwm.c2
-rw-r--r--drivers/mfd/twl6040-core.c620
-rw-r--r--drivers/mfd/twl6040-irq.c191
-rw-r--r--drivers/mfd/wm831x-auxadc.c299
-rw-r--r--drivers/mfd/wm831x-core.c259
-rw-r--r--drivers/mfd/wm831x-irq.c77
-rw-r--r--drivers/mfd/wm8350-irq.c18
-rw-r--r--drivers/mfd/wm8994-core.c33
-rw-r--r--drivers/mfd/wm8994-irq.c12
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/ab8500-pwm.c2
-rw-r--r--drivers/misc/cb710/core.c3
-rw-r--r--drivers/misc/fsa9480.c4
-rw-r--r--drivers/misc/phantom.c2
-rw-r--r--drivers/misc/pti.c14
-rw-r--r--drivers/misc/ti-st/st_core.c10
-rw-r--r--drivers/misc/ti-st/st_kim.c33
-rw-r--r--drivers/misc/ti-st/st_ll.c19
-rw-r--r--drivers/misc/vmw_balloon.c31
-rw-r--r--drivers/mmc/card/mmc_test.c58
-rw-r--r--drivers/mmc/core/core.c37
-rw-r--r--drivers/mmc/core/host.c12
-rw-r--r--drivers/mmc/core/host.h8
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/sd.c81
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c300
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c3
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c3
-rw-r--r--drivers/mmc/host/sdhci-s3c.c6
-rw-r--r--drivers/mmc/host/sdhci.c53
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc.h1
-rw-r--r--drivers/mtd/ubi/debug.h2
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/acenic.c45
-rw-r--r--drivers/net/acenic.h6
-rw-r--r--drivers/net/arm/am79c961a.c3
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/atlx/atl2.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c45
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c2
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c218
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h3
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c23
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h32
-rw-r--r--drivers/net/bonding/bond_main.c26
-rw-r--r--drivers/net/bonding/bond_sysfs.c133
-rw-r--r--drivers/net/can/sja1000/plx_pci.c4
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/ti_hecc.c3
-rw-r--r--drivers/net/cassini.c5
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/cxgb3/l2t.h2
-rw-r--r--drivers/net/cxgb3/t3cdev.h2
-rw-r--r--drivers/net/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/cxgb4/l2t.h2
-rw-r--r--drivers/net/e1000/e1000_ethtool.c6
-rw-r--r--drivers/net/e1000/e1000_hw.c3
-rw-r--r--drivers/net/e1000e/82571.c6
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/ethtool.c14
-rw-r--r--drivers/net/e1000e/ich8lan.c72
-rw-r--r--drivers/net/e1000e/lib.c8
-rw-r--r--drivers/net/e1000e/netdev.c91
-rw-r--r--drivers/net/e1000e/phy.c2
-rw-r--r--drivers/net/fec.c125
-rw-r--r--drivers/net/forcedeth.c17
-rw-r--r--drivers/net/gianfar.c15
-rw-r--r--drivers/net/gianfar_ethtool.c26
-rw-r--r--drivers/net/gianfar_ptp.c9
-rw-r--r--drivers/net/hamradio/6pack.c2
-rw-r--r--drivers/net/hamradio/dmascc.c2
-rw-r--r--drivers/net/ibm_newemac/core.c33
-rw-r--r--drivers/net/ibm_newemac/emac.h19
-rw-r--r--drivers/net/ibm_newemac/phy.c7
-rw-r--r--drivers/net/ibmveth.c14
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/igb/e1000_nvm.c1
-rw-r--r--drivers/net/igb/igb_ethtool.c5
-rw-r--r--drivers/net/igb/igb_main.c4
-rw-r--r--drivers/net/igbvf/netdev.c2
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.c18
-rw-r--r--drivers/net/ixgb/ixgb_ee.c9
-rw-r--r--drivers/net/ixgb/ixgb_hw.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c5
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c7
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c3
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c1
-rw-r--r--drivers/net/macb.c3
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mlx4/en_port.c2
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mlx4/port.c9
-rw-r--r--drivers/net/niu.c4
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/pcnet32.c2
-rw-r--r--drivers/net/phy/dp83640.c5
-rw-r--r--drivers/net/phy/national.c17
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/ppp_generic.c2
-rw-r--r--drivers/net/r8169.c28
-rw-r--r--drivers/net/rionet.c23
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/sis190.c12
-rw-r--r--drivers/net/slip.c2
-rw-r--r--drivers/net/sungem.c3
-rw-r--r--drivers/net/tg3.c287
-rw-r--r--drivers/net/tg3.h9
-rw-r--r--drivers/net/tun.c1
-rw-r--r--drivers/net/usb/asix.c7
-rw-r--r--drivers/net/usb/cdc_ncm.c191
-rw-r--r--drivers/net/usb/rtl8150.c1
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/via-velocity.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c34
-rw-r--r--drivers/net/wan/hdlc_fr.c5
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h2
-rw-r--r--drivers/net/wireless/airo.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c27
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/bus.c2
-rw-r--r--drivers/net/wireless/b43/dma.c20
-rw-r--r--drivers/net/wireless/b43/main.c5
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h2
-rw-r--r--drivers/net/wireless/b43legacy/dma.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c6
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c39
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c3
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c11
-rw-r--r--drivers/net/wireless/wl1251/acx.c6
-rw-r--r--drivers/net/wireless/wl1251/cmd.c2
-rw-r--r--drivers/net/wireless/wl12xx/acx.c6
-rw-r--r--drivers/net/wireless/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c2
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c50
-rw-r--r--drivers/nfc/pn533.c2
-rw-r--r--drivers/of/address.c2
-rw-r--r--drivers/of/base.c7
-rw-r--r--drivers/of/gpio.c5
-rw-r--r--drivers/of/of_net.c45
-rw-r--r--drivers/oprofile/oprofile_stats.h2
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c6
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c17
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c11
-rw-r--r--drivers/pci/hotplug/pcihp_slot.c47
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/pci-label.c2
-rw-r--r--drivers/pci/pci.c78
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c76
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c3
-rw-r--r--drivers/pci/probe.c234
-rw-r--r--drivers/pci/setup-bus.c169
-rw-r--r--drivers/pci/setup-irq.c4
-rw-r--r--drivers/pci/setup-res.c155
-rw-r--r--drivers/pci/xen-pcifront.c2
-rw-r--r--drivers/pcmcia/pxa2xx_balloon3.c10
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c11
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c11
-rw-r--r--drivers/pcmcia/pxa2xx_colibri.c11
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c10
-rw-r--r--drivers/pcmcia/pxa2xx_palmld.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c11
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c34
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c10
-rw-r--r--drivers/pcmcia/soc_common.c7
-rw-r--r--drivers/platform/x86/Kconfig8
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c40
-rw-r--r--drivers/platform/x86/acerhdf.c13
-rw-r--r--drivers/platform/x86/asus-laptop.c9
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c27
-rw-r--r--drivers/platform/x86/asus-wmi.c239
-rw-r--r--drivers/platform/x86/asus-wmi.h7
-rw-r--r--drivers/platform/x86/dell-laptop.c1
-rw-r--r--drivers/platform/x86/dell-wmi.c10
-rw-r--r--drivers/platform/x86/eeepc-wmi.c27
-rw-r--r--drivers/platform/x86/ideapad-laptop.c195
-rw-r--r--drivers/platform/x86/intel_ips.c4
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c26
-rw-r--r--drivers/platform/x86/intel_rar_register.c4
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c10
-rw-r--r--drivers/platform/x86/msi-wmi.c1
-rw-r--r--drivers/platform/x86/samsung-laptop.c20
-rw-r--r--drivers/platform/x86/samsung-q10.c196
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c11
-rw-r--r--drivers/power/Kconfig14
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/apm_power.c8
-rw-r--r--drivers/power/bq20z75.c103
-rw-r--r--drivers/power/gpio-charger.c2
-rw-r--r--drivers/power/max17042_battery.c175
-rw-r--r--drivers/power/max8903_charger.c16
-rw-r--r--drivers/power/max8997_charger.c207
-rw-r--r--drivers/power/max8998_charger.c219
-rw-r--r--drivers/power/s3c_adc_battery.c3
-rw-r--r--drivers/power/twl4030_charger.c10
-rw-r--r--drivers/power/wm831x_backup.c12
-rw-r--r--drivers/power/wm831x_power.c26
-rw-r--r--drivers/rapidio/rio-scan.c3
-rw-r--r--drivers/regulator/Kconfig13
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/aat2870-regulator.c232
-rw-r--r--drivers/regulator/core.c190
-rw-r--r--drivers/regulator/dummy.c32
-rw-r--r--drivers/regulator/tps65910-regulator.c63
-rw-r--r--drivers/regulator/tps65912-regulator.c800
-rw-r--r--drivers/regulator/twl-regulator.c66
-rw-r--r--drivers/regulator/wm831x-dcdc.c126
-rw-r--r--drivers/regulator/wm831x-ldo.c25
-rw-r--r--drivers/regulator/wm8994-regulator.c4
-rw-r--r--drivers/rtc/interface.c55
-rw-r--r--drivers/rtc/rtc-ep93xx.c16
-rw-r--r--drivers/rtc/rtc-imxdi.c1
-rw-r--r--drivers/rtc/rtc-lib.c2
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/rtc/rtc-s3c.c105
-rw-r--r--drivers/rtc/rtc-twl.c60
-rw-r--r--drivers/s390/block/dasd.c9
-rw-r--r--drivers/s390/block/dasd_eckd.c9
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_ioctl.c10
-rw-r--r--drivers/s390/block/dasd_proc.c4
-rw-r--r--drivers/s390/char/sclp_async.c9
-rw-r--r--drivers/s390/char/sclp_cmd.c6
-rw-r--r--drivers/s390/char/sclp_quiesce.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/cio/device.h2
-rw-r--r--drivers/s390/cio/qdio.h2
-rw-r--r--drivers/s390/cio/qdio_debug.c12
-rw-r--r--drivers/s390/cio/qdio_main.c23
-rw-r--r--drivers/s390/cio/qdio_thinint.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c2
-rw-r--r--drivers/s390/crypto/zcrypt_mono.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c2
-rw-r--r--drivers/s390/net/fsm.h2
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c2
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.h4
-rw-r--r--drivers/scsi/bfa/bfa.h51
-rw-r--r--drivers/scsi/bfa/bfa_core.c60
-rw-r--r--drivers/scsi/bfa/bfa_defs.h171
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h99
-rw-r--r--drivers/scsi/bfa/bfa_fc.h155
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c736
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h45
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c26
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h1
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c37
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c74
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c49
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c38
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c25
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c569
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h48
-rw-r--r--drivers/scsi/bfa/bfa_modules.h3
-rw-r--r--drivers/scsi/bfa/bfa_svc.c249
-rw-r--r--drivers/scsi/bfa/bfa_svc.h29
-rw-r--r--drivers/scsi/bfa/bfad.c8
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c1082
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h237
-rw-r--r--drivers/scsi/bfa/bfad_drv.h6
-rw-r--r--drivers/scsi/bfa/bfad_im.c26
-rw-r--r--drivers/scsi/bfa/bfad_im.h22
-rw-r--r--drivers/scsi/bfa/bfi.h20
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h107
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c434
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c732
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c433
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c194
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c10
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c82
-rw-r--r--drivers/scsi/dpt/dpti_i2o.h2
-rw-r--r--drivers/scsi/fcoe/fcoe.c82
-rw-r--r--drivers/scsi/hpsa.c63
-rw-r--r--drivers/scsi/hpsa.h2
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/isci/host.c13
-rw-r--r--drivers/scsi/isci/host.h3
-rw-r--r--drivers/scsi/isci/init.c47
-rw-r--r--drivers/scsi/isci/phy.c13
-rw-r--r--drivers/scsi/isci/registers.h12
-rw-r--r--drivers/scsi/isci/request.c30
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.c2
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.h2
-rw-r--r--drivers/scsi/libfc/fc_exch.c68
-rw-r--r--drivers/scsi/libfc/fc_fcp.c20
-rw-r--r--drivers/scsi/libfc/fc_lport.c12
-rw-r--r--drivers/scsi/libiscsi.c22
-rw-r--r--drivers/scsi/libsas/sas_expander.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c161
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c1354
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h125
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c105
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c222
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c90
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c97
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c399
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c18
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c4
-rw-r--r--drivers/scsi/mvsas/Kconfig9
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c101
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c508
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h99
-rw-r--r--drivers/scsi/mvsas/mv_chips.h17
-rw-r--r--drivers/scsi/mvsas/mv_defs.h11
-rw-r--r--drivers/scsi/mvsas/mv_init.c187
-rw-r--r--drivers/scsi/mvsas/mv_sas.c422
-rw-r--r--drivers/scsi/mvsas/mv_sas.h105
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pmcraid.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c190
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c441
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c396
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h187
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c371
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c859
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h37
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c396
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c772
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c1091
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c162
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c581
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c779
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c275
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/Kconfig2
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_transport_spi.c24
-rw-r--r--drivers/sh/clk/core.c29
-rw-r--r--drivers/sh/intc/chip.c3
-rw-r--r--drivers/spi/spi-pl022.c11
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c2
-rw-r--r--drivers/staging/ath6kl/os/linux/ar6000_drv.c1
-rw-r--r--drivers/staging/brcm80211/brcmsmac/mac80211_if.h2
-rw-r--r--drivers/staging/brcm80211/brcmsmac/otp.c1
-rw-r--r--drivers/staging/brcm80211/brcmsmac/types.h1
-rw-r--r--drivers/staging/cxd2099/Kconfig11
-rw-r--r--drivers/staging/cxd2099/cxd2099.c311
-rw-r--r--drivers/staging/cxd2099/cxd2099.h18
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c1
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c1
-rw-r--r--drivers/staging/gma500/gem_glue.c23
-rw-r--r--drivers/staging/gma500/gem_glue.h2
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.c3
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.h3
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dpi.c7
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.c4
-rw-r--r--drivers/staging/gma500/medfield.h2
-rw-r--r--drivers/staging/gma500/mrst_hdmi.c2
-rw-r--r--drivers/staging/gma500/psb_drv.h1
-rw-r--r--drivers/staging/hv/blkvsc_drv.c4
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c8
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c8
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c8
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c8
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c8
-rw-r--r--drivers/staging/nvec/TODO6
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c1
-rw-r--r--drivers/staging/octeon/ethernet-rx.c2
-rw-r--r--drivers/staging/octeon/ethernet-spi.c1
-rw-r--r--drivers/staging/octeon/ethernet-tx.c2
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c9
-rw-r--r--drivers/staging/rts_pstor/rtsx.c109
-rw-r--r--drivers/staging/rts_pstor/rtsx.h9
-rw-r--r--drivers/staging/solo6x10/core.c1
-rw-r--r--drivers/staging/solo6x10/enc.c1
-rw-r--r--drivers/staging/solo6x10/g723.c1
-rw-r--r--drivers/staging/solo6x10/p2m.c1
-rw-r--r--drivers/staging/solo6x10/solo6x10.h3
-rw-r--r--drivers/staging/speakup/devsynth.c5
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c1
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/host_os.h2
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c12
-rw-r--r--drivers/staging/winbond/mds_s.h2
-rw-r--r--drivers/staging/winbond/wb35reg_s.h2
-rw-r--r--drivers/staging/zcache/Makefile2
-rw-r--r--drivers/staging/zcache/tmem.c2
-rw-r--r--drivers/staging/zcache/zcache-main.c (renamed from drivers/staging/zcache/zcache.c)21
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/Makefile2
-rw-r--r--drivers/target/iscsi/Kconfig9
-rw-r--r--drivers/target/iscsi/Makefile20
-rw-r--r--drivers/target/iscsi/iscsi_target.c4563
-rw-r--r--drivers/target/iscsi/iscsi_target.h42
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c490
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.h31
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c1882
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h859
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c531
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.h12
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c87
-rw-r--r--drivers/target/iscsi/iscsi_target_device.h9
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c1004
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.h15
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c1299
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.h26
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c474
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h18
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c1222
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h12
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c1067
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h17
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c263
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.h14
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c1894
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h269
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c664
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.h86
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c950
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.h64
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c849
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.h14
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c759
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h41
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c551
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.h88
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c1817
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h60
-rw-r--r--drivers/target/target_core_cdb.c57
-rw-r--r--drivers/target/target_core_device.c48
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_pr.c8
-rw-r--r--drivers/target/target_core_rd.c24
-rw-r--r--drivers/target/target_core_tpg.c64
-rw-r--r--drivers/target/target_core_transport.c246
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h5
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c1
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c6
-rw-r--r--drivers/target/tcm_fc/tfc_io.c121
-rw-r--r--drivers/thermal/Kconfig8
-rw-r--r--drivers/thermal/thermal_sys.c142
-rw-r--r--drivers/tty/bfin_jtag_comm.c2
-rw-r--r--drivers/tty/pty.c17
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serial/8250.c8
-rw-r--r--drivers/tty/serial/8250_pci.c11
-rw-r--r--drivers/tty/serial/8250_pnp.c3
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/atmel_serial.c8
-rw-r--r--drivers/tty/serial/crisv10.c4
-rw-r--r--drivers/tty/serial/dz.c2
-rw-r--r--drivers/tty/serial/imx.c161
-rw-r--r--drivers/tty/serial/max3107-aava.c2
-rw-r--r--drivers/tty/serial/max3107.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c2
-rw-r--r--drivers/tty/serial/omap-serial.c3
-rw-r--r--drivers/tty/serial/pch_uart.c3
-rw-r--r--drivers/tty/serial/samsung.c8
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/serial_core.c5
-rw-r--r--drivers/tty/serial/sh-sci.c829
-rw-r--r--drivers/tty/serial/sh-sci.h434
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/core/config.c11
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/at91_udc.c1
-rw-r--r--drivers/usb/gadget/composite.c6
-rw-r--r--drivers/usb/gadget/f_audio.c2
-rw-r--r--drivers/usb/gadget/f_hid.c7
-rw-r--r--drivers/usb/gadget/f_phonet.c1
-rw-r--r--drivers/usb/gadget/f_rndis.c2
-rw-r--r--drivers/usb/gadget/fusb300_udc.c101
-rw-r--r--drivers/usb/gadget/net2272.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c1
-rw-r--r--drivers/usb/gadget/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c22
-rw-r--r--drivers/usb/host/ehci-hub.c19
-rw-r--r--drivers/usb/host/ehci-mxc.c1
-rw-r--r--drivers/usb/host/ehci-omap.c16
-rw-r--r--drivers/usb/host/ehci-s5p.c1
-rw-r--r--drivers/usb/host/isp1760-hcd.c3
-rw-r--r--drivers/usb/host/pci-quirks.c4
-rw-r--r--drivers/usb/host/xhci-hub.c17
-rw-r--r--drivers/usb/host/xhci-ring.c90
-rw-r--r--drivers/usb/host/xhci.c47
-rw-r--r--drivers/usb/image/microtek.c2
-rw-r--r--drivers/usb/misc/appledisplay.c2
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/musb/blackfin.c1
-rw-r--r--drivers/usb/musb/cppi_dma.c26
-rw-r--r--drivers/usb/musb/musb_core.h12
-rw-r--r--drivers/usb/musb/musb_gadget.c9
-rw-r--r--drivers/usb/musb/musb_regs.h6
-rw-r--r--drivers/usb/musb/tusb6010.c1
-rw-r--r--drivers/usb/musb/tusb6010_omap.c3
-rw-r--r--drivers/usb/musb/ux500_dma.c38
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c1
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c28
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/garmin_gps.c2
-rw-r--r--drivers/usb/serial/option.c116
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h10
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c2
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/backlight/Kconfig7
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/aat2870_bl.c246
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/ep93xx_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c9
-rw-r--r--drivers/video/omap2/displays/panel-taal.c55
-rw-r--r--drivers/video/omap2/dss/Kconfig12
-rw-r--r--drivers/video/omap2/dss/core.c21
-rw-r--r--drivers/video/omap2/dss/dispc.c562
-rw-r--r--drivers/video/omap2/dss/display.c57
-rw-r--r--drivers/video/omap2/dss/dpi.c73
-rw-r--r--drivers/video/omap2/dss/dsi.c296
-rw-r--r--drivers/video/omap2/dss/dss.c583
-rw-r--r--drivers/video/omap2/dss/dss.h54
-rw-r--r--drivers/video/omap2/dss/dss_features.c36
-rw-r--r--drivers/video/omap2/dss/dss_features.h7
-rw-r--r--drivers/video/omap2/dss/hdmi.c162
-rw-r--r--drivers/video/omap2/dss/manager.c351
-rw-r--r--drivers/video/omap2/dss/overlay.c27
-rw-r--r--drivers/video/omap2/dss/rfbi.c114
-rw-r--r--drivers/video/omap2/dss/sdi.c40
-rw-r--r--drivers/video/omap2/dss/venc.c183
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c72
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c166
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c34
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h37
-rw-r--r--drivers/video/savage/savagefb.h2
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/vermilion/vermilion.h2
-rw-r--r--drivers/w1/masters/ds2490.c4
-rw-r--r--drivers/w1/masters/matrox_w1.c6
-rw-r--r--drivers/w1/slaves/w1_ds2408.c2
-rw-r--r--drivers/w1/slaves/w1_smem.c4
-rw-r--r--drivers/w1/slaves/w1_therm.c13
-rw-r--r--drivers/w1/w1.c6
-rw-r--r--drivers/w1/w1.h2
-rw-r--r--drivers/w1/w1_family.c2
-rw-r--r--drivers/w1/w1_family.h5
-rw-r--r--drivers/w1/w1_int.c2
-rw-r--r--drivers/w1/w1_int.h2
-rw-r--r--drivers/w1/w1_io.c2
-rw-r--r--drivers/w1/w1_log.h2
-rw-r--r--drivers/w1/w1_netlink.c2
-rw-r--r--drivers/w1/w1_netlink.h2
-rw-r--r--drivers/watchdog/Kconfig33
-rw-r--r--drivers/watchdog/Makefile8
-rw-r--r--drivers/watchdog/at91sam9_wdt.c21
-rw-r--r--drivers/watchdog/at91sam9_wdt.h37
-rw-r--r--drivers/watchdog/dw_wdt.c376
-rw-r--r--drivers/watchdog/hpwdt.c104
-rw-r--r--drivers/watchdog/iTCO_wdt.c412
-rw-r--r--drivers/watchdog/imx2_wdt.c6
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c2
-rw-r--r--drivers/watchdog/it8712f_wdt.c63
-rw-r--r--drivers/watchdog/it87_wdt.c168
-rw-r--r--drivers/watchdog/mpcore_wdt.c23
-rw-r--r--drivers/watchdog/mtx-1_wdt.c4
-rw-r--r--drivers/watchdog/nv_tco.c8
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c433
-rw-r--r--drivers/watchdog/pc87413_wdt.c96
-rw-r--r--drivers/watchdog/s3c2410_wdt.c10
-rw-r--r--drivers/watchdog/sbc7240_wdt.c2
-rw-r--r--drivers/watchdog/sch311x_wdt.c5
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c5
-rw-r--r--drivers/watchdog/watchdog_core.c111
-rw-r--r--drivers/watchdog/watchdog_dev.c395
-rw-r--r--drivers/watchdog/watchdog_dev.h33
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/xen-pciback/xenbus.c1
-rw-r--r--drivers/xen/xen-selfballoon.c5
1395 files changed, 115255 insertions, 24953 deletions
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index bc533dd..f895a24 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -121,7 +121,7 @@
/* Maximum sleep allowed via Sleep() operator */
-#define ACPI_MAX_SLEEP 20000 /* Two seconds */
+#define ACPI_MAX_SLEEP 2000 /* Two seconds */
/******************************************************************************
*
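The constant is in milliseconds, so the new value of 2000 is what the adjacent "Two seconds" comment always described; the previous 20000 actually allowed Sleep() operator requests of up to twenty seconds before the cap took effect.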
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 73863d8..76dc02f 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -126,6 +126,12 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
+/*
+ * Disable runtime checking and repair of values returned by control methods.
+ * Use only if the repair is causing a problem on a particular machine.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
+
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index c7f743c..5552125 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -357,6 +357,7 @@ struct acpi_predefined_data {
char *pathname;
const union acpi_predefined_info *predefined;
union acpi_operand_object *parent_package;
+ struct acpi_namespace_node *node;
u32 flags;
u8 node_flags;
};
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 94e73c9..c445cca 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -468,6 +468,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
{{"_TC2", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TDL", 0, ACPI_RTYPE_INTEGER}},
{{"_TIP", 1, ACPI_RTYPE_INTEGER}},
{{"_TIV", 1, ACPI_RTYPE_INTEGER}},
{{"_TMP", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 9fb03fa..c845c80 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -193,14 +193,20 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
}
/*
- * 1) We have a return value, but if one wasn't expected, just exit, this is
- * not a problem. For example, if the "Implicit Return" feature is
- * enabled, methods will always return a value.
+ * Return value validation and possible repair.
*
- * 2) If the return value can be of any type, then we cannot perform any
- * validation, exit.
+ * 1) Don't perform return value validation/repair if this feature
+ * has been disabled via a global option.
+ *
+ * 2) We have a return value, but if one wasn't expected, just exit,
+ * this is not a problem. For example, if the "Implicit Return"
+ * feature is enabled, methods will always return a value.
+ *
+ * 3) If the return value can be of any type, then we cannot perform
+ * any validation, just exit.
*/
- if ((!predefined->info.expected_btypes) ||
+ if (acpi_gbl_disable_auto_repair ||
+ (!predefined->info.expected_btypes) ||
(predefined->info.expected_btypes == ACPI_RTYPE_ALL)) {
goto cleanup;
}
@@ -212,6 +218,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
goto cleanup;
}
data->predefined = predefined;
+ data->node = node;
data->node_flags = node->flags;
data->pathname = pathname;
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 973883b..024c4f2 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -503,6 +503,21 @@ acpi_ns_repair_TSS(struct acpi_predefined_data *data,
{
union acpi_operand_object *return_object = *return_object_ptr;
acpi_status status;
+ struct acpi_namespace_node *node;
+
+ /*
+ * We can only sort the _TSS return package if there is no _PSS in the
+ * same scope. This is because if _PSS is present, the ACPI specification
+ * dictates that the _TSS Power Dissipation field is to be ignored, and
+ * therefore some BIOSs leave garbage values in the _TSS Power field(s).
+ * In this case, it is best to just return the _TSS package as-is.
+ * (May, 2011)
+ */
+ status =
+ acpi_ns_get_node(data->node, "^_PSS", ACPI_NS_NO_UPSEARCH, &node);
+ if (ACPI_SUCCESS(status)) {
+ return (AE_OK);
+ }
status = acpi_ns_check_sorted_list(data, return_object, 5, 1,
ACPI_SORT_DESCENDING,
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 48db094..62365f6 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -126,12 +126,29 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
}
/*
- * Originally, we checked the table signature for "SSDT" or "PSDT" here.
- * Next, we added support for OEMx tables, signature "OEM".
- * Valid tables were encountered with a null signature, so we've just
- * given up on validating the signature, since it seems to be a waste
- * of code. The original code was removed (05/2008).
+ * Validate the incoming table signature.
+ *
+ * 1) Originally, we checked the table signature for "SSDT" or "PSDT".
+ * 2) We added support for OEMx tables, signature "OEM".
+ * 3) Valid tables were encountered with a null signature, so we just
+ * gave up on validating the signature, (05/2008).
+ * 4) We encountered non-AML tables such as the MADT, which caused
+ * interpreter errors and kernel faults. So now, we once again allow
+ * only "SSDT", "OEMx", and now, also a null signature. (05/2011).
*/
+ if ((table_desc->pointer->signature[0] != 0x00) &&
+ (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT))
+ && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) {
+ ACPI_ERROR((AE_INFO,
+ "Table has invalid signature [%4.4s] (0x%8.8X), must be SSDT or OEMx",
+ acpi_ut_valid_acpi_name(*(u32 *)table_desc->
+ pointer->
+ signature) ? table_desc->
+ pointer->signature : "????",
+ *(u32 *)table_desc->pointer->signature));
+
+ return_ACPI_STATUS(AE_BAD_SIGNATURE);
+ }
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
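The negated three-way test above reads more easily as an acceptance rule. A stand-alone restatement (hypothetical helper name, plain libc calls standing in for ACPI_COMPARE_NAME and ACPI_STRNCMP):

```c
#include <stdbool.h>
#include <string.h>

/* A dynamically loaded table is accepted only if its signature is null,
 * exactly "SSDT", or begins with "OEM"; anything else is rejected above
 * with AE_BAD_SIGNATURE. */
static bool table_signature_ok(const char sig[4])
{
	return sig[0] == '\0' ||
	       memcmp(sig, "SSDT", 4) == 0 ||
	       strncmp(sig, "OEM", 3) == 0;
}
```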
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index f739a70..e3f4787 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -10,9 +10,12 @@ config ACPI_APEI
error injection.
config ACPI_APEI_GHES
- tristate "APEI Generic Hardware Error Source"
+ bool "APEI Generic Hardware Error Source"
depends on ACPI_APEI && X86
select ACPI_HED
+ select IRQ_WORK
+ select LLIST
+ select GENERIC_ALLOCATOR
help
Generic Hardware Error Source provides a way to report
platform hardware errors (such as that from chipset). It
@@ -30,6 +33,13 @@ config ACPI_APEI_PCIEAER
PCIe AER errors may be reported via APEI firmware first mode.
Turn on this option to enable the corresponding support.
+config ACPI_APEI_MEMORY_FAILURE
+ bool "APEI memory error recovering support"
+ depends on ACPI_APEI && MEMORY_FAILURE
+ help
+ Memory errors may be reported via APEI firmware first mode.
+ Turn on this option to enable the memory recovering support.
+
config ACPI_APEI_EINJ
tristate "APEI Error INJection (EINJ)"
depends on ACPI_APEI && DEBUG_FS
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 4a904a4..6154036 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -157,9 +157,10 @@ EXPORT_SYMBOL_GPL(apei_exec_noop);
* Interpret the specified action. Go through whole action table,
* execute all instructions belong to the action.
*/
-int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
+ bool optional)
{
- int rc;
+ int rc = -ENOENT;
u32 i, ip;
struct acpi_whea_header *entry;
apei_exec_ins_func_t run;
@@ -198,9 +199,9 @@ rewind:
goto rewind;
}
- return 0;
+ return !optional && rc < 0 ? rc : 0;
}
-EXPORT_SYMBOL_GPL(apei_exec_run);
+EXPORT_SYMBOL_GPL(__apei_exec_run);
typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
@@ -603,3 +604,29 @@ struct dentry *apei_get_debugfs_dir(void)
return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
+
+int apei_osc_setup(void)
+{
+ static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
+ acpi_handle handle;
+ u32 capbuf[3];
+ struct acpi_osc_context context = {
+ .uuid_str = whea_uuid_str,
+ .rev = 1,
+ .cap.length = sizeof(capbuf),
+ .cap.pointer = capbuf,
+ };
+
+ capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_TYPE] = 1;
+ capbuf[OSC_CONTROL_TYPE] = 0;
+
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
+ || ACPI_FAILURE(acpi_run_osc(handle, &context)))
+ return -EIO;
+ else {
+ kfree(context.ret.pointer);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(apei_osc_setup);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index ef0581f..f57050e 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -50,7 +50,18 @@ static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
return ctx->value;
}
-int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+int __apei_exec_run(struct apei_exec_context *ctx, u8 action, bool optional);
+
+static inline int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+ return __apei_exec_run(ctx, action, 0);
+}
+
+/* It is optional whether the firmware provides the action */
+static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 action)
+{
+ return __apei_exec_run(ctx, action, 1);
+}
/* Common instruction implementation */
@@ -113,4 +124,6 @@ void apei_estatus_print(const char *pfx,
const struct acpi_hest_generic_status *estatus);
int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+
+int apei_osc_setup(void);
#endif
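The pair of wrappers separates required actions from optional ones: an action with no instructions leaves the initial -ENOENT in place, and __apei_exec_run() propagates it only when optional is false. The ERST and EINJ conversions later in this series use the optional form for their BEGIN/END bookkeeping actions; a sketch of the calling pattern (hypothetical caller, kernel headers as used by erst.c assumed to be in scope):

```c
/* Illustrative only: ctx setup and the data-moving actions between
 * BEGIN and END are elided. */
static int example_write(struct apei_exec_context *ctx, u64 offset)
{
	int rc;

	/* BEGIN/END bookkeeping may be absent from the firmware table,
	 * so a missing action is not treated as an error here. */
	rc = apei_exec_run_optional(ctx, ACPI_ERST_BEGIN_WRITE);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(ctx, offset);
	/* ... the mandatory data-moving actions still go through
	 * apei_exec_run(ctx, action) and fail hard if absent ... */
	return apei_exec_run_optional(ctx, ACPI_ERST_END);
}
```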
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index f74b2ea..589b96c 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -46,7 +46,8 @@
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
* most will ignore the parameter and make their own choice of address
- * for error injection.
+ * for error injection. This extension is used only if
+ * the param_extension module parameter is specified.

*/
struct einj_parameter {
u64 type;
@@ -65,6 +66,9 @@ struct einj_parameter {
((struct acpi_whea_header *)((char *)(tab) + \
sizeof(struct acpi_table_einj)))
+static bool param_extension;
+module_param(param_extension, bool, 0);
+
static struct acpi_table_einj *einj_tab;
static struct apei_resources einj_resources;
@@ -285,7 +289,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
einj_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+ rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, type);
@@ -323,7 +327,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
rc = __einj_error_trigger(trigger_paddr);
if (rc)
return rc;
- rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+ rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
return rc;
}
@@ -489,14 +493,6 @@ static int __init einj_init(void)
einj_debug_dir, NULL, &error_type_fops);
if (!fentry)
goto err_cleanup;
- fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param1);
- if (!fentry)
- goto err_cleanup;
- fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param2);
- if (!fentry)
- goto err_cleanup;
fentry = debugfs_create_file("error_inject", S_IWUSR,
einj_debug_dir, NULL, &error_inject_fops);
if (!fentry)
@@ -513,12 +509,23 @@ static int __init einj_init(void)
rc = apei_exec_pre_map_gars(&ctx);
if (rc)
goto err_release;
- param_paddr = einj_get_parameter_address();
- if (param_paddr) {
- einj_param = ioremap(param_paddr, sizeof(*einj_param));
- rc = -ENOMEM;
- if (!einj_param)
- goto err_unmap;
+ if (param_extension) {
+ param_paddr = einj_get_parameter_address();
+ if (param_paddr) {
+ einj_param = ioremap(param_paddr, sizeof(*einj_param));
+ rc = -ENOMEM;
+ if (!einj_param)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_unmap;
+ } else
+ pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
}
pr_info(EINJ_PFX "Error INJection is initialized.\n");
@@ -526,6 +533,8 @@ static int __init einj_init(void)
return 0;
err_unmap:
+ if (einj_param)
+ iounmap(einj_param);
apei_exec_post_unmap_gars(&ctx);
err_release:
apei_resources_release(&einj_resources);
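With this change the unpublished address extension is opt-in: the param1 and param2 debugfs files are created only when EINJ is loaded with the new flag, e.g. modprobe einj param_extension=1 (or einj.param_extension=1 on the kernel command line if EINJ is built in); otherwise the driver prints the "Parameter extension is not supported" warning, and the new error-path iounmap() keeps the unwind correct. The files live under the directory returned by apei_get_debugfs_dir(), typically /sys/kernel/debug/apei/einj/.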
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index a4cfb64..903549d 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -33,7 +33,7 @@
#define ERST_DBG_PFX "ERST DBG: "
-#define ERST_DBG_RECORD_LEN_MAX 4096
+#define ERST_DBG_RECORD_LEN_MAX 0x4000
static void *erst_dbg_buf;
static unsigned int erst_dbg_buf_len;
@@ -213,6 +213,10 @@ static struct miscdevice erst_dbg_dev = {
static __init int erst_dbg_init(void)
{
+ if (erst_disable) {
+ pr_info(ERST_DBG_PFX "ERST support is disabled.\n");
+ return -ENODEV;
+ }
return misc_register(&erst_dbg_dev);
}
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index e6cef8e..2ca59dc 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -642,7 +642,7 @@ static int __erst_write_to_storage(u64 offset)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, offset);
@@ -666,7 +666,7 @@ static int __erst_write_to_storage(u64 offset)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -681,7 +681,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, offset);
@@ -709,7 +709,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -724,7 +724,7 @@ static int __erst_clear_from_storage(u64 record_id)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, record_id);
@@ -748,7 +748,7 @@ static int __erst_clear_from_storage(u64 record_id)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -932,8 +932,11 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time);
-static u64 erst_writer(enum pstore_type_id type, size_t size);
+ struct timespec *time, struct pstore_info *psi);
+static u64 erst_writer(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi);
+static int erst_clearer(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi);
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
@@ -942,7 +945,7 @@ static struct pstore_info erst_info = {
.close = erst_close_pstore,
.read = erst_reader,
.write = erst_writer,
- .erase = erst_clear
+ .erase = erst_clearer
};
#define CPER_CREATOR_PSTORE \
@@ -983,7 +986,7 @@ static int erst_close_pstore(struct pstore_info *psi)
}
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time)
+ struct timespec *time, struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
@@ -1037,7 +1040,8 @@ out:
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
-static u64 erst_writer(enum pstore_type_id type, size_t size)
+static u64 erst_writer(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi)
{
struct cper_pstore_record *rcd = (struct cper_pstore_record *)
(erst_info.buf - sizeof(*rcd));
@@ -1080,6 +1084,12 @@ static u64 erst_writer(enum pstore_type_id type, size_t size)
return rcd->hdr.record_id;
}
+static int erst_clearer(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi)
+{
+ return erst_clear(id);
+}
+
static int __init erst_init(void)
{
int rc = 0;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f703b28..0784f99 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -12,7 +12,7 @@
* For more information about Generic Hardware Error Source, please
* refer to ACPI Specification version 4.0, section 17.3.2.6
*
- * Copyright 2010 Intel Corp.
+ * Copyright 2010,2011 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
@@ -42,6 +42,9 @@
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
+#include <linux/irq_work.h>
+#include <linux/llist.h>
+#include <linux/genalloc.h>
#include <acpi/apei.h>
#include <acpi/atomicio.h>
#include <acpi/hed.h>
@@ -53,6 +56,30 @@
#define GHES_PFX "GHES: "
#define GHES_ESTATUS_MAX_SIZE 65536
+#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536
+
+#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
+
+/* This is just an estimation for memory pool allocation */
+#define GHES_ESTATUS_CACHE_AVG_SIZE 512
+
+#define GHES_ESTATUS_CACHES_SIZE 4
+
+#define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL
+/* Prevent too many caches from being allocated because of RCU */
+#define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2)
+
+#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
+ (sizeof(struct ghes_estatus_cache) + (estatus_len))
+#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
+ ((struct acpi_hest_generic_status *) \
+ ((struct ghes_estatus_cache *)(estatus_cache) + 1))
+
+#define GHES_ESTATUS_NODE_LEN(estatus_len) \
+ (sizeof(struct ghes_estatus_node) + (estatus_len))
+#define GHES_ESTATUS_FROM_NODE(estatus_node) \
+ ((struct acpi_hest_generic_status *) \
+ ((struct ghes_estatus_node *)(estatus_node) + 1))
/*
* One struct ghes is created for each generic hardware error source.
@@ -77,6 +104,22 @@ struct ghes {
};
};
+struct ghes_estatus_node {
+ struct llist_node llnode;
+ struct acpi_hest_generic *generic;
+};
+
+struct ghes_estatus_cache {
+ u32 estatus_len;
+ atomic_t count;
+ struct acpi_hest_generic *generic;
+ unsigned long long time_in;
+ struct rcu_head rcu;
+};
+
+int ghes_disable;
+module_param_named(disable, ghes_disable, bool, 0);
+
static int ghes_panic_timeout __read_mostly = 30;
/*
@@ -121,6 +164,22 @@ static struct vm_struct *ghes_ioremap_area;
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+/*
+ * printk is not safe in NMI context. So in the NMI handler, we allocate
+ * the required memory from a lock-less memory allocator
+ * (ghes_estatus_pool), save the estatus into it, put it onto a lock-less
+ * list (ghes_estatus_llist), then defer the printk to IRQ context via
+ * irq_work (ghes_proc_irq_work). ghes_estatus_pool_size_request records
+ * the pool size required by all NMI error sources.
+ */
+static struct gen_pool *ghes_estatus_pool;
+static unsigned long ghes_estatus_pool_size_request;
+static struct llist_head ghes_estatus_llist;
+static struct irq_work ghes_proc_irq_work;
+
+struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
+static atomic_t ghes_estatus_cache_alloced;
+
static int ghes_ioremap_init(void)
{
ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
@@ -180,6 +239,55 @@ static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
__flush_tlb_one(vaddr);
}
+static int ghes_estatus_pool_init(void)
+{
+ ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
+ if (!ghes_estatus_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk,
+ void *data)
+{
+ free_page(chunk->start_addr);
+}
+
+static void ghes_estatus_pool_exit(void)
+{
+ gen_pool_for_each_chunk(ghes_estatus_pool,
+ ghes_estatus_pool_free_chunk_page, NULL);
+ gen_pool_destroy(ghes_estatus_pool);
+}
+
+static int ghes_estatus_pool_expand(unsigned long len)
+{
+ unsigned long i, pages, size, addr;
+ int ret;
+
+ ghes_estatus_pool_size_request += PAGE_ALIGN(len);
+ size = gen_pool_size(ghes_estatus_pool);
+ if (size >= ghes_estatus_pool_size_request)
+ return 0;
+ pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
+ for (i = 0; i < pages; i++) {
+ addr = __get_free_page(GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ghes_estatus_pool_shrink(unsigned long len)
+{
+ ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
+}
+
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
struct ghes *ghes;
@@ -341,43 +449,196 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_do_proc(struct ghes *ghes)
+static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
{
- int sev, processed = 0;
+ int sev, sec_sev;
struct acpi_hest_generic_data *gdata;
- sev = ghes_severity(ghes->estatus->error_severity);
- apei_estatus_for_each_section(ghes->estatus, gdata) {
-#ifdef CONFIG_X86_MCE
+ sev = ghes_severity(estatus->error_severity);
+ apei_estatus_for_each_section(estatus, gdata) {
+ sec_sev = ghes_severity(gdata->error_severity);
if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
CPER_SEC_PLATFORM_MEM)) {
- apei_mce_report_mem_error(
- sev == GHES_SEV_CORRECTED,
- (struct cper_sec_mem_err *)(gdata+1));
- processed = 1;
- }
+ struct cper_sec_mem_err *mem_err;
+ mem_err = (struct cper_sec_mem_err *)(gdata+1);
+#ifdef CONFIG_X86_MCE
+ apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
+ mem_err);
#endif
+#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
+ if (sev == GHES_SEV_RECOVERABLE &&
+ sec_sev == GHES_SEV_RECOVERABLE &&
+ mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ unsigned long pfn;
+ pfn = mem_err->physical_addr >> PAGE_SHIFT;
+ memory_failure_queue(pfn, 0, 0);
+ }
+#endif
+ }
}
}
-static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
+static void __ghes_print_estatus(const char *pfx,
+ const struct acpi_hest_generic *generic,
+ const struct acpi_hest_generic_status *estatus)
{
- /* Not more than 2 messages every 5 seconds */
- static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
-
if (pfx == NULL) {
- if (ghes_severity(ghes->estatus->error_severity) <=
+ if (ghes_severity(estatus->error_severity) <=
GHES_SEV_CORRECTED)
pfx = KERN_WARNING HW_ERR;
else
pfx = KERN_ERR HW_ERR;
}
- if (__ratelimit(&ratelimit)) {
- printk(
- "%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
- pfx, ghes->generic->header.source_id);
- apei_estatus_print(pfx, ghes->estatus);
+ printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+ pfx, generic->header.source_id);
+ apei_estatus_print(pfx, estatus);
+}
+
+static int ghes_print_estatus(const char *pfx,
+ const struct acpi_hest_generic *generic,
+ const struct acpi_hest_generic_status *estatus)
+{
+ /* Not more than 2 messages every 5 seconds */
+ static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
+ static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
+ struct ratelimit_state *ratelimit;
+
+ if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
+ ratelimit = &ratelimit_corrected;
+ else
+ ratelimit = &ratelimit_uncorrected;
+ if (__ratelimit(ratelimit)) {
+ __ghes_print_estatus(pfx, generic, estatus);
+ return 1;
}
+ return 0;
+}
+
+/*
+ * GHES error status reporting throttle, to report more kinds of
+ * errors instead of just the most frequently occurring ones.
+ */
+static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
+{
+ u32 len;
+ int i, cached = 0;
+ unsigned long long now;
+ struct ghes_estatus_cache *cache;
+ struct acpi_hest_generic_status *cache_estatus;
+
+ len = apei_estatus_len(estatus);
+ rcu_read_lock();
+ for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
+ cache = rcu_dereference(ghes_estatus_caches[i]);
+ if (cache == NULL)
+ continue;
+ if (len != cache->estatus_len)
+ continue;
+ cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+ if (memcmp(estatus, cache_estatus, len))
+ continue;
+ atomic_inc(&cache->count);
+ now = sched_clock();
+ if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
+ cached = 1;
+ break;
+ }
+ rcu_read_unlock();
+ return cached;
+}
+
+static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
+ struct acpi_hest_generic *generic,
+ struct acpi_hest_generic_status *estatus)
+{
+ int alloced;
+ u32 len, cache_len;
+ struct ghes_estatus_cache *cache;
+ struct acpi_hest_generic_status *cache_estatus;
+
+ alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
+ if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
+ atomic_dec(&ghes_estatus_cache_alloced);
+ return NULL;
+ }
+ len = apei_estatus_len(estatus);
+ cache_len = GHES_ESTATUS_CACHE_LEN(len);
+ cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
+ if (!cache) {
+ atomic_dec(&ghes_estatus_cache_alloced);
+ return NULL;
+ }
+ cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+ memcpy(cache_estatus, estatus, len);
+ cache->estatus_len = len;
+ atomic_set(&cache->count, 0);
+ cache->generic = generic;
+ cache->time_in = sched_clock();
+ return cache;
+}
+
+static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
+{
+ u32 len;
+
+ len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
+ len = GHES_ESTATUS_CACHE_LEN(len);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
+ atomic_dec(&ghes_estatus_cache_alloced);
+}
+
+static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
+{
+ struct ghes_estatus_cache *cache;
+
+ cache = container_of(head, struct ghes_estatus_cache, rcu);
+ ghes_estatus_cache_free(cache);
+}
+
+static void ghes_estatus_cache_add(
+ struct acpi_hest_generic *generic,
+ struct acpi_hest_generic_status *estatus)
+{
+ int i, slot = -1, count;
+ unsigned long long now, duration, period, max_period = 0;
+ struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
+
+ new_cache = ghes_estatus_cache_alloc(generic, estatus);
+ if (new_cache == NULL)
+ return;
+ rcu_read_lock();
+ now = sched_clock();
+ for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
+ cache = rcu_dereference(ghes_estatus_caches[i]);
+ if (cache == NULL) {
+ slot = i;
+ slot_cache = NULL;
+ break;
+ }
+ duration = now - cache->time_in;
+ if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
+ slot = i;
+ slot_cache = cache;
+ break;
+ }
+ count = atomic_read(&cache->count);
+ period = duration;
+ do_div(period, (count + 1));
+ if (period > max_period) {
+ max_period = period;
+ slot = i;
+ slot_cache = cache;
+ }
+ }
+ /* new_cache must be put into array after its contents are written */
+ smp_wmb();
+ if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
+ slot_cache, new_cache) == slot_cache) {
+ if (slot_cache)
+ call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
+ } else
+ ghes_estatus_cache_free(new_cache);
+ rcu_read_unlock();
}
static int ghes_proc(struct ghes *ghes)
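Two things govern the throttle implemented above: ghes_estatus_cached() suppresses a duplicate report only while the matching cache entry is younger than GHES_ESTATUS_IN_CACHE_MAX_NSEC (10^10 ns, i.e. 10 seconds), and ghes_estatus_cache_add() evicts either an empty slot, a slot older than that limit, or otherwise the slot with the largest average period duration/(count + 1). A worked example with hypothetical numbers: slot A was filled 8 s ago and has matched 3 times (period 8 s / 4 = 2 s), slot B was filled 6 s ago and never matched (period 6 s / 1 = 6 s); B has the larger average period, so it is the one replaced by the new entry.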
@@ -387,9 +648,11 @@ static int ghes_proc(struct ghes *ghes)
rc = ghes_read_estatus(ghes, 0);
if (rc)
goto out;
- ghes_print_estatus(NULL, ghes);
- ghes_do_proc(ghes);
-
+ if (!ghes_estatus_cached(ghes->estatus)) {
+ if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
+ ghes_estatus_cache_add(ghes->generic, ghes->estatus);
+ }
+ ghes_do_proc(ghes->estatus);
out:
ghes_clear_estatus(ghes);
return 0;
@@ -447,6 +710,45 @@ static int ghes_notify_sci(struct notifier_block *this,
return ret;
}
+static void ghes_proc_in_irq(struct irq_work *irq_work)
+{
+ struct llist_node *llnode, *next, *tail = NULL;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ u32 len, node_len;
+
+ /*
+ * Because the time order of estatus in list is reversed,
+ * revert it back to proper order.
+ */
+ llnode = llist_del_all(&ghes_estatus_llist);
+ while (llnode) {
+ next = llnode->next;
+ llnode->next = tail;
+ tail = llnode;
+ llnode = next;
+ }
+ llnode = tail;
+ while (llnode) {
+ next = llnode->next;
+ estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+ llnode);
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ len = apei_estatus_len(estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ ghes_do_proc(estatus);
+ if (!ghes_estatus_cached(estatus)) {
+ generic = estatus_node->generic;
+ if (ghes_print_estatus(NULL, generic, estatus))
+ ghes_estatus_cache_add(generic, estatus);
+ }
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+ node_len);
+ llnode = next;
+ }
+}
+
static int ghes_notify_nmi(struct notifier_block *this,
unsigned long cmd, void *data)
{
@@ -476,7 +778,8 @@ static int ghes_notify_nmi(struct notifier_block *this,
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
- ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
+ __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+ ghes_global->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
panic_timeout = ghes_panic_timeout;
@@ -484,12 +787,34 @@ static int ghes_notify_nmi(struct notifier_block *this,
}
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ u32 len, node_len;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic_status *estatus;
+#endif
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
- /* Do not print estatus because printk is not NMI safe */
- ghes_do_proc(ghes);
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ if (ghes_estatus_cached(ghes->estatus))
+ goto next;
+ /* Save estatus for further processing in IRQ context */
+ len = apei_estatus_len(ghes->estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
+ node_len);
+ if (estatus_node) {
+ estatus_node->generic = ghes->generic;
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ memcpy(estatus, ghes->estatus, len);
+ llist_add(&estatus_node->llnode, &ghes_estatus_llist);
+ }
+next:
+#endif
ghes_clear_estatus(ghes);
}
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ irq_work_queue(&ghes_proc_irq_work);
+#endif
out:
raw_spin_unlock(&ghes_nmi_lock);
@@ -504,10 +829,26 @@ static struct notifier_block ghes_notifier_nmi = {
.notifier_call = ghes_notify_nmi,
};
+static unsigned long ghes_esource_prealloc_size(
+ const struct acpi_hest_generic *generic)
+{
+ unsigned long block_length, prealloc_records, prealloc_size;
+
+ block_length = min_t(unsigned long, generic->error_block_length,
+ GHES_ESTATUS_MAX_SIZE);
+ prealloc_records = max_t(unsigned long,
+ generic->records_to_preallocate, 1);
+ prealloc_size = min_t(unsigned long, block_length * prealloc_records,
+ GHES_ESOURCE_PREALLOC_MAX_SIZE);
+
+ return prealloc_size;
+}
+
static int __devinit ghes_probe(struct platform_device *ghes_dev)
{
struct acpi_hest_generic *generic;
struct ghes *ghes = NULL;
+ unsigned long len;
int rc = -EINVAL;
generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
@@ -573,6 +914,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
mutex_unlock(&ghes_list_mutex);
break;
case ACPI_HEST_NOTIFY_NMI:
+ len = ghes_esource_prealloc_size(generic);
+ ghes_estatus_pool_expand(len);
mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_nmi))
register_die_notifier(&ghes_notifier_nmi);
@@ -597,6 +940,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
{
struct ghes *ghes;
struct acpi_hest_generic *generic;
+ unsigned long len;
ghes = platform_get_drvdata(ghes_dev);
generic = ghes->generic;
@@ -627,6 +971,8 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
* freed after NMI handler finishes.
*/
synchronize_rcu();
+ len = ghes_esource_prealloc_size(generic);
+ ghes_estatus_pool_shrink(len);
break;
default:
BUG();
@@ -662,15 +1008,43 @@ static int __init ghes_init(void)
return -EINVAL;
}
+ if (ghes_disable) {
+ pr_info(GHES_PFX "GHES is not enabled!\n");
+ return -EINVAL;
+ }
+
+ init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
+
rc = ghes_ioremap_init();
if (rc)
goto err;
- rc = platform_driver_register(&ghes_platform_driver);
+ rc = ghes_estatus_pool_init();
if (rc)
goto err_ioremap_exit;
+ rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
+ GHES_ESTATUS_CACHE_ALLOCED_MAX);
+ if (rc)
+ goto err_pool_exit;
+
+ rc = platform_driver_register(&ghes_platform_driver);
+ if (rc)
+ goto err_pool_exit;
+
+ rc = apei_osc_setup();
+ if (rc == 0 && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
+ else if (rc == 0 && !osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
+ else if (rc && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
+ else
+ pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
+
return 0;
+err_pool_exit:
+ ghes_estatus_pool_exit();
err_ioremap_exit:
ghes_ioremap_exit();
err:
@@ -680,6 +1054,7 @@ err:
static void __exit ghes_exit(void)
{
platform_driver_unregister(&ghes_platform_driver);
+ ghes_estatus_pool_exit();
ghes_ioremap_exit();
}
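The comment block near the top of this file states the constraint the additions revolve around: printk() and ordinary allocators are off-limits in NMI context, so the handler copies each record into memory taken from a pre-grown gen_pool, pushes it onto a lock-less llist, and lets an irq_work callback drain the list from IRQ context. The driver sizes the pool per NMI source via ghes_esource_prealloc_size(), i.e. min(error_block_length, 64 KiB) * max(records_to_preallocate, 1), capped at 64 KiB. A minimal, self-contained sketch of the same pattern, using hypothetical my_* names and a single page of backing memory; it illustrates the technique and is not the driver's code:

```c
#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/mm.h>
#include <linux/string.h>

#define MY_MSG_LEN	48

struct my_node {
	struct llist_node llnode;
	char msg[MY_MSG_LEN];
};

static struct gen_pool *my_pool;
static struct llist_head my_llist;	/* empty when zero-initialized */
static struct irq_work my_irq_work;

/* Runs in IRQ context, where printk() and gen_pool_free() are safe. */
static void my_irq_work_fn(struct irq_work *work)
{
	struct llist_node *n, *next;

	for (n = llist_del_all(&my_llist); n; n = next) {
		struct my_node *node = llist_entry(n, struct my_node, llnode);

		next = n->next;
		pr_err("deferred hardware error: %s\n", node->msg);
		gen_pool_free(my_pool, (unsigned long)node, sizeof(*node));
	}
}

/* Callable from NMI context: only a lock-less pool allocation, a
 * lock-less list push and an irq_work kick. */
static void my_report_from_nmi(const char *msg)
{
	struct my_node *node;

	node = (void *)gen_pool_alloc(my_pool, sizeof(*node));
	if (!node)
		return;		/* pool exhausted: drop the record */
	strlcpy(node->msg, msg, MY_MSG_LEN);
	llist_add(&node->llnode, &my_llist);
	irq_work_queue(&my_irq_work);
}

/* One-time setup in process context: the pool must be pre-grown,
 * because gen_pool_alloc() never allocates new backing pages. */
static int __init my_setup(void)
{
	unsigned long page;

	init_irq_work(&my_irq_work, my_irq_work_fn);
	my_pool = gen_pool_create(3 /* 8-byte minimum allocation */, -1);
	if (!my_pool)
		return -ENOMEM;
	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	return gen_pool_add(my_pool, page, PAGE_SIZE, -1);
}
```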
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 181bc2f..05fee06 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -231,16 +231,17 @@ void __init acpi_hest_init(void)
goto err;
}
- rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
- if (rc)
- goto err;
-
- rc = hest_ghes_dev_register(ghes_count);
- if (!rc) {
- pr_info(HEST_PFX "Table parsing has been initialized.\n");
- return;
+ if (!ghes_disable) {
+ rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
+ if (rc)
+ goto err;
+ rc = hest_ghes_dev_register(ghes_count);
+ if (rc)
+ goto err;
}
+ pr_info(HEST_PFX "Table parsing has been initialized.\n");
+ return;
err:
hest_disable = 1;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 2c66135..7711d94 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -55,6 +55,9 @@
#define ACPI_BATTERY_NOTIFY_INFO 0x81
#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
+/* Battery power unit: 0 means mW, 1 means mA */
+#define ACPI_BATTERY_POWER_UNIT_MA 1
+
#define _COMPONENT ACPI_BATTERY_COMPONENT
ACPI_MODULE_NAME("battery");
@@ -91,16 +94,12 @@ MODULE_DEVICE_TABLE(acpi, battery_device_ids);
enum {
ACPI_BATTERY_ALARM_PRESENT,
ACPI_BATTERY_XINFO_PRESENT,
- /* For buggy DSDTs that report negative 16-bit values for either
- * charging or discharging current and/or report 0 as 65536
- * due to bad math.
- */
- ACPI_BATTERY_QUIRK_SIGNED16_CURRENT,
ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
};
struct acpi_battery {
struct mutex lock;
+ struct mutex sysfs_lock;
struct power_supply bat;
struct acpi_device *device;
struct notifier_block pm_nb;
@@ -301,7 +300,8 @@ static enum power_supply_property energy_battery_props[] = {
#ifdef CONFIG_ACPI_PROCFS_POWER
inline char *acpi_battery_units(struct acpi_battery *battery)
{
- return (battery->power_unit)?"mA":"mW";
+ return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
+ "mA" : "mW";
}
#endif
@@ -461,9 +461,17 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->update_time = jiffies;
kfree(buffer.pointer);
- if (test_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags) &&
- battery->rate_now != -1)
+ /* For buggy DSDTs that report negative 16-bit values for either
+ * charging or discharging current and/or report 0 as 65536
+ * due to bad math.
+ */
+ if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA &&
+ battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
+ (s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
+ printk_once(KERN_WARNING FW_BUG "battery: (dis)charge rate"
+ " invalid.\n");
+ }
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
@@ -544,7 +552,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
{
int result;
- if (battery->power_unit) {
+ if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
battery->bat.properties = charge_battery_props;
battery->bat.num_properties =
ARRAY_SIZE(charge_battery_props);
@@ -566,18 +574,16 @@ static int sysfs_add_battery(struct acpi_battery *battery)
static void sysfs_remove_battery(struct acpi_battery *battery)
{
- if (!battery->bat.dev)
+ mutex_lock(&battery->sysfs_lock);
+ if (!battery->bat.dev) {
+ mutex_unlock(&battery->sysfs_lock);
return;
+ }
+
device_remove_file(battery->bat.dev, &alarm_attr);
power_supply_unregister(&battery->bat);
battery->bat.dev = NULL;
-}
-
-static void acpi_battery_quirks(struct acpi_battery *battery)
-{
- if (dmi_name_in_vendors("Acer") && battery->power_unit) {
- set_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags);
- }
+ mutex_unlock(&battery->sysfs_lock);
}
/*
@@ -592,7 +598,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
*
* Handle this correctly so that they won't break userspace.
*/
-static void acpi_battery_quirks2(struct acpi_battery *battery)
+static void acpi_battery_quirks(struct acpi_battery *battery)
{
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
return ;
@@ -623,13 +629,15 @@ static int acpi_battery_update(struct acpi_battery *battery)
result = acpi_battery_get_info(battery);
if (result)
return result;
- acpi_battery_quirks(battery);
acpi_battery_init_alarm(battery);
}
- if (!battery->bat.dev)
- sysfs_add_battery(battery);
+ if (!battery->bat.dev) {
+ result = sysfs_add_battery(battery);
+ if (result)
+ return result;
+ }
result = acpi_battery_get_state(battery);
- acpi_battery_quirks2(battery);
+ acpi_battery_quirks(battery);
return result;
}
@@ -863,7 +871,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
}, \
}
-static struct battery_file {
+static const struct battery_file {
struct file_operations ops;
mode_t mode;
const char *name;
@@ -948,9 +956,12 @@ static int battery_notify(struct notifier_block *nb,
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
switch (mode) {
+ case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
- sysfs_remove_battery(battery);
- sysfs_add_battery(battery);
+ if (battery->bat.dev) {
+ sysfs_remove_battery(battery);
+ sysfs_add_battery(battery);
+ }
break;
}
@@ -972,28 +983,38 @@ static int acpi_battery_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
mutex_init(&battery->lock);
+ mutex_init(&battery->sysfs_lock);
if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
"_BIX", &handle)))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
- acpi_battery_update(battery);
+ result = acpi_battery_update(battery);
+ if (result)
+ goto fail;
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
#endif
- if (!result) {
- printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
- ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
- device->status.battery_present ? "present" : "absent");
- } else {
+ if (result) {
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
- kfree(battery);
+ goto fail;
}
+ printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+ ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+ device->status.battery_present ? "present" : "absent");
+
battery->pm_nb.notifier_call = battery_notify;
register_pm_notifier(&battery->pm_nb);
return result;
+
+fail:
+ sysfs_remove_battery(battery);
+ mutex_destroy(&battery->lock);
+ mutex_destroy(&battery->sysfs_lock);
+ kfree(battery);
+ return result;
}
static int acpi_battery_remove(struct acpi_device *device, int type)
@@ -1009,6 +1030,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
#endif
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
+ mutex_destroy(&battery->sysfs_lock);
kfree(battery);
return 0;
}
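The rewritten quirk above fires only for mA-reporting batteries whose rate is not ACPI_BATTERY_VALUE_UNKNOWN and becomes negative when reinterpreted as a signed 16-bit value; abs() then recovers the intended magnitude. A worked example with a hypothetical firmware value (plain userspace C, just to show the arithmetic):

```c
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	/* Hypothetical value: a DSDT doing its math in 16 bits reports a
	 * 1000 mA discharge rate as 65536 - 1000 = 64536. */
	uint16_t raw = 64536;
	int16_t as_signed = (int16_t)raw;	/* -1000 */

	printf("reported: %u, recovered: %d mA\n",
	       (unsigned)raw, abs(as_signed));
	return 0;
}
```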
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d1e06c1..437ddbf 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -39,6 +39,7 @@
#include <linux/pci.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include <acpi/apei.h>
#include <linux/dmi.h>
#include <linux/suspend.h>
@@ -519,6 +520,7 @@ out_kfree:
}
EXPORT_SYMBOL(acpi_run_osc);
+bool osc_sb_apei_support_acked;
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
static void acpi_bus_osc_support(void)
{
@@ -541,11 +543,19 @@ static void acpi_bus_osc_support(void)
#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
#endif
+
+ if (!ghes_disable)
+ capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return;
- if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
+ if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
+ u32 *capbuf_ret = context.ret.pointer;
+ if (context.ret.length > OSC_SUPPORT_TYPE)
+ osc_sb_apei_support_acked =
+ capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT;
kfree(context.ret.pointer);
- /* do we need to check the returned cap? Sounds no */
+ }
+ /* do we need to check other returned cap? Sounds no */
}
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 1864ad3..19a6113 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -77,7 +77,7 @@ struct dock_dependent_device {
struct list_head list;
struct list_head hotplug_list;
acpi_handle handle;
- struct acpi_dock_ops *ops;
+ const struct acpi_dock_ops *ops;
void *context;
};
@@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
* the dock driver after _DCK is executed.
*/
int
-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
void *context)
{
struct dock_dependent_device *dd;
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 05b4420..22f918b 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -92,7 +92,7 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
return count;
}
-static struct file_operations acpi_ec_io_ops = {
+static const struct file_operations acpi_ec_io_ops = {
.owner = THIS_MODULE,
.open = acpi_ec_open_io,
.read = acpi_ec_read_io,
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 467479f..0f0356c 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -110,7 +110,7 @@ fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
return result;
}
-static struct thermal_cooling_device_ops fan_cooling_ops = {
+static const struct thermal_cooling_device_ops fan_cooling_ops = {
.get_max_state = fan_get_max_state,
.get_cur_state = fan_get_cur_state,
.set_cur_state = fan_set_cur_state,
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 372f9b7..fa32f58 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -155,7 +155,7 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
if (!strcmp("Linux", interface)) {
- printk(KERN_NOTICE FW_BUG PREFIX
+ printk_once(KERN_NOTICE FW_BUG PREFIX
"BIOS _OSI(Linux) query %s%s\n",
osi_linux.enable ? "honored" : "ignored",
osi_linux.cmdline ? " via cmdline" :
@@ -237,8 +237,23 @@ void acpi_os_vprintf(const char *fmt, va_list args)
#endif
}
+#ifdef CONFIG_KEXEC
+static unsigned long acpi_rsdp;
+static int __init setup_acpi_rsdp(char *arg)
+{
+ acpi_rsdp = simple_strtoul(arg, NULL, 16);
+ return 0;
+}
+early_param("acpi_rsdp", setup_acpi_rsdp);
+#endif
+
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
+#ifdef CONFIG_KEXEC
+ if (acpi_rsdp)
+ return acpi_rsdp;
+#endif
+
if (efi_enabled) {
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
return efi.acpi20;
@@ -1083,7 +1098,13 @@ struct osi_setup_entry {
bool enable;
};
-static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX];
+static struct osi_setup_entry __initdata
+ osi_setup_entries[OSI_STRING_ENTRIES_MAX] = {
+ {"Module Device", true},
+ {"Processor Device", true},
+ {"3.0 _SCP Extensions", true},
+ {"Processor Aggregator Device", true},
+};
void __init acpi_osi_setup(char *str)
{
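The new early parameter is aimed at kexec/kdump: a second kernel that cannot rediscover the RSDP through EFI or the BIOS scan can be handed its physical address directly, e.g. acpi_rsdp=<address in hex> on the crash kernel's command line (the value is parsed with simple_strtoul(..., NULL, 16), so a plain or 0x-prefixed hex number works).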
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index f907cfb..7f9eba9 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -303,6 +303,61 @@ void acpi_pci_irq_del_prt(struct pci_bus *bus)
/* --------------------------------------------------------------------------
PCI Interrupt Routing Support
-------------------------------------------------------------------------- */
+#ifdef CONFIG_X86_IO_APIC
+extern int noioapicquirk;
+extern int noioapicreroute;
+
+static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
+{
+ struct pci_bus *bus_it;
+
+ for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) {
+ if (!bus_it->self)
+ return 0;
+ if (bus_it->self->irq_reroute_variant)
+ return bus_it->self->irq_reroute_variant;
+ }
+ return 0;
+}
+
+/*
+ * Some chipsets (e.g. Intel 6700PXH) generate a legacy INTx when the IRQ
+ * entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel does
+ * during interrupt handling). When this INTx generation cannot be disabled,
+ * we reroute these interrupts to their legacy equivalent to get rid of
+ * spurious interrupts.
+ */
+static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
+ struct acpi_prt_entry *entry)
+{
+ if (noioapicquirk || noioapicreroute) {
+ return 0;
+ } else {
+ switch (bridge_has_boot_interrupt_variant(dev->bus)) {
+ case 0:
+ /* no rerouting necessary */
+ return 0;
+ case INTEL_IRQ_REROUTE_VARIANT:
+ /*
+ * Remap according to INTx routing table in 6700PXH
+ * specs, intel order number 302628-002, section
+ * 2.15.2. Other chipsets (80332, ...) have the same
+ * mapping and are handled here as well.
+ */
+ dev_info(&dev->dev, "PCI IRQ %d -> rerouted to legacy "
+ "IRQ %d\n", entry->index,
+ (entry->index % 4) + 16);
+ entry->index = (entry->index % 4) + 16;
+ return 1;
+ default:
+ dev_warn(&dev->dev, "Cannot reroute IRQ %d to legacy "
+ "IRQ: unknown mapping\n", entry->index);
+ return -1;
+ }
+ }
+}
+#endif /* CONFIG_X86_IO_APIC */
+
static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
{
struct acpi_prt_entry *entry;
@@ -311,6 +366,9 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
entry = acpi_pci_irq_find_prt_entry(dev, pin);
if (entry) {
+#ifdef CONFIG_X86_IO_APIC
+ acpi_reroute_boot_interrupt(dev, entry);
+#endif /* CONFIG_X86_IO_APIC */
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n",
pci_name(dev), pin_name(pin)));
return entry;
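The rerouting added above is pure arithmetic: a bridge's boot interrupts are folded onto the four legacy IOAPIC inputs 16-19 according to the 6700PXH INTx routing table. The rule in isolation, with a hypothetical helper name and a worked value:

```c
/* _PRT index 35 -> (35 % 4) + 16 = 19, i.e. legacy IRQ 19. */
static int reroute_to_legacy(int prt_index)
{
	return (prt_index % 4) + 16;
}
```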
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d06078d..2672c79 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -485,7 +485,8 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
root->secondary.end = 0xFF;
printk(KERN_WARNING FW_BUG PREFIX
"no secondary bus range in _CRS\n");
- status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN,
+ NULL, &bus);
if (ACPI_SUCCESS(status))
root->secondary.start = bus;
else if (status == AE_NOT_FOUND)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 79cb653..870550d 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -244,7 +244,7 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
return result;
}
-struct thermal_cooling_device_ops processor_cooling_ops = {
+const struct thermal_cooling_device_ops processor_cooling_ops = {
.get_max_state = processor_get_max_state,
.get_cur_state = processor_get_cur_state,
.set_cur_state = processor_set_cur_state,
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 50658ff..6e36d0c 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -130,6 +130,9 @@ struct acpi_sbs {
#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
+static int acpi_sbs_remove(struct acpi_device *device, int type);
+static int acpi_battery_get_state(struct acpi_battery *battery);
+
static inline int battery_scale(int log)
{
int scale = 1;
@@ -195,6 +198,8 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy,
if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT)
return -ENODEV;
+
+ acpi_battery_get_state(battery);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
if (battery->rate_now < 0)
@@ -225,11 +230,17 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_POWER_NOW:
val->intval = abs(battery->rate_now) *
acpi_battery_ipscale(battery) * 1000;
+ val->intval *= (acpi_battery_mode(battery)) ?
+ (battery->voltage_now *
+ acpi_battery_vscale(battery) / 1000) : 1;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_POWER_AVG:
val->intval = abs(battery->rate_avg) *
acpi_battery_ipscale(battery) * 1000;
+ val->intval *= (acpi_battery_mode(battery)) ?
+ (battery->voltage_now *
+ acpi_battery_vscale(battery) / 1000) : 1;
break;
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = battery->state_of_charge;
@@ -903,8 +914,6 @@ static void acpi_sbs_callback(void *context)
}
}
-static int acpi_sbs_remove(struct acpi_device *device, int type);
-
static int acpi_sbs_add(struct acpi_device *device)
{
struct acpi_sbs *sbs;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6c94960..3ed80b2 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -428,6 +428,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
},
},
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus A8N-SLI DELUXE",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus A8N-SLI Premium",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 77255f2..c538d0e 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -149,12 +149,12 @@ static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
return result;
}
-static struct kernel_param_ops param_ops_debug_layer = {
+static const struct kernel_param_ops param_ops_debug_layer = {
.set = param_set_uint,
.get = param_get_debug_layer,
};
-static struct kernel_param_ops param_ops_debug_level = {
+static const struct kernel_param_ops param_ops_debug_level = {
.set = param_set_uint,
.get = param_get_debug_level,
};
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 2607e17..48fbc64 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -812,7 +812,7 @@ acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
thermal_zone_unbind_cooling_device);
}
-static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
+static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.bind = acpi_thermal_bind_cooling_device,
.unbind = acpi_thermal_unbind_cooling_device,
.get_temp = thermal_get_temp,
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index db39e9e..08a44b5 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -46,7 +46,6 @@
#define PREFIX "ACPI: "
-#define ACPI_VIDEO_CLASS "video"
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
#define ACPI_VIDEO_NOTIFY_SWITCH 0x80
@@ -308,7 +307,7 @@ video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long st
return acpi_video_device_lcd_set_level(video, level);
}
-static struct thermal_cooling_device_ops video_cooling_ops = {
+static const struct thermal_cooling_device_ops video_cooling_ops = {
.get_max_state = video_get_max_state,
.get_cur_state = video_get_cur_state,
.set_cur_state = video_set_cur_state,
@@ -1445,7 +1444,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch,
* most likely via hotkey. */
acpi_bus_generate_proc_event(device, event, 0);
- keycode = KEY_SWITCHVIDEOMODE;
+ if (!acpi_notifier_call_chain(device, event, 0))
+ keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video
@@ -1475,7 +1475,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
break;
}
- acpi_notifier_call_chain(device, event, 0);
+ if (event != ACPI_VIDEO_NOTIFY_SWITCH)
+ acpi_notifier_call_chain(device, event, 0);
if (keycode) {
input_report_key(input, keycode, 1);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ca3e6be..5987e0b 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -468,6 +468,15 @@ config PATA_ICSIDE
interface card. This is not required for ICS partition support.
If you are unsure, say N to this.
+config PATA_IMX
+ tristate "PATA support for Freescale iMX"
+ depends on ARCH_MXC
+ help
+ This option enables support for the PATA host available on Freescale
+ iMX SoCs.
+
+ If unsure, say N.
+
config PATA_IT8213
tristate "IT8213 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8ac64e1..9550d69 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
+obj-$(CONFIG_PATA_IMX) += pata_imx.o
obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index e0a5b55..bb7c5f1 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -218,12 +218,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
ata_acpi_uevent(dev->link->ap, dev, event);
}
-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
.handler = ata_acpi_dev_notify_dock,
.uevent = ata_acpi_dev_uevent,
};
-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
.handler = ata_acpi_ap_notify_dock,
.uevent = ata_acpi_ap_uevent,
};
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
new file mode 100644
index 0000000..ca9d9ca
--- /dev/null
+++ b/drivers/ata/pata_imx.c
@@ -0,0 +1,253 @@
+/*
+ * Freescale iMX PATA driver
+ *
+ * Copyright (C) 2011 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * TODO:
+ * - dmaengine support
+ * - check if timing stuff needed
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define DRV_NAME "pata_imx"
+
+#define PATA_IMX_ATA_CONTROL 0x24
+#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7)
+#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6)
+#define PATA_IMX_ATA_CTRL_IORDY_EN (1<<0)
+#define PATA_IMX_ATA_INT_EN 0x2C
+#define PATA_IMX_ATA_INTR_ATA_INTRQ2 (1<<3)
+#define PATA_IMX_DRIVE_DATA 0xA0
+#define PATA_IMX_DRIVE_CONTROL 0xD8
+
+struct pata_imx_priv {
+ struct clk *clk;
+ /* timings/interrupt/control regs */
+ u8 *host_regs;
+ u32 ata_ctl;
+};
+
+static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+ struct ata_device *dev;
+ struct ata_port *ap = link->ap;
+ struct pata_imx_priv *priv = ap->host->private_data;
+ u32 val;
+
+ ata_for_each_dev(dev, link, ENABLED) {
+ dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
+ dev->xfer_shift = ATA_SHIFT_PIO;
+ dev->flags |= ATA_DFLAG_PIO;
+
+ val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+ if (ata_pio_need_iordy(dev))
+ val |= PATA_IMX_ATA_CTRL_IORDY_EN;
+ else
+ val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
+ __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
+
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ }
+ return 0;
+}
+
+static struct scsi_host_template pata_imx_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_imx_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_unknown,
+ .set_mode = pata_imx_set_mode,
+};
+
+static void pata_imx_setup_port(struct ata_ioports *ioaddr)
+{
+ /* the iMX ATA block spaces the taskfile registers four bytes apart, hence the fixed shift of 2 */
+ ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
+ ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
+ ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
+ ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
+ ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
+ ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
+ ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
+ ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
+ ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
+ ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
+}
+
+static int __devinit pata_imx_probe(struct platform_device *pdev)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+ struct pata_imx_priv *priv;
+ int irq = 0;
+ struct resource *io_res;
+
+ io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (io_res == NULL)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct pata_imx_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ clk_enable(priv->clk);
+
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ goto free_priv;
+
+ host->private_data = priv;
+ ap = host->ports[0];
+
+ ap->ops = &pata_imx_port_ops;
+ ap->pio_mask = ATA_PIO0;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+ priv->host_regs = devm_ioremap(&pdev->dev, io_res->start,
+ resource_size(io_res));
+ if (!priv->host_regs) {
+ dev_err(&pdev->dev, "failed to map IO/CTL base\n");
+ goto free_priv;
+ }
+
+ ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
+ ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
+
+ ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+
+ pata_imx_setup_port(&ap->ioaddr);
+
+ ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+ (unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA,
+ (unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL);
+
+ /* deassert resets */
+ __raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B |
+ PATA_IMX_ATA_CTRL_ATA_RST_B,
+ priv->host_regs + PATA_IMX_ATA_CONTROL);
+ /* enable interrupts */
+ __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
+ priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+ /* activate */
+ return ata_host_activate(host, irq, ata_sff_interrupt, 0,
+ &pata_imx_sht);
+
+free_priv:
+ clk_disable(priv->clk);
+ clk_put(priv->clk);
+ return -ENOMEM;
+}
+
+static int __devexit pata_imx_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct pata_imx_priv *priv = host->private_data;
+
+ ata_host_detach(host);
+
+ __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+ clk_disable(priv->clk);
+ clk_put(priv->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pata_imx_suspend(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct pata_imx_priv *priv = host->private_data;
+ int ret;
+
+ ret = ata_host_suspend(host, PMSG_SUSPEND);
+ if (!ret) {
+ __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
+ priv->ata_ctl =
+ __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+ clk_disable(priv->clk);
+ }
+
+ return ret;
+}
+
+static int pata_imx_resume(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct pata_imx_priv *priv = host->private_data;
+
+ clk_enable(priv->clk);
+
+ __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
+
+ __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
+ priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static const struct dev_pm_ops pata_imx_pm_ops = {
+ .suspend = pata_imx_suspend,
+ .resume = pata_imx_resume,
+};
+#endif
+
+static struct platform_driver pata_imx_driver = {
+ .probe = pata_imx_probe,
+ .remove = __devexit_p(pata_imx_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &pata_imx_pm_ops,
+#endif
+ },
+};
+
+static int __init pata_imx_init(void)
+{
+ return platform_driver_register(&pata_imx_driver);
+}
+
+static void __exit pata_imx_exit(void)
+{
+ platform_driver_unregister(&pata_imx_driver);
+}
+module_init(pata_imx_init);
+module_exit(pata_imx_exit);
+
+MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+MODULE_DESCRIPTION("low-level driver for iMX PATA");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 65e4be6..8e9f504 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -124,6 +124,17 @@ static const struct via_isa_bridge {
{ NULL }
};
+static const struct dmi_system_id no_atapi_dma_dmi_table[] = {
+ {
+ .ident = "AVERATEC 3200",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"),
+ DMI_MATCH(DMI_BOARD_NAME, "3200"),
+ },
+ },
+ { }
+};
+
struct via_port {
u8 cached_device;
};
@@ -355,6 +366,13 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
mask &= ~ ATA_MASK_UDMA;
}
}
+
+ if (dev->class == ATA_DEV_ATAPI &&
+ dmi_check_system(no_atapi_dma_dmi_table)) {
+ ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
+ mask &= ATA_MASK_PIO;
+ }
+
return mask;
}
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 0a9a774..5c42374 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1329,7 +1329,7 @@ static int sata_dwc_port_start(struct ata_port *ap)
dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
__func__);
err = -ENOMEM;
- goto CLEANUP;
+ goto CLEANUP_ALLOC;
}
}
@@ -1349,15 +1349,13 @@ static int sata_dwc_port_start(struct ata_port *ap)
/* Clear any error bits before libata starts issuing commands */
clear_serror();
ap->private_data = hsdevp;
+ dev_dbg(ap->dev, "%s: done\n", __func__);
+ return 0;
+CLEANUP_ALLOC:
+ kfree(hsdevp);
CLEANUP:
- if (err) {
- sata_dwc_port_stop(ap);
- dev_dbg(ap->dev, "%s: fail\n", __func__);
- } else {
- dev_dbg(ap->dev, "%s: done\n", __func__);
- }
-
+ dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
return err;
}
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 98c1d78..9dfb40b 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -438,7 +438,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
u8 status;
if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
- u32 serror;
+ u32 serror = 0xffffffff;
/* SIEN doesn't mask SATA IRQs on some 3112s. Those
* controllers continue to assert IRQ as long as
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index bb3b016..f8f41e0 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -38,7 +38,7 @@
#include <linux/ihex.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 0b06250..b22d71c 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
extern int atm_init_aal5(struct atm_vcc *vcc); /* "raw" AAL5 transport */
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 3230ea0..9307141 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/eni.h b/drivers/atm/eni.h
index 493a693..dc9a62c 100644
--- a/drivers/atm/eni.h
+++ b/drivers/atm/eni.h
@@ -14,7 +14,7 @@
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "midway.h"
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 7c7b571..5072f8a 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -52,7 +52,7 @@
#include <asm/system.h>
#include <asm/string.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/wait.h>
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bc9e702..361f5ae 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -44,7 +44,7 @@
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_SBUS
#include <linux/of.h>
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 2875061..b812103 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -45,7 +45,7 @@
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 4764853..1c05212 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -46,7 +46,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#ifdef CONFIG_ATM_IDT77252_USE_SUNI
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 957106f..cb90f7a 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -58,7 +58,7 @@
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 6b313ee..1c70c45 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -51,7 +51,7 @@
#include <linux/idr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "nicstar.h"
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
#include "suni.h"
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index 41c56ea..90f1ccc 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -25,7 +25,7 @@
#include <asm/system.h>
#include <asm/param.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "suni.h"
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
index c45ae05..5120a96 100644
--- a/drivers/atm/uPD98402.c
+++ b/drivers/atm/uPD98402.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "uPD98402.h"
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 7f8c513..d889f56 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -27,7 +27,7 @@
#include <asm/system.h>
#include <asm/string.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "uPD98401.h"
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index cf7a0c7..65cd748 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -397,6 +397,7 @@ static int remove_nodes(struct device *dev,
static int release_nodes(struct device *dev, struct list_head *first,
struct list_head *end, unsigned long flags)
+ __releases(&dev->devres_lock)
{
LIST_HEAD(todo);
int cnt;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index b89fffc..a4760e0 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -166,7 +166,7 @@ static int create_path(const char *nodepath)
{
char *path;
char *s;
- int err;
+ int err = 0;
/* parent directories do not exist, create them */
path = kstrdup(nodepath, GFP_KERNEL);
@@ -376,7 +376,7 @@ int devtmpfs_mount(const char *mntdir)
return err;
}
-static __initdata DECLARE_COMPLETION(setup_done);
+static DECLARE_COMPLETION(setup_done);
static int handle(const char *name, mode_t mode, struct device *dev)
{
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index bbb03e6..06ed6b4 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -521,11 +521,6 @@ static int _request_firmware(const struct firmware **firmware_p,
if (!firmware_p)
return -EINVAL;
- if (WARN_ON(usermodehelper_is_disabled())) {
- dev_err(device, "firmware: %s will not be loaded\n", name);
- return -EBUSY;
- }
-
*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
if (!firmware) {
dev_err(device, "%s: kmalloc(struct firmware) failed\n",
@@ -539,6 +534,12 @@ static int _request_firmware(const struct firmware **firmware_p,
return 0;
}
+ if (WARN_ON(usermodehelper_is_disabled())) {
+ dev_err(device, "firmware: %s will not be loaded\n", name);
+ retval = -EBUSY;
+ goto out;
+ }
+
if (uevent)
dev_dbg(device, "firmware: requesting %s\n", name);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 45d7c8f..2840ed4 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -24,7 +24,7 @@
#include <linux/stat.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
static DEFINE_MUTEX(mem_sysfs_mutex);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 0cad9c7..99a5272 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
/**
* arch_setup_pdev_archdata - Allow manipulation of archdata before its used
- * @dev: platform device
+ * @pdev: platform device
*
* This is called before platform_device_add() such that any pdev_archdata may
* be setup before the platform_notifier is called. So if a user needs to
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index a846b2f..2c18d58 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -19,7 +19,7 @@
struct pm_clk_data {
struct list_head clock_list;
- struct mutex lock;
+ spinlock_t lock;
};
enum pce_status {
@@ -73,9 +73,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
}
}
- mutex_lock(&pcd->lock);
+ spin_lock_irq(&pcd->lock);
list_add_tail(&ce->node, &pcd->clock_list);
- mutex_unlock(&pcd->lock);
+ spin_unlock_irq(&pcd->lock);
return 0;
}
@@ -83,8 +83,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
* __pm_clk_remove - Destroy PM clock entry.
* @ce: PM clock entry to destroy.
*
- * This routine must be called under the mutex protecting the PM list of clocks
- * corresponding the the @ce's device.
+ * This routine must be called under the spinlock protecting the PM list of
+ * clocks corresponding to the @ce's device.
*/
static void __pm_clk_remove(struct pm_clock_entry *ce)
{
@@ -123,7 +123,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
if (!pcd)
return;
- mutex_lock(&pcd->lock);
+ spin_lock_irq(&pcd->lock);
list_for_each_entry(ce, &pcd->clock_list, node) {
if (!con_id && !ce->con_id) {
@@ -137,7 +137,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
}
}
- mutex_unlock(&pcd->lock);
+ spin_unlock_irq(&pcd->lock);
}
/**
@@ -158,7 +158,7 @@ int pm_clk_init(struct device *dev)
}
INIT_LIST_HEAD(&pcd->clock_list);
- mutex_init(&pcd->lock);
+ spin_lock_init(&pcd->lock);
dev->power.subsys_data = pcd;
return 0;
}
@@ -181,12 +181,12 @@ void pm_clk_destroy(struct device *dev)
dev->power.subsys_data = NULL;
- mutex_lock(&pcd->lock);
+ spin_lock_irq(&pcd->lock);
list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
__pm_clk_remove(ce);
- mutex_unlock(&pcd->lock);
+ spin_unlock_irq(&pcd->lock);
kfree(pcd);
}
@@ -220,13 +220,14 @@ int pm_clk_suspend(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
+ unsigned long flags;
dev_dbg(dev, "%s()\n", __func__);
if (!pcd)
return 0;
- mutex_lock(&pcd->lock);
+ spin_lock_irqsave(&pcd->lock, flags);
list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
@@ -238,7 +239,7 @@ int pm_clk_suspend(struct device *dev)
}
}
- mutex_unlock(&pcd->lock);
+ spin_unlock_irqrestore(&pcd->lock, flags);
return 0;
}
@@ -251,13 +252,14 @@ int pm_clk_resume(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
+ unsigned long flags;
dev_dbg(dev, "%s()\n", __func__);
if (!pcd)
return 0;
- mutex_lock(&pcd->lock);
+ spin_lock_irqsave(&pcd->lock, flags);
list_for_each_entry(ce, &pcd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
@@ -269,7 +271,7 @@ int pm_clk_resume(struct device *dev)
}
}
- mutex_unlock(&pcd->lock);
+ spin_unlock_irqrestore(&pcd->lock, flags);
return 0;
}
@@ -344,6 +346,7 @@ int pm_clk_suspend(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
+ unsigned long flags;
dev_dbg(dev, "%s()\n", __func__);
@@ -351,12 +354,12 @@ int pm_clk_suspend(struct device *dev)
if (!pcd || !dev->driver)
return 0;
- mutex_lock(&pcd->lock);
+ spin_lock_irqsave(&pcd->lock, flags);
list_for_each_entry_reverse(ce, &pcd->clock_list, node)
clk_disable(ce->clk);
- mutex_unlock(&pcd->lock);
+ spin_unlock_irqrestore(&pcd->lock, flags);
return 0;
}
@@ -369,6 +372,7 @@ int pm_clk_resume(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
+ unsigned long flags;
dev_dbg(dev, "%s()\n", __func__);
@@ -376,12 +380,12 @@ int pm_clk_resume(struct device *dev)
if (!pcd || !dev->driver)
return 0;
- mutex_lock(&pcd->lock);
+ spin_lock_irqsave(&pcd->lock, flags);
list_for_each_entry(ce, &pcd->clock_list, node)
clk_enable(ce->clk);
- mutex_unlock(&pcd->lock);
+ spin_unlock_irqrestore(&pcd->lock, flags);
return 0;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index be8714a..1c37457 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -80,7 +80,6 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
struct generic_pm_domain *parent = genpd->parent;
- DEFINE_WAIT(wait);
int ret = 0;
start:
@@ -112,7 +111,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
}
if (genpd->power_on) {
- int ret = genpd->power_on(genpd);
+ ret = genpd->power_on(genpd);
if (ret)
goto out;
}
@@ -461,6 +460,21 @@ static int pm_genpd_runtime_resume(struct device *dev)
return 0;
}
+/**
+ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ */
+void pm_genpd_poweroff_unused(void)
+{
+ struct generic_pm_domain *genpd;
+
+ mutex_lock(&gpd_list_lock);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+ genpd_queue_power_off_work(genpd);
+
+ mutex_unlock(&gpd_list_lock);
+}
+
#else
static inline void genpd_power_off_work_fn(struct work_struct *work) {}
@@ -1256,18 +1270,3 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
}
-
-/**
- * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
- */
-void pm_genpd_poweroff_unused(void)
-{
- struct generic_pm_domain *genpd;
-
- mutex_lock(&gpd_list_lock);
-
- list_for_each_entry(genpd, &gpd_list, gpd_list_node)
- genpd_queue_power_off_work(genpd);
-
- mutex_unlock(&gpd_list_lock);
-}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 5cc1232..b23de18 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -453,7 +453,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
static int opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
- struct device_opp *tmp_dev_opp, *dev_opp = NULL;
+ struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8dc247c..acb3f83 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -226,11 +226,17 @@ static int rpm_idle(struct device *dev, int rpmflags)
callback = NULL;
if (callback) {
- spin_unlock_irq(&dev->power.lock);
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
callback(dev);
- spin_lock_irq(&dev->power.lock);
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
}
dev->power.idle_notification = false;
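
The rpm_idle() change above drops to a plain spin_unlock()/spin_lock() around the idle callback when dev->power.irq_safe is set, so that the interrupt state of a caller running in atomic context is preserved instead of being unconditionally re-enabled. A driver declares that its runtime PM callbacks may be invoked with interrupts disabled via pm_runtime_irq_safe(); the probe below is a minimal, hypothetical sketch of that opt-in, not code from this patch.

/* Hypothetical probe opting into irq-safe runtime PM, the case the new
 * spin_unlock()/spin_lock() branch above caters for. */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/* ... ioremap, clocks, IRQ setup ... */

	/* runtime PM callbacks may now run in atomic context, so the core
	 * must not re-enable interrupts around them */
	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
}
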
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 942d6a7..17b7934 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,7 +5,7 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index c2231ff..c4f7a45 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -113,3 +113,4 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
}
EXPORT_SYMBOL_GPL(regmap_init_i2c);
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 4deba06..f839694 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/init.h>
+#include <linux/module.h>
static int regmap_spi_write(struct device *dev, const void *data, size_t count)
{
@@ -70,3 +71,5 @@ struct regmap *regmap_init_spi(struct spi_device *spi,
return regmap_init(&spi->dev, &regmap_spi, config);
}
EXPORT_SYMBOL_GPL(regmap_init_spi);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index cf3565c..20663f8 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
if (map->work_buf == NULL) {
ret = -ENOMEM;
- goto err_bus;
+ goto err_map;
}
return map;
-err_bus:
- module_put(map->bus->owner);
err_map:
kfree(map);
err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
void regmap_exit(struct regmap *map)
{
kfree(map->work_buf);
- module_put(map->bus->owner);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -317,7 +314,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
u8[0] |= map->bus->read_flag_mask;
ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
- val, map->format.val_bytes);
+ val, val_len);
if (ret != 0)
return ret;
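
The regmap hunks above tighten the init error path, drop the stray module_put() in regmap_exit(), and make raw reads honour the requested val_len. For context, a hedged sketch of how a caller typically drives this API through the I2C wrapper whose MODULE_LICENSE was added above; the 8-bit register and value widths and the register addresses are invented for illustration.

/* Hedged usage sketch for the regmap API patched above. */
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int foo_setup(struct i2c_client *i2c)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	map = regmap_init_i2c(i2c, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_read(map, 0x00, &val);	/* single-register read */
	if (!ret)
		ret = regmap_write(map, 0x01, val);

	regmap_exit(map);			/* no module_put() any more */
	return ret;
}
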
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 873e2e4..73b7b1a 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -15,6 +15,7 @@ MODULE_LICENSE("GPL");
static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
+static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -49,6 +50,7 @@ static struct bus_type bcma_bus_type = {
.match = bcma_bus_match,
.probe = bcma_device_probe,
.remove = bcma_device_remove,
+ .uevent = bcma_device_uevent,
.dev_attrs = bcma_device_attrs,
};
@@ -227,6 +229,16 @@ static int bcma_device_remove(struct device *dev)
return 0;
}
+static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+
+ return add_uevent_var(env,
+ "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
+ core->id.manuf, core->id.id,
+ core->id.rev, core->id.class);
+}
+
static int __init bcma_modinit(void)
{
int err;
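
The uevent hook added above is what lets udev/modprobe autoload a driver from the core IDs. Purely as an illustration of the resulting string, the snippet below reuses the same printf format for a hypothetical core (manufacturer 0x4BF, core id 0x812, revision 0x17, class 0x00); the IDs are examples, not values from this patch.

/* Illustration only: the MODALIAS value bcma_device_uevent() above would
 * emit for a hypothetical core, rebuilt here in userspace. */
#include <stdio.h>

int main(void)
{
	printf("MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X\n",
	       0x4BF, 0x812, 0x17, 0x00);
	/* prints: MODALIAS=bcma:m04BFid0812rev17cl00 */
	return 0;
}
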
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 717d6e4..6f07ec1 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -256,6 +256,21 @@ config BLK_DEV_LOOP
Most users will answer N here.
+config BLK_DEV_LOOP_MIN_COUNT
+ int "Number of loop devices to pre-create at init time"
+ depends on BLK_DEV_LOOP
+ default 8
+ help
+ Static number of loop devices to be unconditionally pre-created
+ at init time.
+
+ This default value can be overwritten on the kernel command
+ line or with the module parameter loop.max_loop.
+
+ The historic default is 8. If a late 2011 version of losetup(8)
+ is used, it can be set to 0, since needed loop devices can be
+ dynamically allocated with the /dev/loop-control interface.
+
config BLK_DEV_CRYPTOLOOP
tristate "Cryptoloop Support"
select CRYPTO
@@ -471,7 +486,7 @@ config XEN_BLKDEV_FRONTEND
in another domain which drives the actual block device.
config XEN_BLKDEV_BACKEND
- tristate "Block-device backend driver"
+ tristate "Xen block-device backend driver"
depends on XEN_BACKEND
help
The block-device backend driver allows the kernel to export its
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 6961002..951a4e3 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -33,7 +33,7 @@
#include <linux/slab.h>
#include <linux/string.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 20de58d..af2a250 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1829,10 +1829,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
/* silently ignore cpu mask on UP kernel */
if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
- err = __bitmap_parse(sc.cpu_mask, 32, 0,
+ err = bitmap_parse(sc.cpu_mask, 32,
cpumask_bits(new_cpu_mask), nr_cpu_ids);
if (err) {
- dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
+ dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
retcode = ERR_CPU_MASK_PARSE;
goto fail;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 76c8da7..4720c7a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -75,11 +75,11 @@
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
-
+#include <linux/miscdevice.h>
#include <asm/uaccess.h>
-static LIST_HEAD(loop_devices);
-static DEFINE_MUTEX(loop_devices_mutex);
+static DEFINE_IDR(loop_index_idr);
+static DEFINE_MUTEX(loop_index_mutex);
static int max_part;
static int part_shift;
@@ -722,17 +722,10 @@ static inline int is_loop_device(struct file *file)
static ssize_t loop_attr_show(struct device *dev, char *page,
ssize_t (*callback)(struct loop_device *, char *))
{
- struct loop_device *l, *lo = NULL;
-
- mutex_lock(&loop_devices_mutex);
- list_for_each_entry(l, &loop_devices, lo_list)
- if (disk_to_dev(l->lo_disk) == dev) {
- lo = l;
- break;
- }
- mutex_unlock(&loop_devices_mutex);
+ struct gendisk *disk = dev_to_disk(dev);
+ struct loop_device *lo = disk->private_data;
- return lo ? callback(lo, page) : -EIO;
+ return callback(lo, page);
}
#define LOOP_ATTR_RO(_name) \
@@ -750,10 +743,10 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
ssize_t ret;
char *p = NULL;
- mutex_lock(&lo->lo_ctl_mutex);
+ spin_lock_irq(&lo->lo_lock);
if (lo->lo_backing_file)
p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
- mutex_unlock(&lo->lo_ctl_mutex);
+ spin_unlock_irq(&lo->lo_lock);
if (IS_ERR_OR_NULL(p))
ret = PTR_ERR(p);
@@ -1007,7 +1000,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
kthread_stop(lo->lo_thread);
+ spin_lock_irq(&lo->lo_lock);
lo->lo_backing_file = NULL;
+ spin_unlock_irq(&lo->lo_lock);
loop_release_xfer(lo);
lo->transfer = NULL;
@@ -1485,13 +1480,22 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
static int lo_open(struct block_device *bdev, fmode_t mode)
{
- struct loop_device *lo = bdev->bd_disk->private_data;
+ struct loop_device *lo;
+ int err = 0;
+
+ mutex_lock(&loop_index_mutex);
+ lo = bdev->bd_disk->private_data;
+ if (!lo) {
+ err = -ENXIO;
+ goto out;
+ }
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
mutex_unlock(&lo->lo_ctl_mutex);
-
- return 0;
+out:
+ mutex_unlock(&loop_index_mutex);
+ return err;
}
static int lo_release(struct gendisk *disk, fmode_t mode)
@@ -1557,40 +1561,71 @@ int loop_register_transfer(struct loop_func_table *funcs)
return 0;
}
+static int unregister_transfer_cb(int id, void *ptr, void *data)
+{
+ struct loop_device *lo = ptr;
+ struct loop_func_table *xfer = data;
+
+ mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_encryption == xfer)
+ loop_release_xfer(lo);
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return 0;
+}
+
int loop_unregister_transfer(int number)
{
unsigned int n = number;
- struct loop_device *lo;
struct loop_func_table *xfer;
if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
return -EINVAL;
xfer_funcs[n] = NULL;
-
- list_for_each_entry(lo, &loop_devices, lo_list) {
- mutex_lock(&lo->lo_ctl_mutex);
-
- if (lo->lo_encryption == xfer)
- loop_release_xfer(lo);
-
- mutex_unlock(&lo->lo_ctl_mutex);
- }
-
+ idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
return 0;
}
EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
-static struct loop_device *loop_alloc(int i)
+static int loop_add(struct loop_device **l, int i)
{
struct loop_device *lo;
struct gendisk *disk;
+ int err;
lo = kzalloc(sizeof(*lo), GFP_KERNEL);
- if (!lo)
+ if (!lo) {
+ err = -ENOMEM;
goto out;
+ }
+
+ err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
+ if (err < 0)
+ goto out_free_dev;
+
+ if (i >= 0) {
+ int m;
+
+ /* create specific i in the index */
+ err = idr_get_new_above(&loop_index_idr, lo, i, &m);
+ if (err >= 0 && i != m) {
+ idr_remove(&loop_index_idr, m);
+ err = -EEXIST;
+ }
+ } else if (i == -1) {
+ int m;
+
+ /* get next free nr */
+ err = idr_get_new(&loop_index_idr, lo, &m);
+ if (err >= 0)
+ i = m;
+ } else {
+ err = -EINVAL;
+ }
+ if (err < 0)
+ goto out_free_dev;
lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
if (!lo->lo_queue)
@@ -1611,81 +1646,158 @@ static struct loop_device *loop_alloc(int i)
disk->private_data = lo;
disk->queue = lo->lo_queue;
sprintf(disk->disk_name, "loop%d", i);
- return lo;
+ add_disk(disk);
+ *l = lo;
+ return lo->lo_number;
out_free_queue:
blk_cleanup_queue(lo->lo_queue);
out_free_dev:
kfree(lo);
out:
- return NULL;
+ return err;
}
-static void loop_free(struct loop_device *lo)
+static void loop_remove(struct loop_device *lo)
{
+ del_gendisk(lo->lo_disk);
blk_cleanup_queue(lo->lo_queue);
put_disk(lo->lo_disk);
- list_del(&lo->lo_list);
kfree(lo);
}
-static struct loop_device *loop_init_one(int i)
+static int find_free_cb(int id, void *ptr, void *data)
+{
+ struct loop_device *lo = ptr;
+ struct loop_device **l = data;
+
+ if (lo->lo_state == Lo_unbound) {
+ *l = lo;
+ return 1;
+ }
+ return 0;
+}
+
+static int loop_lookup(struct loop_device **l, int i)
{
struct loop_device *lo;
+ int ret = -ENODEV;
- list_for_each_entry(lo, &loop_devices, lo_list) {
- if (lo->lo_number == i)
- return lo;
+ if (i < 0) {
+ int err;
+
+ err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
+ if (err == 1) {
+ *l = lo;
+ ret = lo->lo_number;
+ }
+ goto out;
}
- lo = loop_alloc(i);
+ /* lookup and return a specific i */
+ lo = idr_find(&loop_index_idr, i);
if (lo) {
- add_disk(lo->lo_disk);
- list_add_tail(&lo->lo_list, &loop_devices);
+ *l = lo;
+ ret = lo->lo_number;
}
- return lo;
-}
-
-static void loop_del_one(struct loop_device *lo)
-{
- del_gendisk(lo->lo_disk);
- loop_free(lo);
+out:
+ return ret;
}
static struct kobject *loop_probe(dev_t dev, int *part, void *data)
{
struct loop_device *lo;
struct kobject *kobj;
+ int err;
- mutex_lock(&loop_devices_mutex);
- lo = loop_init_one(MINOR(dev) >> part_shift);
- kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
- mutex_unlock(&loop_devices_mutex);
+ mutex_lock(&loop_index_mutex);
+ err = loop_lookup(&lo, MINOR(dev) >> part_shift);
+ if (err < 0)
+ err = loop_add(&lo, MINOR(dev) >> part_shift);
+ if (err < 0)
+ kobj = ERR_PTR(err);
+ else
+ kobj = get_disk(lo->lo_disk);
+ mutex_unlock(&loop_index_mutex);
*part = 0;
return kobj;
}
+static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ unsigned long parm)
+{
+ struct loop_device *lo;
+ int ret = -ENOSYS;
+
+ mutex_lock(&loop_index_mutex);
+ switch (cmd) {
+ case LOOP_CTL_ADD:
+ ret = loop_lookup(&lo, parm);
+ if (ret >= 0) {
+ ret = -EEXIST;
+ break;
+ }
+ ret = loop_add(&lo, parm);
+ break;
+ case LOOP_CTL_REMOVE:
+ ret = loop_lookup(&lo, parm);
+ if (ret < 0)
+ break;
+ mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_state != Lo_unbound) {
+ ret = -EBUSY;
+ mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ if (lo->lo_refcnt > 0) {
+ ret = -EBUSY;
+ mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ lo->lo_disk->private_data = NULL;
+ mutex_unlock(&lo->lo_ctl_mutex);
+ idr_remove(&loop_index_idr, lo->lo_number);
+ loop_remove(lo);
+ break;
+ case LOOP_CTL_GET_FREE:
+ ret = loop_lookup(&lo, -1);
+ if (ret >= 0)
+ break;
+ ret = loop_add(&lo, -1);
+ }
+ mutex_unlock(&loop_index_mutex);
+
+ return ret;
+}
+
+static const struct file_operations loop_ctl_fops = {
+ .open = nonseekable_open,
+ .unlocked_ioctl = loop_control_ioctl,
+ .compat_ioctl = loop_control_ioctl,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice loop_misc = {
+ .minor = LOOP_CTRL_MINOR,
+ .name = "loop-control",
+ .fops = &loop_ctl_fops,
+};
+
+MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
+MODULE_ALIAS("devname:loop-control");
+
static int __init loop_init(void)
{
int i, nr;
unsigned long range;
- struct loop_device *lo, *next;
+ struct loop_device *lo;
+ int err;
- /*
- * loop module now has a feature to instantiate underlying device
- * structure on-demand, provided that there is an access dev node.
- * However, this will not work well with user space tool that doesn't
- * know about such "feature". In order to not break any existing
- * tool, we do the following:
- *
- * (1) if max_loop is specified, create that many upfront, and this
- * also becomes a hard limit.
- * (2) if max_loop is not specified, create 8 loop device on module
- * load, user can further extend loop device by create dev node
- * themselves and have kernel automatically instantiate actual
- * device on-demand.
- */
+ err = misc_register(&loop_misc);
+ if (err < 0)
+ return err;
part_shift = 0;
if (max_part > 0) {
@@ -1708,57 +1820,60 @@ static int __init loop_init(void)
if (max_loop > 1UL << (MINORBITS - part_shift))
return -EINVAL;
+ /*
+ * If max_loop is specified, create that many devices upfront.
+ * This also becomes a hard limit. If max_loop is not specified,
+ * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
+ * init time. Loop devices can be requested on-demand with the
+ * /dev/loop-control interface, or be instantiated by accessing
+ * a 'dead' device node.
+ */
if (max_loop) {
nr = max_loop;
range = max_loop << part_shift;
} else {
- nr = 8;
+ nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
range = 1UL << MINORBITS;
}
if (register_blkdev(LOOP_MAJOR, "loop"))
return -EIO;
- for (i = 0; i < nr; i++) {
- lo = loop_alloc(i);
- if (!lo)
- goto Enomem;
- list_add_tail(&lo->lo_list, &loop_devices);
- }
-
- /* point of no return */
-
- list_for_each_entry(lo, &loop_devices, lo_list)
- add_disk(lo->lo_disk);
-
blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
THIS_MODULE, loop_probe, NULL, NULL);
+ /* pre-create number of devices given by config or max_loop */
+ mutex_lock(&loop_index_mutex);
+ for (i = 0; i < nr; i++)
+ loop_add(&lo, i);
+ mutex_unlock(&loop_index_mutex);
+
printk(KERN_INFO "loop: module loaded\n");
return 0;
+}
-Enomem:
- printk(KERN_INFO "loop: out of memory\n");
-
- list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
- loop_free(lo);
+static int loop_exit_cb(int id, void *ptr, void *data)
+{
+ struct loop_device *lo = ptr;
- unregister_blkdev(LOOP_MAJOR, "loop");
- return -ENOMEM;
+ loop_remove(lo);
+ return 0;
}
static void __exit loop_exit(void)
{
unsigned long range;
- struct loop_device *lo, *next;
range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
- list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
- loop_del_one(lo);
+ idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
+ idr_remove_all(&loop_index_idr);
+ idr_destroy(&loop_index_idr);
blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
unregister_blkdev(LOOP_MAJOR, "loop");
+
+ misc_deregister(&loop_misc);
}
module_init(loop_init);
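
The loop_control_ioctl() handler above, together with the BLK_DEV_LOOP_MIN_COUNT help text earlier in this diff, is meant to be driven from userspace through the /dev/loop-control misc device. The sketch below assumes a kernel and a <linux/loop.h> that export the LOOP_CTL_* ioctls introduced alongside this change; error handling is reduced to perror() for brevity.

/* Userspace sketch for the /dev/loop-control interface added above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int main(void)
{
	int ctl = open("/dev/loop-control", O_RDWR);
	int nr;

	if (ctl < 0) {
		perror("open /dev/loop-control");
		return 1;
	}

	/* ask for the first unbound loop device; one is created on demand,
	 * mirroring the LOOP_CTL_GET_FREE case above */
	nr = ioctl(ctl, LOOP_CTL_GET_FREE);
	if (nr < 0)
		perror("LOOP_CTL_GET_FREE");
	else
		printf("free loop device: /dev/loop%d\n", nr);

	/* explicitly create /dev/loop42; fails with EEXIST if it exists */
	if (ioctl(ctl, LOOP_CTL_ADD, 42) < 0)
		perror("LOOP_CTL_ADD");

	/* remove it again; fails with EBUSY while bound or still open */
	if (ioctl(ctl, LOOP_CTL_REMOVE, 42) < 0)
		perror("LOOP_CTL_REMOVE");

	close(ctl);
	return 0;
}
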
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2c09102..fe3c324 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -630,6 +630,14 @@ static int rbd_get_num_segments(struct rbd_image_header *header,
}
/*
+ * returns the size of an object in the image
+ */
+static u64 rbd_obj_bytes(struct rbd_image_header *header)
+{
+ return 1 << header->obj_order;
+}
+
+/*
* bio helpers
*/
@@ -1253,6 +1261,35 @@ fail:
return ret;
}
+/*
+ * Request sync osd unwatch
+ */
+static int rbd_req_sync_unwatch(struct rbd_device *dev,
+ const char *obj)
+{
+ struct ceph_osd_req_op *ops;
+
+ int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0);
+ if (ret < 0)
+ return ret;
+
+ ops[0].watch.ver = 0;
+ ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie);
+ ops[0].watch.flag = 0;
+
+ ret = rbd_req_sync_op(dev, NULL,
+ CEPH_NOSNAP,
+ 0,
+ CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+ ops,
+ 1, obj, 0, 0, NULL, NULL, NULL);
+
+ rbd_destroy_ops(ops);
+ ceph_osdc_cancel_event(dev->watch_event);
+ dev->watch_event = NULL;
+ return ret;
+}
+
struct rbd_notify_info {
struct rbd_device *dev;
};
@@ -1736,6 +1773,13 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
if (!q)
goto out_disk;
+
+ /* set io sizes to object size */
+ blk_queue_max_hw_sectors(q, rbd_obj_bytes(&rbd_dev->header) / 512ULL);
+ blk_queue_max_segment_size(q, rbd_obj_bytes(&rbd_dev->header));
+ blk_queue_io_min(q, rbd_obj_bytes(&rbd_dev->header));
+ blk_queue_io_opt(q, rbd_obj_bytes(&rbd_dev->header));
+
blk_queue_merge_bvec(q, rbd_merge_bvec);
disk->queue = q;
@@ -2290,7 +2334,7 @@ static void rbd_dev_release(struct device *dev)
ceph_osdc_unregister_linger_request(&rbd_dev->client->osdc,
rbd_dev->watch_request);
if (rbd_dev->watch_event)
- ceph_osdc_cancel_event(rbd_dev->watch_event);
+ rbd_req_sync_unwatch(rbd_dev, rbd_dev->obj_md_name);
rbd_put_client(rbd_dev);
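
The queue-limit hunk above sizes all request-queue limits from rbd_obj_bytes(), i.e. 1 << obj_order. As a worked example (an object order of 22 is a common RBD default, not something this patch sets):

/* Worked example for the limits set in rbd_init_disk() above:
 * order 22 gives 4 MiB objects and 8192 512-byte sectors. */
#include <stdio.h>

int main(void)
{
	unsigned int obj_order = 22;
	unsigned long long obj_bytes = 1ULL << obj_order;

	printf("object size: %llu bytes, max_hw_sectors: %llu\n",
	       obj_bytes, obj_bytes / 512ULL);	/* 4194304 bytes, 8192 sectors */
	return 0;
}
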
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 773bfa7..ae3e167 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1184,6 +1184,7 @@ static struct of_device_id swim3_match[] =
{
.compatible = "swim3"
},
+ { /* end of list */ }
};
static struct macio_driver swim3_driver =
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b536a9c..9ea8c25 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -123,8 +123,8 @@ static DEFINE_SPINLOCK(minor_lock);
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
-#define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16))
-#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4)
+#define EMULATED_SD_DISK_MINOR_OFFSET (0)
+#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
#define DEV_NAME "xvd" /* name in /dev */
@@ -529,7 +529,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
minor = BLKIF_MINOR_EXT(info->vdevice);
nr_parts = PARTS_PER_EXT_DISK;
offset = minor / nr_parts;
- if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4)
+ if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
"emulated IDE disks,\n\t choose an xvd device name"
"from xvde on\n", info->vdevice);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a585473..db7cb81 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3002) },
{ USB_DEVICE(0x13d3, 0x3304) },
+ { USB_DEVICE(0x0930, 0x0215) },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 91d13a9..3ef4760 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -106,6 +106,7 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros 3011 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
@@ -256,7 +257,9 @@ static void btusb_intr_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- if (err != -EPERM)
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected */
+ if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
@@ -341,7 +344,9 @@ static void btusb_bulk_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- if (err != -EPERM)
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected */
+ if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
@@ -431,7 +436,9 @@ static void btusb_isoc_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- if (err != -EPERM)
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected */
+ if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 75fb965..f997c27 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1929,11 +1929,17 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
goto out;
s->manufact.len = buf[0] << 8 | buf[1];
- if (s->manufact.len < 0 || s->manufact.len > 2048) {
+ if (s->manufact.len < 0) {
cdinfo(CD_WARNING, "Received invalid manufacture info length"
" (%d)\n", s->manufact.len);
ret = -EIO;
} else {
+ if (s->manufact.len > 2048) {
+ cdinfo(CD_WARNING, "Received invalid manufacture info "
+ "length (%d): truncating to 2048\n",
+ s->manufact.len);
+ s->manufact.len = 2048;
+ }
memcpy(s->manufact.value, &buf[4], s->manufact.len);
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 49502bc..423fd56 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -616,5 +616,16 @@ config MSM_SMD_PKT
Enables userspace clients to read and write to some packet SMD
ports via device interface for MSM chipset.
+config TILE_SROM
+ bool "Character-device access via hypervisor to the Tilera SPI ROM"
+ depends on TILE
+ default y
+ ---help---
+ This device provides character-level read-write access
+ to the SROM, typically via the "0", "1", and "2" devices
+ in /dev/srom/. The Tilera hypervisor makes the flash
+ device appear much like a simple EEPROM, and knows
+ how to partition a single ROM for multiple purposes.
+
endmenu
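
The TILE_SROM help text above promises plain character-device semantics for the SROM partitions, so userspace access is ordinary open/read/lseek on the /dev/srom/ nodes. A hedged sketch follows; the /dev/srom/0 path comes from the help text and everything else is standard POSIX I/O, nothing here is specific to the Tilera driver itself.

/* Userspace sketch reading the first 64 bytes of SROM partition 0. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[64];
	ssize_t n;
	int fd = open("/dev/srom/0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/srom/0");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes from SROM partition 0\n", n);
	close(fd);
	return 0;
}
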
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7a00672..32762ba 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -63,3 +63,5 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o
obj-$(CONFIG_JS_RTC) += js-rtc.o
js-rtc-y = rtc.o
+
+obj-$(CONFIG_TILE_SROM) += tile-srom.o
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index ac6739e..c3de70d 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -1,6 +1,6 @@
/* n2-drv.c: Niagara-2 RNG driver.
*
- * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2008, 2011 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
@@ -22,8 +22,8 @@
#define DRV_MODULE_NAME "n2rng"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "0.1"
-#define DRV_MODULE_RELDATE "May 15, 2008"
+#define DRV_MODULE_VERSION "0.2"
+#define DRV_MODULE_RELDATE "July 27, 2011"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -623,14 +623,14 @@ static const struct of_device_id n2rng_match[];
static int __devinit n2rng_probe(struct platform_device *op)
{
const struct of_device_id *match;
- int victoria_falls;
+ int multi_capable;
int err = -ENOMEM;
struct n2rng *np;
match = of_match_device(n2rng_match, &op->dev);
if (!match)
return -EINVAL;
- victoria_falls = (match->data != NULL);
+ multi_capable = (match->data != NULL);
n2rng_driver_version();
np = kzalloc(sizeof(*np), GFP_KERNEL);
@@ -640,8 +640,8 @@ static int __devinit n2rng_probe(struct platform_device *op)
INIT_DELAYED_WORK(&np->work, n2rng_work);
- if (victoria_falls)
- np->flags |= N2RNG_FLAG_VF;
+ if (multi_capable)
+ np->flags |= N2RNG_FLAG_MULTI;
err = -ENODEV;
np->hvapi_major = 2;
@@ -658,10 +658,10 @@ static int __devinit n2rng_probe(struct platform_device *op)
}
}
- if (np->flags & N2RNG_FLAG_VF) {
+ if (np->flags & N2RNG_FLAG_MULTI) {
if (np->hvapi_major < 2) {
- dev_err(&op->dev, "VF RNG requires HVAPI major "
- "version 2 or later, got %lu\n",
+ dev_err(&op->dev, "multi-unit-capable RNG requires "
+ "HVAPI major version 2 or later, got %lu\n",
np->hvapi_major);
goto out_hvapi_unregister;
}
@@ -688,8 +688,8 @@ static int __devinit n2rng_probe(struct platform_device *op)
goto out_free_units;
dev_info(&op->dev, "Found %s RNG, units: %d\n",
- ((np->flags & N2RNG_FLAG_VF) ?
- "Victoria Falls" : "Niagara2"),
+ ((np->flags & N2RNG_FLAG_MULTI) ?
+ "multi-unit-capable" : "single-unit"),
np->num_units);
np->hwrng.name = "n2rng";
@@ -751,6 +751,11 @@ static const struct of_device_id n2rng_match[] = {
.compatible = "SUNW,vf-rng",
.data = (void *) 1,
},
+ {
+ .name = "random-number-generator",
+ .compatible = "SUNW,kt-rng",
+ .data = (void *) 1,
+ },
{},
};
MODULE_DEVICE_TABLE(of, n2rng_match);
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 4bea07f..f244ac8 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -68,7 +68,7 @@ struct n2rng {
struct platform_device *op;
unsigned long flags;
-#define N2RNG_FLAG_VF 0x00000001 /* Victoria Falls RNG, else N2 */
+#define N2RNG_FLAG_MULTI 0x00000001 /* Multi-unit capable RNG */
#define N2RNG_FLAG_CONTROL 0x00000002 /* Operating in control domain */
#define N2RNG_FLAG_READY 0x00000008 /* Ready for hw-rng layer */
#define N2RNG_FLAG_SHUTDOWN 0x00000010 /* Driver unregistering */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 320668f..3302586 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -52,7 +52,7 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_X86
/*
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
index b6f8a65..8eca55d 100644
--- a/drivers/char/msm_smd_pkt.c
+++ b/drivers/char/msm_smd_pkt.c
@@ -379,9 +379,8 @@ static int __init smd_pkt_init(void)
for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),
GFP_KERNEL);
- if (IS_ERR(smd_pkt_devp[i])) {
- r = PTR_ERR(smd_pkt_devp[i]);
- pr_err("kmalloc() failed %d\n", r);
+ if (!smd_pkt_devp[i]) {
+ pr_err("kmalloc() failed\n");
goto clean_cdevs;
}
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 25d139c..5c0d96a 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -46,7 +46,7 @@
#include <asm/page.h>
#include <asm/system.h>
#include <asm/pgtable.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/uncached.h>
#include <asm/sn/addrs.h>
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index 1a9f5f6..810aff9 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -19,18 +19,26 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/kmsg_dump.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/ramoops.h>
#define RAMOOPS_KERNMSG_HDR "===="
+#define MIN_MEM_SIZE 4096UL
-#define RECORD_SIZE 4096UL
+static ulong record_size = MIN_MEM_SIZE;
+module_param(record_size, ulong, 0400);
+MODULE_PARM_DESC(record_size,
+ "size of each dump done on oops/panic");
static ulong mem_address;
module_param(mem_address, ulong, 0400);
@@ -52,10 +60,15 @@ static struct ramoops_context {
void *virt_addr;
phys_addr_t phys_addr;
unsigned long size;
+ unsigned long record_size;
+ int dump_oops;
int count;
int max_count;
} oops_cxt;
+static struct platform_device *dummy;
+static struct ramoops_platform_data *dummy_data;
+
static void ramoops_do_dump(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
const char *s2, unsigned long l2)
@@ -74,13 +87,13 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
return;
/* Only dump oopses if dump_oops is set */
- if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ if (reason == KMSG_DUMP_OOPS && !cxt->dump_oops)
return;
- buf = cxt->virt_addr + (cxt->count * RECORD_SIZE);
+ buf = cxt->virt_addr + (cxt->count * cxt->record_size);
buf_orig = buf;
- memset(buf, '\0', RECORD_SIZE);
+ memset(buf, '\0', cxt->record_size);
res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
buf += res;
do_gettimeofday(&timestamp);
@@ -88,8 +101,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
buf += res;
hdr_size = buf - buf_orig;
- l2_cpy = min(l2, RECORD_SIZE - hdr_size);
- l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy);
+ l2_cpy = min(l2, cxt->record_size - hdr_size);
+ l1_cpy = min(l1, cxt->record_size - hdr_size - l2_cpy);
s2_start = l2 - l2_cpy;
s1_start = l1 - l1_cpy;
@@ -106,44 +119,59 @@ static int __init ramoops_probe(struct platform_device *pdev)
struct ramoops_context *cxt = &oops_cxt;
int err = -EINVAL;
- if (pdata) {
- mem_size = pdata->mem_size;
- mem_address = pdata->mem_address;
+ if (!pdata->mem_size || !pdata->record_size) {
+ pr_err("The memory size and the record size must be "
+ "non-zero\n");
+ goto fail3;
}
- if (!mem_size) {
- printk(KERN_ERR "ramoops: invalid size specification");
+ rounddown_pow_of_two(pdata->mem_size);
+ rounddown_pow_of_two(pdata->record_size);
+
+ /* Check for the minimum memory size */
+ if (pdata->mem_size < MIN_MEM_SIZE &&
+ pdata->record_size < MIN_MEM_SIZE) {
+ pr_err("memory size too small, minium is %lu\n", MIN_MEM_SIZE);
goto fail3;
}
- rounddown_pow_of_two(mem_size);
-
- if (mem_size < RECORD_SIZE) {
- printk(KERN_ERR "ramoops: size too small");
+ if (pdata->mem_size < pdata->record_size) {
+ pr_err("The memory size must be larger than the "
+ "records size\n");
goto fail3;
}
- cxt->max_count = mem_size / RECORD_SIZE;
+ cxt->max_count = pdata->mem_size / pdata->record_size;
cxt->count = 0;
- cxt->size = mem_size;
- cxt->phys_addr = mem_address;
+ cxt->size = pdata->mem_size;
+ cxt->phys_addr = pdata->mem_address;
+ cxt->record_size = pdata->record_size;
+ cxt->dump_oops = pdata->dump_oops;
+ /*
+ * Update the module parameter variables as well so they are visible
+ * through /sys/module/ramoops/parameters/
+ */
+ mem_size = pdata->mem_size;
+ mem_address = pdata->mem_address;
+ record_size = pdata->record_size;
+ dump_oops = pdata->dump_oops;
if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
- printk(KERN_ERR "ramoops: request mem region failed");
+ pr_err("request mem region failed\n");
err = -EINVAL;
goto fail3;
}
cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
if (!cxt->virt_addr) {
- printk(KERN_ERR "ramoops: ioremap failed");
+ pr_err("ioremap failed\n");
goto fail2;
}
cxt->dump.dump = ramoops_do_dump;
err = kmsg_dump_register(&cxt->dump);
if (err) {
- printk(KERN_ERR "ramoops: registering kmsg dumper failed");
+ pr_err("registering kmsg dumper failed\n");
goto fail1;
}
@@ -162,7 +190,7 @@ static int __exit ramoops_remove(struct platform_device *pdev)
struct ramoops_context *cxt = &oops_cxt;
if (kmsg_dump_unregister(&cxt->dump) < 0)
- printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper");
+ pr_warn("could not unregister kmsg_dumper\n");
iounmap(cxt->virt_addr);
release_mem_region(cxt->phys_addr, cxt->size);
@@ -179,12 +207,39 @@ static struct platform_driver ramoops_driver = {
static int __init ramoops_init(void)
{
- return platform_driver_probe(&ramoops_driver, ramoops_probe);
+ int ret;
+ ret = platform_driver_probe(&ramoops_driver, ramoops_probe);
+ if (ret == -ENODEV) {
+ /*
+ * If we didn't find a platform device, we use the module
+ * parameters to build platform data on the fly.
+ */
+ pr_info("platform device not found, using module parameters\n");
+ dummy_data = kzalloc(sizeof(struct ramoops_platform_data),
+ GFP_KERNEL);
+ if (!dummy_data)
+ return -ENOMEM;
+ dummy_data->mem_size = mem_size;
+ dummy_data->mem_address = mem_address;
+ dummy_data->record_size = record_size;
+ dummy_data->dump_oops = dump_oops;
+ dummy = platform_create_bundle(&ramoops_driver, ramoops_probe,
+ NULL, 0, dummy_data,
+ sizeof(struct ramoops_platform_data));
+
+ if (IS_ERR(dummy))
+ ret = PTR_ERR(dummy);
+ else
+ ret = 0;
+ }
+
+ return ret;
}
static void __exit ramoops_exit(void)
{
platform_driver_unregister(&ramoops_driver);
+ kfree(dummy_data);
}
module_init(ramoops_init);
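
The ramoops changes above size the dump area from platform data (or, failing that, from the module parameters), rounding the region and record sizes down to powers of two and deriving how many records fit. Below is a minimal userspace model of that sizing check, with illustrative values; rounddown_pow_of_two() here is a local stand-in for the kernel helper, and the model treats either size falling below the minimum as an error, which is slightly stricter than the combined check in the patch.

#include <stdio.h>
#include <stdlib.h>

#define MIN_MEM_SIZE 4096UL

/* local stand-in for the kernel's rounddown_pow_of_two() */
static unsigned long rounddown_pow_of_two(unsigned long x)
{
	unsigned long p = 1;

	while (p <= x / 2)
		p *= 2;
	return p;
}

static int ramoops_layout(unsigned long mem_size, unsigned long record_size)
{
	if (!mem_size || !record_size)
		return -1;                      /* both sizes must be non-zero */

	mem_size = rounddown_pow_of_two(mem_size);
	record_size = rounddown_pow_of_two(record_size);

	if (mem_size < MIN_MEM_SIZE || record_size < MIN_MEM_SIZE)
		return -1;                      /* too small to hold one record */
	if (mem_size < record_size)
		return -1;                      /* region must fit a whole record */

	printf("%lu bytes -> %lu records of %lu bytes each\n",
	       mem_size, mem_size / record_size, record_size);
	return 0;
}

int main(void)
{
	return ramoops_layout(128 * 1024, 4096) ? EXIT_FAILURE : EXIT_SUCCESS;
}
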
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7292819..c35a785 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1300,345 +1300,14 @@ ctl_table random_table[] = {
};
#endif /* CONFIG_SYSCTL */
-/********************************************************************
- *
- * Random functions for networking
- *
- ********************************************************************/
-
-/*
- * TCP initial sequence number picking. This uses the random number
- * generator to pick an initial secret value. This value is hashed
- * along with the TCP endpoint information to provide a unique
- * starting point for each pair of TCP endpoints. This defeats
- * attacks which rely on guessing the initial TCP sequence number.
- * This algorithm was suggested by Steve Bellovin.
- *
- * Using a very strong hash was taking an appreciable amount of the total
- * TCP connection establishment time, so this is a weaker hash,
- * compensated for by changing the secret periodically.
- */
-
-/* F, G and H are basic MD4 functions: selection, majority, parity */
-#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
-#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
-#define H(x, y, z) ((x) ^ (y) ^ (z))
-
-/*
- * The generic round function. The application is so specific that
- * we don't bother protecting all the arguments with parens, as is generally
- * good macro practice, in favor of extra legibility.
- * Rotation is separate from addition to prevent recomputation
- */
-#define ROUND(f, a, b, c, d, x, s) \
- (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
-#define K1 0
-#define K2 013240474631UL
-#define K3 015666365641UL
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-
-static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
-{
- __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
-
- /* Round 1 */
- ROUND(F, a, b, c, d, in[ 0] + K1, 3);
- ROUND(F, d, a, b, c, in[ 1] + K1, 7);
- ROUND(F, c, d, a, b, in[ 2] + K1, 11);
- ROUND(F, b, c, d, a, in[ 3] + K1, 19);
- ROUND(F, a, b, c, d, in[ 4] + K1, 3);
- ROUND(F, d, a, b, c, in[ 5] + K1, 7);
- ROUND(F, c, d, a, b, in[ 6] + K1, 11);
- ROUND(F, b, c, d, a, in[ 7] + K1, 19);
- ROUND(F, a, b, c, d, in[ 8] + K1, 3);
- ROUND(F, d, a, b, c, in[ 9] + K1, 7);
- ROUND(F, c, d, a, b, in[10] + K1, 11);
- ROUND(F, b, c, d, a, in[11] + K1, 19);
-
- /* Round 2 */
- ROUND(G, a, b, c, d, in[ 1] + K2, 3);
- ROUND(G, d, a, b, c, in[ 3] + K2, 5);
- ROUND(G, c, d, a, b, in[ 5] + K2, 9);
- ROUND(G, b, c, d, a, in[ 7] + K2, 13);
- ROUND(G, a, b, c, d, in[ 9] + K2, 3);
- ROUND(G, d, a, b, c, in[11] + K2, 5);
- ROUND(G, c, d, a, b, in[ 0] + K2, 9);
- ROUND(G, b, c, d, a, in[ 2] + K2, 13);
- ROUND(G, a, b, c, d, in[ 4] + K2, 3);
- ROUND(G, d, a, b, c, in[ 6] + K2, 5);
- ROUND(G, c, d, a, b, in[ 8] + K2, 9);
- ROUND(G, b, c, d, a, in[10] + K2, 13);
-
- /* Round 3 */
- ROUND(H, a, b, c, d, in[ 3] + K3, 3);
- ROUND(H, d, a, b, c, in[ 7] + K3, 9);
- ROUND(H, c, d, a, b, in[11] + K3, 11);
- ROUND(H, b, c, d, a, in[ 2] + K3, 15);
- ROUND(H, a, b, c, d, in[ 6] + K3, 3);
- ROUND(H, d, a, b, c, in[10] + K3, 9);
- ROUND(H, c, d, a, b, in[ 1] + K3, 11);
- ROUND(H, b, c, d, a, in[ 5] + K3, 15);
- ROUND(H, a, b, c, d, in[ 9] + K3, 3);
- ROUND(H, d, a, b, c, in[ 0] + K3, 9);
- ROUND(H, c, d, a, b, in[ 4] + K3, 11);
- ROUND(H, b, c, d, a, in[ 8] + K3, 15);
-
- return buf[1] + b; /* "most hashed" word */
- /* Alternative: return sum of all words? */
-}
-#endif
-
-#undef ROUND
-#undef F
-#undef G
-#undef H
-#undef K1
-#undef K2
-#undef K3
-
-/* This should not be decreased so low that ISNs wrap too fast. */
-#define REKEY_INTERVAL (300 * HZ)
-/*
- * Bit layout of the tcp sequence numbers (before adding current time):
- * bit 24-31: increased after every key exchange
- * bit 0-23: hash(source,dest)
- *
- * The implementation is similar to the algorithm described
- * in the Appendix of RFC 1185, except that
- * - it uses a 1 MHz clock instead of a 250 kHz clock
- * - it performs a rekey every 5 minutes, which is equivalent
- * to a (source,dest) tulple dependent forward jump of the
- * clock by 0..2^(HASH_BITS+1)
- *
- * Thus the average ISN wraparound time is 68 minutes instead of
- * 4.55 hours.
- *
- * SMP cleanup and lock avoidance with poor man's RCU.
- * Manfred Spraul <manfred@colorfullife.com>
- *
- */
-#define COUNT_BITS 8
-#define COUNT_MASK ((1 << COUNT_BITS) - 1)
-#define HASH_BITS 24
-#define HASH_MASK ((1 << HASH_BITS) - 1)
+static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-static struct keydata {
- __u32 count; /* already shifted to the final position */
- __u32 secret[12];
-} ____cacheline_aligned ip_keydata[2];
-
-static unsigned int ip_cnt;
-
-static void rekey_seq_generator(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
-
-/*
- * Lock avoidance:
- * The ISN generation runs lockless - it's just a hash over random data.
- * State changes happen every 5 minutes when the random key is replaced.
- * Synchronization is performed by having two copies of the hash function
- * state and rekey_seq_generator always updates the inactive copy.
- * The copy is then activated by updating ip_cnt.
- * The implementation breaks down if someone blocks the thread
- * that processes SYN requests for more than 5 minutes. Should never
- * happen, and even if that happens only a not perfectly compliant
- * ISN is generated, nothing fatal.
- */
-static void rekey_seq_generator(struct work_struct *work)
+static int __init random_int_secret_init(void)
{
- struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
-
- get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
- keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
- smp_wmb();
- ip_cnt++;
- schedule_delayed_work(&rekey_work,
- round_jiffies_relative(REKEY_INTERVAL));
-}
-
-static inline struct keydata *get_keyptr(void)
-{
- struct keydata *keyptr = &ip_keydata[ip_cnt & 1];
-
- smp_rmb();
-
- return keyptr;
-}
-
-static __init int seqgen_init(void)
-{
- rekey_seq_generator(NULL);
+ get_random_bytes(random_int_secret, sizeof(random_int_secret));
return 0;
}
-late_initcall(seqgen_init);
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
- __be16 sport, __be16 dport)
-{
- __u32 seq;
- __u32 hash[12];
- struct keydata *keyptr = get_keyptr();
-
- /* The procedure is the same as for IPv4, but addresses are longer.
- * Thus we must use twothirdsMD4Transform.
- */
-
- memcpy(hash, saddr, 16);
- hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
- memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
-
- seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
- seq += keyptr->count;
-
- seq += ktime_to_ns(ktime_get_real());
-
- return seq;
-}
-EXPORT_SYMBOL(secure_tcpv6_sequence_number);
-#endif
-
-/* The code below is shamelessly stolen from secure_tcp_sequence_number().
- * All blames to Andrey V. Savochkin <saw@msu.ru>.
- */
-__u32 secure_ip_id(__be32 daddr)
-{
- struct keydata *keyptr;
- __u32 hash[4];
-
- keyptr = get_keyptr();
-
- /*
- * Pick a unique starting offset for each IP destination.
- * The dest ip address is placed in the starting vector,
- * which is then hashed with random data.
- */
- hash[0] = (__force __u32)daddr;
- hash[1] = keyptr->secret[9];
- hash[2] = keyptr->secret[10];
- hash[3] = keyptr->secret[11];
-
- return half_md4_transform(hash, keyptr->secret);
-}
-
-__u32 secure_ipv6_id(const __be32 daddr[4])
-{
- const struct keydata *keyptr;
- __u32 hash[4];
-
- keyptr = get_keyptr();
-
- hash[0] = (__force __u32)daddr[0];
- hash[1] = (__force __u32)daddr[1];
- hash[2] = (__force __u32)daddr[2];
- hash[3] = (__force __u32)daddr[3];
-
- return half_md4_transform(hash, keyptr->secret);
-}
-
-#ifdef CONFIG_INET
-
-__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport)
-{
- __u32 seq;
- __u32 hash[4];
- struct keydata *keyptr = get_keyptr();
-
- /*
- * Pick a unique starting offset for each TCP connection endpoints
- * (saddr, daddr, sport, dport).
- * Note that the words are placed into the starting vector, which is
- * then mixed with a partial MD4 over random data.
- */
- hash[0] = (__force u32)saddr;
- hash[1] = (__force u32)daddr;
- hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
- hash[3] = keyptr->secret[11];
-
- seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
- seq += keyptr->count;
- /*
- * As close as possible to RFC 793, which
- * suggests using a 250 kHz clock.
- * Further reading shows this assumes 2 Mb/s networks.
- * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
- * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
- * we also need to limit the resolution so that the u32 seq
- * overlaps less than one time per MSL (2 minutes).
- * Choosing a clock of 64 ns period is OK. (period of 274 s)
- */
- seq += ktime_to_ns(ktime_get_real()) >> 6;
-
- return seq;
-}
-
-/* Generate secure starting point for ephemeral IPV4 transport port search */
-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
-{
- struct keydata *keyptr = get_keyptr();
- u32 hash[4];
-
- /*
- * Pick a unique starting offset for each ephemeral port search
- * (saddr, daddr, dport) and 48bits of random data.
- */
- hash[0] = (__force u32)saddr;
- hash[1] = (__force u32)daddr;
- hash[2] = (__force u32)dport ^ keyptr->secret[10];
- hash[3] = keyptr->secret[11];
-
- return half_md4_transform(hash, keyptr->secret);
-}
-EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
- __be16 dport)
-{
- struct keydata *keyptr = get_keyptr();
- u32 hash[12];
-
- memcpy(hash, saddr, 16);
- hash[4] = (__force u32)dport;
- memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
-
- return twothirdsMD4Transform((const __u32 *)daddr, hash);
-}
-#endif
-
-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
-/* Similar to secure_tcp_sequence_number but generate a 48 bit value
- * bit's 32-47 increase every key exchange
- * 0-31 hash(source, dest)
- */
-u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport)
-{
- u64 seq;
- __u32 hash[4];
- struct keydata *keyptr = get_keyptr();
-
- hash[0] = (__force u32)saddr;
- hash[1] = (__force u32)daddr;
- hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
- hash[3] = keyptr->secret[11];
-
- seq = half_md4_transform(hash, keyptr->secret);
- seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
-
- seq += ktime_to_ns(ktime_get_real());
- seq &= (1ull << 48) - 1;
-
- return seq;
-}
-EXPORT_SYMBOL(secure_dccp_sequence_number);
-#endif
-
-#endif /* CONFIG_INET */
-
+late_initcall(random_int_secret_init);
/*
* Get a random word for internal kernel use only. Similar to urandom but
@@ -1646,17 +1315,15 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
* value is not cryptographically secure but for several uses the cost of
* depleting entropy is too high
*/
-DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
+DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
unsigned int get_random_int(void)
{
- struct keydata *keyptr;
__u32 *hash = get_cpu_var(get_random_int_hash);
- int ret;
+ unsigned int ret;
- keyptr = get_keyptr();
hash[0] += current->pid + jiffies + get_cycles();
-
- ret = half_md4_transform(hash, keyptr->secret);
+ md5_transform(hash, random_int_secret);
+ ret = hash[0];
put_cpu_var(get_random_int_hash);
return ret;
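
With the networking helpers removed from this file, get_random_int() now perturbs a small per-CPU hash with cheap per-call inputs and mixes it against a boot-time secret via md5_transform(), returning the first word of the state. The rough userspace model below shows that feed-forward flow only; mix32() is a deliberately toy stand-in for md5_transform(), the array sizes are illustrative, and the seeding in main() stands in for random_int_secret_init().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t secret[16];   /* stands in for random_int_secret[] */
static uint32_t hash[4];      /* stands in for the per-CPU get_random_int_hash */

/* toy mixer, NOT md5_transform(): just folds the key into the state */
static void mix32(uint32_t *state, const uint32_t *key)
{
	for (int i = 0; i < 16; i++) {
		state[i & 3] += key[i];
		state[i & 3] = (state[i & 3] << 7) | (state[i & 3] >> 25);
		state[(i + 1) & 3] ^= state[i & 3];
	}
}

static uint32_t get_random_int_model(uint32_t pid, uint32_t jiffies,
				     uint32_t cycles)
{
	hash[0] += pid + jiffies + cycles;   /* cheap per-call perturbation */
	mix32(hash, secret);                 /* mix against the fixed secret */
	return hash[0];                      /* state persists across calls */
}

int main(void)
{
	srand((unsigned int)time(NULL));     /* stands in for get_random_bytes() */
	for (int i = 0; i < 16; i++)
		secret[i] = (uint32_t)rand();

	printf("%08x\n", (unsigned int)get_random_int_model(1234, 100, 42));
	return 0;
}
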
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
new file mode 100644
index 0000000..cf3ee00
--- /dev/null
+++ b/drivers/char/tile-srom.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * SPI Flash ROM driver
+ *
+ * This source code is derived from code provided in "Linux Device
+ * Drivers, Third Edition", by Jonathan Corbet, Alessandro Rubini, and
+ * Greg Kroah-Hartman, published by O'Reilly Media, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/proc_fs.h>
+#include <linux/fcntl.h> /* O_ACCMODE */
+#include <linux/aio.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <hv/hypervisor.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <hv/drv_srom_intf.h>
+
+/*
+ * Size of our hypervisor I/O requests. We break up large transfers
+ * so that we don't spend large uninterrupted spans of time in the
+ * hypervisor. Erasing an SROM sector takes a significant fraction of
+ * a second, so if we allowed the user to, say, do one I/O to write the
+ * entire ROM, we'd get soft lockup timeouts, or worse.
+ */
+#define SROM_CHUNK_SIZE ((size_t)4096)
+
+/*
+ * When hypervisor is busy (e.g. erasing), poll the status periodically.
+ */
+
+/*
+ * Interval to poll the state in msec
+ */
+#define SROM_WAIT_TRY_INTERVAL 20
+
+/*
+ * Maximum times to poll the state
+ */
+#define SROM_MAX_WAIT_TRY_TIMES 1000
+
+struct srom_dev {
+ int hv_devhdl; /* Handle for hypervisor device */
+ u32 total_size; /* Size of this device */
+ u32 sector_size; /* Size of a sector */
+ u32 page_size; /* Size of a page */
+ struct mutex lock; /* Allow only one accessor at a time */
+};
+
+static int srom_major; /* Dynamic major by default */
+module_param(srom_major, int, 0);
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+static int srom_devs; /* Number of SROM partitions */
+static struct cdev srom_cdev;
+static struct class *srom_class;
+static struct srom_dev *srom_devices;
+
+/*
+ * Handle calling the hypervisor and managing EAGAIN/EBUSY.
+ */
+
+static ssize_t _srom_read(int hv_devhdl, void *buf,
+ loff_t off, size_t count)
+{
+ int retval, retries = SROM_MAX_WAIT_TRY_TIMES;
+ for (;;) {
+ retval = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)buf,
+ count, off);
+ if (retval >= 0)
+ return retval;
+ if (retval == HV_EAGAIN)
+ continue;
+ if (retval == HV_EBUSY && --retries > 0) {
+ msleep(SROM_WAIT_TRY_INTERVAL);
+ continue;
+ }
+ pr_err("_srom_read: error %d\n", retval);
+ return -EIO;
+ }
+}
+
+static ssize_t _srom_write(int hv_devhdl, const void *buf,
+ loff_t off, size_t count)
+{
+ int retval, retries = SROM_MAX_WAIT_TRY_TIMES;
+ for (;;) {
+ retval = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)buf,
+ count, off);
+ if (retval >= 0)
+ return retval;
+ if (retval == HV_EAGAIN)
+ continue;
+ if (retval == HV_EBUSY && --retries > 0) {
+ msleep(SROM_WAIT_TRY_INTERVAL);
+ continue;
+ }
+ pr_err("_srom_write: error %d\n", retval);
+ return -EIO;
+ }
+}
+
+/**
+ * srom_open() - Device open routine.
+ * @inode: Inode for this device.
+ * @filp: File for this specific open of the device.
+ *
+ * Returns zero, or an error code.
+ */
+static int srom_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = &srom_devices[iminor(inode)];
+ return 0;
+}
+
+
+/**
+ * srom_release() - Device release routine.
+ * @inode: Inode for this device.
+ * @filp: File for this specific open of the device.
+ *
+ * Returns zero, or an error code.
+ */
+static int srom_release(struct inode *inode, struct file *filp)
+{
+ struct srom_dev *srom = filp->private_data;
+ char dummy;
+
+ /* Make sure we've flushed anything written to the ROM. */
+ mutex_lock(&srom->lock);
+ if (srom->hv_devhdl >= 0)
+ _srom_write(srom->hv_devhdl, &dummy, SROM_FLUSH_OFF, 1);
+ mutex_unlock(&srom->lock);
+
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+
+/**
+ * srom_read() - Read data from the device.
+ * @filp: File for this specific open of the device.
+ * @buf: User's data buffer.
+ * @count: Number of bytes requested.
+ * @f_pos: File position.
+ *
+ * Returns number of bytes read, or an error code.
+ */
+static ssize_t srom_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int retval = 0;
+ void *kernbuf;
+ struct srom_dev *srom = filp->private_data;
+
+ kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL);
+ if (!kernbuf)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&srom->lock)) {
+ retval = -ERESTARTSYS;
+ kfree(kernbuf);
+ return retval;
+ }
+
+ while (count) {
+ int hv_retval;
+ int bytes_this_pass = min(count, SROM_CHUNK_SIZE);
+
+ hv_retval = _srom_read(srom->hv_devhdl, kernbuf,
+ *f_pos, bytes_this_pass);
+ if (hv_retval > 0) {
+ if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
+ retval = -EFAULT;
+ break;
+ }
+ } else if (hv_retval <= 0) {
+ if (retval == 0)
+ retval = hv_retval;
+ break;
+ }
+
+ retval += hv_retval;
+ *f_pos += hv_retval;
+ buf += hv_retval;
+ count -= hv_retval;
+ }
+
+ mutex_unlock(&srom->lock);
+ kfree(kernbuf);
+
+ return retval;
+}
+
+/**
+ * srom_write() - Write data to the device.
+ * @filp: File for this specific open of the device.
+ * @buf: User's data buffer.
+ * @count: Number of bytes requested.
+ * @f_pos: File position.
+ *
+ * Returns number of bytes written, or an error code.
+ */
+static ssize_t srom_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int retval = 0;
+ void *kernbuf;
+ struct srom_dev *srom = filp->private_data;
+
+ kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL);
+ if (!kernbuf)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&srom->lock)) {
+ retval = -ERESTARTSYS;
+ kfree(kernbuf);
+ return retval;
+ }
+
+ while (count) {
+ int hv_retval;
+ int bytes_this_pass = min(count, SROM_CHUNK_SIZE);
+
+ if (copy_from_user(kernbuf, buf, bytes_this_pass) != 0) {
+ retval = -EFAULT;
+ break;
+ }
+
+ hv_retval = _srom_write(srom->hv_devhdl, kernbuf,
+ *f_pos, bytes_this_pass);
+ if (hv_retval <= 0) {
+ if (retval == 0)
+ retval = hv_retval;
+ break;
+ }
+
+ retval += hv_retval;
+ *f_pos += hv_retval;
+ buf += hv_retval;
+ count -= hv_retval;
+ }
+
+ mutex_unlock(&srom->lock);
+ kfree(kernbuf);
+
+ return retval;
+}
+
+/* Provide our own implementation so we can use srom->total_size. */
+loff_t srom_llseek(struct file *filp, loff_t offset, int origin)
+{
+ struct srom_dev *srom = filp->private_data;
+
+ if (mutex_lock_interruptible(&srom->lock))
+ return -ERESTARTSYS;
+
+ switch (origin) {
+ case SEEK_END:
+ offset += srom->total_size;
+ break;
+ case SEEK_CUR:
+ offset += filp->f_pos;
+ break;
+ }
+
+ if (offset < 0 || offset > srom->total_size) {
+ offset = -EINVAL;
+ } else {
+ filp->f_pos = offset;
+ filp->f_version = 0;
+ }
+
+ mutex_unlock(&srom->lock);
+
+ return offset;
+}
+
+static ssize_t total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srom_dev *srom = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", srom->total_size);
+}
+
+static ssize_t sector_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srom_dev *srom = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", srom->sector_size);
+}
+
+static ssize_t page_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srom_dev *srom = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", srom->page_size);
+}
+
+static struct device_attribute srom_dev_attrs[] = {
+ __ATTR(total_size, S_IRUGO, total_show, NULL),
+ __ATTR(sector_size, S_IRUGO, sector_show, NULL),
+ __ATTR(page_size, S_IRUGO, page_show, NULL),
+ __ATTR_NULL
+};
+
+static char *srom_devnode(struct device *dev, mode_t *mode)
+{
+ *mode = S_IRUGO | S_IWUSR;
+ return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
+}
+
+/*
+ * The fops
+ */
+static const struct file_operations srom_fops = {
+ .owner = THIS_MODULE,
+ .llseek = srom_llseek,
+ .read = srom_read,
+ .write = srom_write,
+ .open = srom_open,
+ .release = srom_release,
+};
+
+/**
+ * srom_setup_minor() - Initialize per-minor information.
+ * @srom: Per-device SROM state.
+ * @index: Device to set up.
+ */
+static int srom_setup_minor(struct srom_dev *srom, int index)
+{
+ struct device *dev;
+ int devhdl = srom->hv_devhdl;
+
+ mutex_init(&srom->lock);
+
+ if (_srom_read(devhdl, &srom->total_size,
+ SROM_TOTAL_SIZE_OFF, sizeof(srom->total_size)) < 0)
+ return -EIO;
+ if (_srom_read(devhdl, &srom->sector_size,
+ SROM_SECTOR_SIZE_OFF, sizeof(srom->sector_size)) < 0)
+ return -EIO;
+ if (_srom_read(devhdl, &srom->page_size,
+ SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0)
+ return -EIO;
+
+ dev = device_create(srom_class, &platform_bus,
+ MKDEV(srom_major, index), srom, "%d", index);
+ return IS_ERR(dev) ? PTR_ERR(dev) : 0;
+}
+
+/** srom_init() - Initialize the driver's module. */
+static int srom_init(void)
+{
+ int result, i;
+ dev_t dev = MKDEV(srom_major, 0);
+
+ /*
+ * Start with a plausible number of partitions; the krealloc() call
+ * below will yield about log(srom_devs) additional allocations.
+ */
+ srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL);
+
+ /* Discover the number of srom partitions. */
+ for (i = 0; ; i++) {
+ int devhdl;
+ char buf[20];
+ struct srom_dev *new_srom_devices =
+ krealloc(srom_devices, (i+1) * sizeof(struct srom_dev),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!new_srom_devices) {
+ result = -ENOMEM;
+ goto fail_mem;
+ }
+ srom_devices = new_srom_devices;
+ sprintf(buf, "srom/0/%d", i);
+ devhdl = hv_dev_open((HV_VirtAddr)buf, 0);
+ if (devhdl < 0) {
+ if (devhdl != HV_ENODEV)
+ pr_notice("srom/%d: hv_dev_open failed: %d.\n",
+ i, devhdl);
+ break;
+ }
+ srom_devices[i].hv_devhdl = devhdl;
+ }
+ srom_devs = i;
+
+ /* Bail out early if we have no partitions at all. */
+ if (srom_devs == 0) {
+ result = -ENODEV;
+ goto fail_mem;
+ }
+
+ /* Register our major, and accept a dynamic number. */
+ if (srom_major)
+ result = register_chrdev_region(dev, srom_devs, "srom");
+ else {
+ result = alloc_chrdev_region(&dev, 0, srom_devs, "srom");
+ srom_major = MAJOR(dev);
+ }
+ if (result < 0)
+ goto fail_mem;
+
+ /* Register a character device. */
+ cdev_init(&srom_cdev, &srom_fops);
+ srom_cdev.owner = THIS_MODULE;
+ srom_cdev.ops = &srom_fops;
+ result = cdev_add(&srom_cdev, dev, srom_devs);
+ if (result < 0)
+ goto fail_chrdev;
+
+ /* Create a sysfs class. */
+ srom_class = class_create(THIS_MODULE, "srom");
+ if (IS_ERR(srom_class)) {
+ result = PTR_ERR(srom_class);
+ goto fail_cdev;
+ }
+ srom_class->dev_attrs = srom_dev_attrs;
+ srom_class->devnode = srom_devnode;
+
+ /* Do per-partition initialization */
+ for (i = 0; i < srom_devs; i++) {
+ result = srom_setup_minor(srom_devices + i, i);
+ if (result < 0)
+ goto fail_class;
+ }
+
+ return 0;
+
+fail_class:
+ for (i = 0; i < srom_devs; i++)
+ device_destroy(srom_class, MKDEV(srom_major, i));
+ class_destroy(srom_class);
+fail_cdev:
+ cdev_del(&srom_cdev);
+fail_chrdev:
+ unregister_chrdev_region(dev, srom_devs);
+fail_mem:
+ kfree(srom_devices);
+ return result;
+}
+
+/** srom_cleanup() - Clean up the driver's module. */
+static void srom_cleanup(void)
+{
+ int i;
+ for (i = 0; i < srom_devs; i++)
+ device_destroy(srom_class, MKDEV(srom_major, i));
+ class_destroy(srom_class);
+ cdev_del(&srom_cdev);
+ unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs);
+ kfree(srom_devices);
+}
+
+module_init(srom_init);
+module_exit(srom_cleanup);
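
The two hypervisor wrappers at the top of this driver share one retry policy: retry immediately on a transient error, poll with a fixed sleep while the device is busy (erases can take a while), and give up with -EIO after a bounded number of attempts. The self-contained userspace sketch below models that policy with a fake backend; IO_AGAIN and IO_BUSY are illustrative stand-ins for HV_EAGAIN and HV_EBUSY.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define WAIT_TRY_INTERVAL_MS 20      /* mirrors SROM_WAIT_TRY_INTERVAL */
#define MAX_WAIT_TRIES       1000    /* mirrors SROM_MAX_WAIT_TRY_TIMES */

/* illustrative stand-ins for the hypervisor's HV_EAGAIN / HV_EBUSY codes */
enum { IO_AGAIN = -1000, IO_BUSY = -1001 };

static int do_io(int (*backend)(void *, size_t), void *buf, size_t count)
{
	int retries = MAX_WAIT_TRIES;

	for (;;) {
		int ret = backend(buf, count);

		if (ret >= 0)
			return ret;                  /* bytes transferred */
		if (ret == IO_AGAIN)
			continue;                    /* transient, retry at once */
		if (ret == IO_BUSY && --retries > 0) {
			usleep(WAIT_TRY_INTERVAL_MS * 1000);
			continue;                    /* device busy, poll later */
		}
		fprintf(stderr, "io error %d\n", ret);
		return -EIO;
	}
}

static int fake_backend(void *buf, size_t count)
{
	(void)buf;
	return (int)count;                           /* pretend it all succeeded */
}

int main(void)
{
	char buf[64];

	return do_io(fake_backend, buf, sizeof(buf)) < 0 ? 1 : 0;
}
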
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 7beb0e2..caf8012 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -534,6 +534,7 @@ void tpm_get_timeouts(struct tpm_chip *chip)
struct duration_t *duration_cap;
ssize_t rc;
u32 timeout;
+ unsigned int scale = 1;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
@@ -545,24 +546,30 @@ void tpm_get_timeouts(struct tpm_chip *chip)
if (rc)
goto duration;
- if (be32_to_cpu(tpm_cmd.header.out.length)
- != 4 * sizeof(u32))
- goto duration;
+ if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
+ be32_to_cpu(tpm_cmd.header.out.length)
+ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
+ return;
timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
/* Don't overwrite default if value is 0 */
timeout = be32_to_cpu(timeout_cap->a);
+ if (timeout && timeout < 1000) {
+ /* timeouts in msec rather than usec */
+ scale = 1000;
+ chip->vendor.timeout_adjusted = true;
+ }
if (timeout)
- chip->vendor.timeout_a = usecs_to_jiffies(timeout);
+ chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
timeout = be32_to_cpu(timeout_cap->b);
if (timeout)
- chip->vendor.timeout_b = usecs_to_jiffies(timeout);
+ chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
timeout = be32_to_cpu(timeout_cap->c);
if (timeout)
- chip->vendor.timeout_c = usecs_to_jiffies(timeout);
+ chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
timeout = be32_to_cpu(timeout_cap->d);
if (timeout)
- chip->vendor.timeout_d = usecs_to_jiffies(timeout);
+ chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
duration:
tpm_cmd.header.in = tpm_getcap_header;
@@ -575,23 +582,31 @@ duration:
if (rc)
return;
- if (be32_to_cpu(tpm_cmd.header.out.return_code)
- != 3 * sizeof(u32))
+ if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
+ be32_to_cpu(tpm_cmd.header.out.length)
+ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
return;
+
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
+ chip->vendor.duration[TPM_MEDIUM] =
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
+ chip->vendor.duration[TPM_LONG] =
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
+ * We also scale the TPM_MEDIUM and TPM_LONG values by 1000.
*/
- if (chip->vendor.duration[TPM_SHORT] < (HZ/100))
+ if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) {
chip->vendor.duration[TPM_SHORT] = HZ;
-
- chip->vendor.duration[TPM_MEDIUM] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
- chip->vendor.duration[TPM_LONG] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+ chip->vendor.duration[TPM_MEDIUM] *= 1000;
+ chip->vendor.duration[TPM_LONG] *= 1000;
+ chip->vendor.duration_adjusted = true;
+ dev_info(chip->dev, "Adjusting TPM timeout parameters.");
+ }
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
@@ -600,7 +615,7 @@ void tpm_continue_selftest(struct tpm_chip *chip)
u8 data[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 10, /* length */
- 0, 0, 0, 83, /* TPM_ORD_GetCapability */
+ 0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */
};
tpm_transmit(chip, data, sizeof(data));
@@ -863,18 +878,24 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
data = tpm_cmd.params.readpubek_out_buffer;
str +=
sprintf(str,
- "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
- "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X"
- " %02X %02X %02X %02X %02X %02X %02X %02X\n"
- "Modulus length: %d\nModulus: \n",
- data[10], data[11], data[12], data[13], data[14],
- data[15], data[16], data[17], data[22], data[23],
- data[24], data[25], data[26], data[27], data[28],
- data[29], data[30], data[31], data[32], data[33],
- be32_to_cpu(*((__be32 *) (data + 34))));
+ "Algorithm: %02X %02X %02X %02X\n"
+ "Encscheme: %02X %02X\n"
+ "Sigscheme: %02X %02X\n"
+ "Parameters: %02X %02X %02X %02X "
+ "%02X %02X %02X %02X "
+ "%02X %02X %02X %02X\n"
+ "Modulus length: %d\n"
+ "Modulus:\n",
+ data[0], data[1], data[2], data[3],
+ data[4], data[5],
+ data[6], data[7],
+ data[12], data[13], data[14], data[15],
+ data[16], data[17], data[18], data[19],
+ data[20], data[21], data[22], data[23],
+ be32_to_cpu(*((__be32 *) (data + 24))));
for (i = 0; i < 256; i++) {
- str += sprintf(str, "%02X ", data[i + 38]);
+ str += sprintf(str, "%02X ", data[i + 28]);
if ((i + 1) % 16 == 0)
str += sprintf(str, "\n");
}
@@ -937,6 +958,35 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
+ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d %d %d [%s]\n",
+ jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
+ jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
+ jiffies_to_usecs(chip->vendor.duration[TPM_LONG]),
+ chip->vendor.duration_adjusted
+ ? "adjusted" : "original");
+}
+EXPORT_SYMBOL_GPL(tpm_show_durations);
+
+ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d %d %d %d [%s]\n",
+ jiffies_to_usecs(chip->vendor.timeout_a),
+ jiffies_to_usecs(chip->vendor.timeout_b),
+ jiffies_to_usecs(chip->vendor.timeout_c),
+ jiffies_to_usecs(chip->vendor.timeout_d),
+ chip->vendor.timeout_adjusted
+ ? "adjusted" : "original");
+}
+EXPORT_SYMBOL_GPL(tpm_show_timeouts);
+
ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
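
The timeout hunk above works around chips that report their TPM timeouts in milliseconds instead of the microseconds the specification calls for: if timeout a is non-zero yet below 1000, all four values are scaled by 1000 and the chip is flagged as adjusted. The small userspace model below shows just that heuristic; the struct and values are illustrative and the jiffies conversion is left out.

#include <stdbool.h>
#include <stdio.h>

struct tpm_timeouts {
	unsigned long a, b, c, d;   /* reported values, nominally in usec */
	bool adjusted;
};

static void fix_up_timeouts(struct tpm_timeouts *t)
{
	unsigned long scale = 1;

	/*
	 * As in the patch, infer the unit from timeout a: a non-zero value
	 * below 1000 usec is implausible, so assume it was really msec.
	 */
	if (t->a && t->a < 1000) {
		scale = 1000;
		t->adjusted = true;
	}

	/* zero means "keep the default", so only scale non-zero fields */
	if (t->a)
		t->a *= scale;
	if (t->b)
		t->b *= scale;
	if (t->c)
		t->c *= scale;
	if (t->d)
		t->d *= scale;
}

int main(void)
{
	struct tpm_timeouts t = { .a = 750, .b = 2000, .c = 750, .d = 750 };

	fix_up_timeouts(&t);
	printf("a=%lu b=%lu c=%lu d=%lu usec [%s]\n", t.a, t.b, t.c, t.d,
	       t.adjusted ? "adjusted" : "original");
	return 0;
}
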
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 72ddb03..9c4163c 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,6 +56,10 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
char *);
extern ssize_t tpm_show_temp_deactivated(struct device *,
struct device_attribute *attr, char *);
+extern ssize_t tpm_show_durations(struct device *,
+ struct device_attribute *attr, char *);
+extern ssize_t tpm_show_timeouts(struct device *,
+ struct device_attribute *attr, char *);
struct tpm_chip;
@@ -67,6 +71,7 @@ struct tpm_vendor_specific {
unsigned long base; /* TPM base address */
int irq;
+ int probed_irq;
int region_size;
int have_region;
@@ -81,7 +86,9 @@ struct tpm_vendor_specific {
struct list_head list;
int locality;
unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */
+ bool timeout_adjusted;
unsigned long duration[3]; /* jiffies */
+ bool duration_adjusted;
wait_queue_head_t read_queue;
wait_queue_head_t int_queue;
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index a605cb7..82facc9 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -330,12 +330,12 @@ static int __init init_nsc(void)
pdev->dev.driver = &nsc_drv.driver;
pdev->dev.release = tpm_nsc_remove;
- if ((rc = platform_device_register(pdev)) < 0)
- goto err_free_dev;
+ if ((rc = platform_device_add(pdev)) < 0)
+ goto err_put_dev;
if (request_region(base, 2, "tpm_nsc0") == NULL ) {
rc = -EBUSY;
- goto err_unreg_dev;
+ goto err_del_dev;
}
if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) {
@@ -382,10 +382,10 @@ static int __init init_nsc(void)
err_rel_reg:
release_region(base, 2);
-err_unreg_dev:
- platform_device_unregister(pdev);
-err_free_dev:
- kfree(pdev);
+err_del_dev:
+ platform_device_del(pdev);
+err_put_dev:
+ platform_device_put(pdev);
err_unreg_drv:
platform_driver_unregister(&nsc_drv);
return rc;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index dd21df5..3f4051a 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
+#include <linux/freezer.h>
#include "tpm.h"
#define TPM_HEADER_SIZE 10
@@ -79,7 +80,7 @@ enum tis_defaults {
static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
struct acpi_device *acpi = pnp_acpi_device(dev);
@@ -93,7 +94,7 @@ static int is_itpm(struct pnp_dev *dev)
return 0;
}
#else
-static int is_itpm(struct pnp_dev *dev)
+static inline int is_itpm(struct pnp_dev *dev)
{
return 0;
}
@@ -120,7 +121,7 @@ static void release_locality(struct tpm_chip *chip, int l, int force)
static int request_locality(struct tpm_chip *chip, int l)
{
- unsigned long stop;
+ unsigned long stop, timeout;
long rc;
if (check_locality(chip, l) >= 0)
@@ -129,17 +130,25 @@ static int request_locality(struct tpm_chip *chip, int l)
iowrite8(TPM_ACCESS_REQUEST_USE,
chip->vendor.iobase + TPM_ACCESS(l));
+ stop = jiffies + chip->vendor.timeout_a;
+
if (chip->vendor.irq) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -1;
rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
(check_locality
(chip, l) >= 0),
- chip->vendor.timeout_a);
+ timeout);
if (rc > 0)
return l;
-
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
} else {
/* wait for burstcount */
- stop = jiffies + chip->vendor.timeout_a;
do {
if (check_locality(chip, l) >= 0)
return l;
@@ -196,15 +205,24 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
if ((status & mask) == mask)
return 0;
+ stop = jiffies + timeout;
+
if (chip->vendor.irq) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
rc = wait_event_interruptible_timeout(*queue,
((tpm_tis_status
(chip) & mask) ==
mask), timeout);
if (rc > 0)
return 0;
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
} else {
- stop = jiffies + timeout;
do {
msleep(TPM_TIMEOUT);
status = tpm_tis_status(chip);
@@ -288,11 +306,10 @@ MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
int rc, status, burstcnt;
size_t count = 0;
- u32 ordinal;
if (request_locality(chip, 0) < 0)
return -EBUSY;
@@ -327,8 +344,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
/* write last byte */
iowrite8(buf[count],
- chip->vendor.iobase +
- TPM_DATA_FIFO(chip->vendor.locality));
+ chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
&chip->vendor.int_queue);
status = tpm_tis_status(chip);
@@ -337,6 +353,28 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
goto out_err;
}
+ return 0;
+
+out_err:
+ tpm_tis_ready(chip);
+ release_locality(chip, chip->vendor.locality, 0);
+ return rc;
+}
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc;
+ u32 ordinal;
+
+ rc = tpm_tis_send_data(chip, buf, len);
+ if (rc < 0)
+ return rc;
+
/* go and do it */
iowrite8(TPM_STS_GO,
chip->vendor.iobase + TPM_STS(chip->vendor.locality));
@@ -358,6 +396,47 @@ out_err:
return rc;
}
+/*
+ * Early probing for iTPM with STS_DATA_EXPECT flaw.
+ * Try sending command without itpm flag set and if that
+ * fails, repeat with itpm flag set.
+ */
+static int probe_itpm(struct tpm_chip *chip)
+{
+ int rc = 0;
+ u8 cmd_getticks[] = {
+ 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0xf1
+ };
+ size_t len = sizeof(cmd_getticks);
+ int rem_itpm = itpm;
+
+ itpm = 0;
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0)
+ goto out;
+
+ tpm_tis_ready(chip);
+ release_locality(chip, chip->vendor.locality, 0);
+
+ itpm = 1;
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0) {
+ dev_info(chip->dev, "Detected an iTPM.\n");
+ rc = 1;
+ } else
+ rc = -EFAULT;
+
+out:
+ itpm = rem_itpm;
+ tpm_tis_ready(chip);
+ release_locality(chip, chip->vendor.locality, 0);
+
+ return rc;
+}
+
static const struct file_operations tis_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -376,6 +455,8 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
static struct attribute *tis_attrs[] = {
&dev_attr_pubek.attr,
@@ -385,7 +466,9 @@ static struct attribute *tis_attrs[] = {
&dev_attr_owned.attr,
&dev_attr_temp_deactivated.attr,
&dev_attr_caps.attr,
- &dev_attr_cancel.attr, NULL,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr, NULL,
};
static struct attribute_group tis_attr_grp = {
@@ -416,7 +499,7 @@ static irqreturn_t tis_int_probe(int irq, void *dev_id)
if (interrupt == 0)
return IRQ_NONE;
- chip->vendor.irq = irq;
+ chip->vendor.probed_irq = irq;
/* Clear interrupts handled with TPM_EOI */
iowrite32(interrupt,
@@ -464,7 +547,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
resource_size_t len, unsigned int irq)
{
u32 vendor, intfcaps, intmask;
- int rc, i;
+ int rc, i, irq_s, irq_e;
struct tpm_chip *chip;
if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
@@ -493,6 +576,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
+ if (!itpm) {
+ itpm = probe_itpm(chip);
+ if (itpm < 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+ }
+
if (itpm)
dev_info(dev, "Intel iTPM workaround enabled\n");
@@ -522,6 +613,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");
+ /* get the timeouts before testing for irqs */
+ tpm_get_timeouts(chip);
+
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);
@@ -540,13 +634,19 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
if (interrupts)
chip->vendor.irq = irq;
if (interrupts && !chip->vendor.irq) {
- chip->vendor.irq =
+ irq_s =
ioread8(chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
+ if (irq_s) {
+ irq_e = irq_s;
+ } else {
+ irq_s = 3;
+ irq_e = 15;
+ }
- for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
+ for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
iowrite8(i, chip->vendor.iobase +
- TPM_INT_VECTOR(chip->vendor.locality));
+ TPM_INT_VECTOR(chip->vendor.locality));
if (request_irq
(i, tis_int_probe, IRQF_SHARED,
chip->vendor.miscdev.name, chip) != 0) {
@@ -568,9 +668,22 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
+ chip->vendor.probed_irq = 0;
+
/* Generate Interrupts */
tpm_gen_interrupt(chip);
+ chip->vendor.irq = chip->vendor.probed_irq;
+
+ /* free_irq will call into tis_int_probe;
+ clear all irqs we haven't seen while doing
+ tpm_gen_interrupt */
+ iowrite32(ioread32
+ (chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality)),
+ chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality));
+
/* Turn off */
iowrite32(intmask,
chip->vendor.iobase +
@@ -609,7 +722,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
list_add(&chip->vendor.list, &tis_chips);
spin_unlock(&tis_lock);
- tpm_get_timeouts(chip);
tpm_continue_selftest(chip);
return 0;
@@ -619,6 +731,29 @@ out_err:
tpm_remove_hardware(chip->dev);
return rc;
}
+
+static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
+{
+ u32 intmask;
+
+ /* reenable interrupts that the device may have lost or
+ that BIOS/firmware may have disabled */
+ iowrite8(chip->vendor.irq, chip->vendor.iobase +
+ TPM_INT_VECTOR(chip->vendor.locality));
+
+ intmask =
+ ioread32(chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
+ | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
+
+ iowrite32(intmask,
+ chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
+}
+
+
#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
const struct pnp_device_id *pnp_id)
@@ -650,6 +785,9 @@ static int tpm_tis_pnp_resume(struct pnp_dev *dev)
struct tpm_chip *chip = pnp_get_drvdata(dev);
int ret;
+ if (chip->vendor.irq)
+ tpm_tis_reenable_interrupts(chip);
+
ret = tpm_pm_resume(&dev->dev);
if (!ret)
tpm_continue_selftest(chip);
@@ -702,6 +840,11 @@ static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
static int tpm_tis_resume(struct platform_device *dev)
{
+ struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+
+ if (chip->vendor.irq)
+ tpm_tis_reenable_interrupts(chip);
+
return tpm_pm_resume(&dev->dev);
}
static struct platform_driver tis_drv = {
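
The request_locality() and wait_for_stat() changes above fix two things at once: the overall deadline is computed before the wait so a retried wait cannot extend the total budget, and a wait interrupted only because the task is being frozen is retried instead of being reported as a failure. The self-contained userspace sketch below models that deadline-plus-retry shape; the condition, the freezer check, and the millisecond clock are illustrative stand-ins.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static bool freezing_now(void) { return false; }  /* stands in for freezing(current) */
static bool condition_met(void) { return true; }  /* stands in for check_locality() etc. */

/* stand-in for wait_event_interruptible_timeout(): >0 done, 0 timed out, <0 interrupted */
static int wait_for_condition(long timeout_ms)
{
	long stop = now_ms() + timeout_ms;
	struct timespec tick = { .tv_sec = 0, .tv_nsec = 1000000 };

	while (now_ms() < stop) {
		if (condition_met())
			return 1;
		nanosleep(&tick, NULL);
	}
	return 0;
}

static int wait_with_deadline(long budget_ms)
{
	long stop = now_ms() + budget_ms;   /* fixed up front, as in the patch */

	for (;;) {
		long timeout = stop - now_ms();
		int rc;

		if (timeout <= 0)
			return -1;          /* budget exhausted */
		rc = wait_for_condition(timeout);
		if (rc > 0)
			return 0;           /* condition met */
		if (rc < 0 && freezing_now())
			continue;           /* freezer woke us: retry, budget shrinks */
		return -1;                  /* genuine timeout or signal */
	}
}

int main(void)
{
	printf("wait: %s\n", wait_with_deadline(100) ? "failed" : "ok");
	return 0;
}
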
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 4168c88..3530927 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -2,3 +2,6 @@
config CLKDEV_LOOKUP
bool
select HAVE_CLK
+
+config HAVE_MACH_CLKDEV
+ bool
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index dc7c033..32a77be 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -26,6 +26,7 @@
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
@@ -150,13 +151,13 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
- int ret;
+ int k, ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
- return ret;
+ goto err0;
}
/* make sure channel is disabled */
@@ -174,9 +175,38 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
sh_cmt_write(p, CMCOR, 0xffffffff);
sh_cmt_write(p, CMCNT, 0);
+ /*
+ * According to the sh73a0 user's manual, as CMCNT can be operated
+ * only by the RCLK (Pseudo 32 KHz), there's one restriction on
+ * modifying CMCNT register; two RCLK cycles are necessary before
+ * this register is either read or any modification of the value
+ * it holds is reflected in the LSI's actual operation.
+ *
+ * While at it, we're supposed to clear out the CMCNT as of this
+ * moment, so make sure it's processed properly here. This will
+ * take RCLKx2 at maximum.
+ */
+ for (k = 0; k < 100; k++) {
+ if (!sh_cmt_read(p, CMCNT))
+ break;
+ udelay(1);
+ }
+
+ if (sh_cmt_read(p, CMCNT)) {
+ dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
+ ret = -ETIMEDOUT;
+ goto err1;
+ }
+
/* enable channel */
sh_cmt_start_stop_ch(p, 1);
return 0;
+ err1:
+ /* stop clock */
+ clk_disable(p->clk);
+
+ err0:
+ return ret;
}
static void sh_cmt_disable(struct sh_cmt_priv *p)
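
The sh_cmt hunk above notes that CMCNT lives in the RCLK clock domain, so a freshly written zero can take up to two RCLK cycles to become visible; the enable path therefore polls the counter a bounded number of times and bails out with -ETIMEDOUT if it never reads back as zero. Below is a small userspace model of that bounded poll; read_cmcnt() is an illustrative stand-in for sh_cmt_read(p, CMCNT) that simply delivers the cleared value a few reads late.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static unsigned int fake_cmcnt = 3;      /* pretends to be the CMCNT register */

/* stand-in for sh_cmt_read(p, CMCNT): the cleared value "arrives" late */
static unsigned int read_cmcnt(void)
{
	if (fake_cmcnt)
		fake_cmcnt--;
	return fake_cmcnt;
}

static int wait_cmcnt_clear(void)
{
	for (int k = 0; k < 100; k++) {
		if (!read_cmcnt())
			return 0;            /* counter observed as zero */
		usleep(1);                   /* models udelay(1) */
	}
	return read_cmcnt() ? -ETIMEDOUT : 0;
}

int main(void)
{
	int ret = wait_cmcnt_clear();

	printf("%s\n", ret ? "cannot clear CMCNT" : "CMCNT cleared");
	return ret ? 1 : 0;
}
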
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 0debc17..e55814b 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -29,7 +29,8 @@
#include <linux/connector.h>
#include <linux/gfp.h>
#include <linux/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
+
#include <asm/unaligned.h>
#include <linux/cn_proc.h>
@@ -56,6 +57,7 @@ void proc_fork_connector(struct task_struct *task)
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE];
struct timespec ts;
+ struct task_struct *parent;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
@@ -66,8 +68,11 @@ void proc_fork_connector(struct task_struct *task)
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
ev->what = PROC_EVENT_FORK;
- ev->event_data.fork.parent_pid = task->real_parent->pid;
- ev->event_data.fork.parent_tgid = task->real_parent->tgid;
+ rcu_read_lock();
+ parent = rcu_dereference(task->real_parent);
+ ev->event_data.fork.parent_pid = parent->pid;
+ ev->event_data.fork.parent_tgid = parent->tgid;
+ rcu_read_unlock();
ev->event_data.fork.child_pid = task->pid;
ev->event_data.fork.child_tgid = task->tgid;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0a5bea9..987a165 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1199,6 +1199,26 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_quick_get);
+/**
+ * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
+ * @cpu: CPU number
+ *
+ * Just return the max possible frequency for a given CPU.
+ */
+unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ ret_freq = policy->max;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get_max);
+
static unsigned int __cpufreq_get(unsigned int cpu)
{
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 7b0603e..cdc02ac 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu)
pr = per_cpu(processors, cpu);
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+ if (!pr)
+ return -ENODEV;
+
status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index bf50924..d4c5423 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -25,9 +25,19 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
-static void (*pm_idle_old)(void);
static int enabled_devices;
+static int off __read_mostly;
+static int initialized __read_mostly;
+
+int cpuidle_disabled(void)
+{
+ return off;
+}
+void disable_cpuidle(void)
+{
+ off = 1;
+}
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
@@ -46,25 +56,23 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
* cpuidle_idle_call - the main idle loop
*
* NOTE: no locks or semaphores should be used here
+ * return non-zero on failure
*/
-static void cpuidle_idle_call(void)
+int cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_state *target_state;
int next_state;
+ if (off)
+ return -ENODEV;
+
+ if (!initialized)
+ return -ENODEV;
+
/* check if the device is ready */
- if (!dev || !dev->enabled) {
- if (pm_idle_old)
- pm_idle_old();
- else
-#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
- default_idle();
-#else
- local_irq_enable();
-#endif
- return;
- }
+ if (!dev || !dev->enabled)
+ return -EBUSY;
#if 0
/* shows regressions, re-enable for 2.6.29 */
@@ -89,7 +97,7 @@ static void cpuidle_idle_call(void)
next_state = cpuidle_curr_governor->select(dev);
if (need_resched()) {
local_irq_enable();
- return;
+ return 0;
}
target_state = &dev->states[next_state];
@@ -114,6 +122,8 @@ static void cpuidle_idle_call(void)
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev);
+
+ return 0;
}
/**
@@ -121,10 +131,10 @@ static void cpuidle_idle_call(void)
*/
void cpuidle_install_idle_handler(void)
{
- if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
+ if (enabled_devices) {
/* Make sure all changes finished before we switch to new idle */
smp_wmb();
- pm_idle = cpuidle_idle_call;
+ initialized = 1;
}
}
@@ -133,8 +143,8 @@ void cpuidle_install_idle_handler(void)
*/
void cpuidle_uninstall_idle_handler(void)
{
- if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
- pm_idle = pm_idle_old;
+ if (enabled_devices) {
+ initialized = 0;
cpuidle_kick_cpus();
}
}
@@ -427,7 +437,8 @@ static int __init cpuidle_init(void)
{
int ret;
- pm_idle_old = pm_idle;
+ if (cpuidle_disabled())
+ return -ENODEV;
ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
if (ret)
@@ -438,4 +449,5 @@ static int __init cpuidle_init(void)
return 0;
}
+module_param(off, int, 0444);
core_initcall(cpuidle_init);
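
With pm_idle no longer swapped, cpuidle_idle_call() now returns non-zero whenever cpuidle is off, not yet installed, or has no enabled device, and the caller is expected to fall back to its own default idle routine in that case. The userspace model below sketches that contract under the assumption that the architecture idle loop checks the return value; both idle routines and the state flags are stand-ins.

#include <stdbool.h>
#include <stdio.h>

static bool cpuidle_off = true;          /* models the new "off" module parameter */
static bool cpuidle_installed = false;   /* models the new "initialized" flag */
static bool device_enabled = false;      /* models dev && dev->enabled */

static int cpuidle_idle_call_model(void)
{
	if (cpuidle_off)
		return -1;                   /* -ENODEV in the patch */
	if (!cpuidle_installed)
		return -1;                   /* -ENODEV in the patch */
	if (!device_enabled)
		return -1;                   /* -EBUSY in the patch */
	puts("enter governor-selected idle state");
	return 0;
}

static void default_idle_model(void)
{
	puts("arch default idle");
}

static void arch_idle_loop_body(void)
{
	/* the arch loop checks the result instead of swapping pm_idle */
	if (cpuidle_idle_call_model())
		default_idle_model();
}

int main(void)
{
	arch_idle_loop_body();               /* falls back: cpuidle disabled */

	cpuidle_off = false;
	cpuidle_installed = true;
	device_enabled = true;
	arch_idle_loop_body();               /* uses cpuidle */
	return 0;
}
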
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 33e50d5..38c3fd8 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -13,6 +13,7 @@ extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
extern spinlock_t cpuidle_driver_lock;
+extern int cpuidle_disabled(void);
/* idle loop */
extern void cpuidle_install_idle_handler(void);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index fd1601e..3f7e3ce 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -26,6 +26,9 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
if (!drv)
return -EINVAL;
+ if (cpuidle_disabled())
+ return -ENODEV;
+
spin_lock(&cpuidle_driver_lock);
if (cpuidle_curr_driver) {
spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 724c164..ea2f8e7 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -81,6 +81,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
if (!gov || !gov->select)
return -EINVAL;
+ if (cpuidle_disabled())
+ return -ENODEV;
+
mutex_lock(&cpuidle_lock);
if (__cpuidle_find_governor(gov->name) == NULL) {
ret = 0;
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 2e5b204..d0183dd 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1,6 +1,6 @@
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
*
- * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -31,8 +31,8 @@
#include "n2_core.h"
#define DRV_MODULE_NAME "n2_crypto"
-#define DRV_MODULE_VERSION "0.1"
-#define DRV_MODULE_RELDATE "April 29, 2010"
+#define DRV_MODULE_VERSION "0.2"
+#define DRV_MODULE_RELDATE "July 28, 2011"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -1823,22 +1823,17 @@ static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *de
static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
struct spu_mdesc_info *ip)
{
- const u64 *intr, *ino;
- int intr_len, ino_len;
+ const u64 *ino;
+ int ino_len;
int i;
- intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
- if (!intr)
- return -ENODEV;
-
ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
- if (!ino)
+ if (!ino) {
+ printk("NO 'ino'\n");
return -ENODEV;
+ }
- if (intr_len != ino_len)
- return -EINVAL;
-
- ip->num_intrs = intr_len / sizeof(u64);
+ ip->num_intrs = ino_len / sizeof(u64);
ip->ino_table = kzalloc((sizeof(struct ino_blob) *
ip->num_intrs),
GFP_KERNEL);
@@ -1847,7 +1842,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
for (i = 0; i < ip->num_intrs; i++) {
struct ino_blob *b = &ip->ino_table[i];
- b->intr = intr[i];
+ b->intr = i + 1;
b->ino = ino[i];
}
@@ -2204,6 +2199,10 @@ static struct of_device_id n2_crypto_match[] = {
.name = "n2cp",
.compatible = "SUNW,vf-cwq",
},
+ {
+ .name = "n2cp",
+ .compatible = "SUNW,kt-cwq",
+ },
{},
};
@@ -2228,6 +2227,10 @@ static struct of_device_id n2_mau_match[] = {
.name = "ncp",
.compatible = "SUNW,vf-mau",
},
+ {
+ .name = "ncp",
+ .compatible = "SUNW,kt-mau",
+ },
{},
};
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index a4af858..734ed02 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
@@ -9,6 +9,5 @@ TODO for slave dma
- mxs-dma.c
- dw_dmac
- intel_mid_dma
- - ste_dma40
4. Check other subsystems for dma drivers and merge/move to dmaengine
5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e6d7228..be21e3f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -80,6 +80,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
@@ -156,14 +157,10 @@ struct pl08x_driver_data {
#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
-/* Minimum period between work queue runs */
-#define PL08X_WQ_PERIODMIN 20
-
/* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE 0x2000
/* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define PL08X_MAX_ALLOCS 0x40
#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN 8
@@ -495,10 +492,10 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
struct pl08x_lli_build_data {
struct pl08x_txd *txd;
- struct pl08x_driver_data *pl08x;
struct pl08x_bus_data srcbus;
struct pl08x_bus_data dstbus;
size_t remainder;
+ u32 lli_bus;
};
/*
@@ -551,8 +548,7 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
llis_va[num_llis].src = bd->srcbus.addr;
llis_va[num_llis].dst = bd->dstbus.addr;
llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
- if (bd->pl08x->lli_buses & PL08X_AHB2)
- llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
+ llis_va[num_llis].lli |= bd->lli_bus;
if (cctl & PL080_CONTROL_SRC_INCR)
bd->srcbus.addr += len;
@@ -605,9 +601,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = txd->cctl;
bd.txd = txd;
- bd.pl08x = pl08x;
bd.srcbus.addr = txd->src_addr;
bd.dstbus.addr = txd->dst_addr;
+ bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
/* Find maximum width of the source bus */
bd.srcbus.maxwidth =
@@ -622,25 +618,15 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
/* Set up the bus widths to the maximum */
bd.srcbus.buswidth = bd.srcbus.maxwidth;
bd.dstbus.buswidth = bd.dstbus.maxwidth;
- dev_vdbg(&pl08x->adev->dev,
- "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
- __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
-
/*
* Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
*/
max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
PL080_CONTROL_TRANSFER_SIZE_MASK;
- dev_vdbg(&pl08x->adev->dev,
- "%s max bytes per lli = %zu\n",
- __func__, max_bytes_per_lli);
/* We need to count this down to zero */
bd.remainder = txd->len;
- dev_vdbg(&pl08x->adev->dev,
- "%s remainder = %zu\n",
- __func__, bd.remainder);
/*
* Choose bus to align to
@@ -649,6 +635,16 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
*/
pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
+ dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
+ bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+ bd.srcbus.buswidth,
+ bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+ bd.dstbus.buswidth,
+ bd.remainder, max_bytes_per_lli);
+ dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+ mbus == &bd.srcbus ? "src" : "dst",
+ sbus == &bd.srcbus ? "src" : "dst");
+
if (txd->len < mbus->buswidth) {
/* Less than a bus width available - send as single bytes */
while (bd.remainder) {
@@ -840,15 +836,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
{
int i;
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl");
for (i = 0; i < num_llis; i++) {
dev_vdbg(&pl08x->adev->dev,
- "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
- i,
- &llis_va[i],
- llis_va[i].src,
- llis_va[i].dst,
- llis_va[i].cctl,
- llis_va[i].lli
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, &llis_va[i], llis_va[i].src,
+ llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
);
}
}
@@ -1054,64 +1049,105 @@ pl08x_dma_tx_status(struct dma_chan *chan,
/* PrimeCell DMA extension */
struct burst_table {
- int burstwords;
+ u32 burstwords;
u32 reg;
};
static const struct burst_table burst_sizes[] = {
{
.burstwords = 256,
- .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_256,
},
{
.burstwords = 128,
- .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_128,
},
{
.burstwords = 64,
- .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_64,
},
{
.burstwords = 32,
- .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_32,
},
{
.burstwords = 16,
- .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_16,
},
{
.burstwords = 8,
- .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_8,
},
{
.burstwords = 4,
- .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_4,
},
{
- .burstwords = 1,
- .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .burstwords = 0,
+ .reg = PL080_BSIZE_1,
},
};
+/*
+ * Given the source and destination available bus masks, select which
+ * will be routed to each port. We try to have source and destination
+ * on separate ports, but always respect the allowable settings.
+ */
+static u32 pl08x_select_bus(u8 src, u8 dst)
+{
+ u32 cctl = 0;
+
+ if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
+ cctl |= PL080_CONTROL_DST_AHB2;
+ if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
+ cctl |= PL080_CONTROL_SRC_AHB2;
+
+ return cctl;
+}
+
+static u32 pl08x_cctl(u32 cctl)
+{
+ cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
+ PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
+ PL080_CONTROL_PROT_MASK);
+
+ /* Access the cell in privileged mode, non-bufferable, non-cacheable */
+ return cctl | PL080_CONTROL_PROT_SYS;
+}
+
+static u32 pl08x_width(enum dma_slave_buswidth width)
+{
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return PL080_WIDTH_8BIT;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return PL080_WIDTH_16BIT;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return PL080_WIDTH_32BIT;
+ default:
+ return ~0;
+ }
+}
+
+static u32 pl08x_burst(u32 maxburst)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
+ if (burst_sizes[i].burstwords <= maxburst)
+ break;
+
+ return burst_sizes[i].reg;
+}
+
static int dma_set_runtime_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
- struct pl08x_channel_data *cd = plchan->cd;
enum dma_slave_buswidth addr_width;
- dma_addr_t addr;
- u32 maxburst;
+ u32 width, burst, maxburst;
u32 cctl = 0;
- int i;
if (!plchan->slave)
return -EINVAL;
@@ -1119,11 +1155,9 @@ static int dma_set_runtime_config(struct dma_chan *chan,
/* Transfer direction */
plchan->runtime_direction = config->direction;
if (config->direction == DMA_TO_DEVICE) {
- addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
} else if (config->direction == DMA_FROM_DEVICE) {
- addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
@@ -1132,46 +1166,40 @@ static int dma_set_runtime_config(struct dma_chan *chan,
return -EINVAL;
}
- switch (addr_width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- default:
+ width = pl08x_width(addr_width);
+ if (width == ~0) {
dev_err(&pl08x->adev->dev,
"bad runtime_config: alien address width\n");
return -EINVAL;
}
+ cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
+ cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
+
/*
- * Now decide on a maxburst:
* If this channel will only request single transfers, set this
* down to ONE element. Also select one element if no maxburst
* is specified.
*/
- if (plchan->cd->single || maxburst == 0) {
- cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
+ if (plchan->cd->single)
+ maxburst = 1;
+
+ burst = pl08x_burst(maxburst);
+ cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
+ cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
+
+ if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+ plchan->src_addr = config->src_addr;
+ plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
+ pl08x_select_bus(plchan->cd->periph_buses,
+ pl08x->mem_buses);
} else {
- for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
- if (burst_sizes[i].burstwords <= maxburst)
- break;
- cctl |= burst_sizes[i].reg;
+ plchan->dst_addr = config->dst_addr;
+ plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
+ pl08x_select_bus(pl08x->mem_buses,
+ plchan->cd->periph_buses);
}
- plchan->runtime_addr = addr;
-
- /* Modify the default channel data to fit PrimeCell request */
- cd->cctl = cctl;
-
dev_dbg(&pl08x->adev->dev,
"configured channel %s (%s) for %s, data width %d, "
"maxburst %d words, LE, CCTL=0x%08x\n",
@@ -1270,23 +1298,6 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
return 0;
}
-/*
- * Given the source and destination available bus masks, select which
- * will be routed to each port. We try to have source and destination
- * on separate ports, but always respect the allowable settings.
- */
-static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
-{
- u32 cctl = 0;
-
- if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
- cctl |= PL080_CONTROL_DST_AHB2;
- if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
- cctl |= PL080_CONTROL_SRC_AHB2;
-
- return cctl;
-}
-
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
unsigned long flags)
{
@@ -1338,8 +1349,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
if (pl08x->vd->dualmaster)
- txd->cctl |= pl08x_select_bus(pl08x,
- pl08x->mem_buses, pl08x->mem_buses);
+ txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
+ pl08x->mem_buses);
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
@@ -1356,7 +1367,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
- u8 src_buses, dst_buses;
int ret;
/*
@@ -1390,42 +1400,22 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
txd->direction = direction;
txd->len = sgl->length;
- txd->cctl = plchan->cd->cctl &
- ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
- PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
- PL080_CONTROL_PROT_MASK);
-
- /* Access the cell in privileged mode, non-bufferable, non-cacheable */
- txd->cctl |= PL080_CONTROL_PROT_SYS;
-
if (direction == DMA_TO_DEVICE) {
txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl |= PL080_CONTROL_SRC_INCR;
+ txd->cctl = plchan->dst_cctl;
txd->src_addr = sgl->dma_address;
- if (plchan->runtime_addr)
- txd->dst_addr = plchan->runtime_addr;
- else
- txd->dst_addr = plchan->cd->addr;
- src_buses = pl08x->mem_buses;
- dst_buses = plchan->cd->periph_buses;
+ txd->dst_addr = plchan->dst_addr;
} else if (direction == DMA_FROM_DEVICE) {
txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl |= PL080_CONTROL_DST_INCR;
- if (plchan->runtime_addr)
- txd->src_addr = plchan->runtime_addr;
- else
- txd->src_addr = plchan->cd->addr;
+ txd->cctl = plchan->src_cctl;
+ txd->src_addr = plchan->src_addr;
txd->dst_addr = sgl->dma_address;
- src_buses = plchan->cd->periph_buses;
- dst_buses = pl08x->mem_buses;
} else {
dev_err(&pl08x->adev->dev,
"%s direction unsupported\n", __func__);
return NULL;
}
- txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
-
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
return NULL;
@@ -1676,6 +1666,20 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
return mask ? IRQ_HANDLED : IRQ_NONE;
}
+static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
+{
+ u32 cctl = pl08x_cctl(chan->cd->cctl);
+
+ chan->slave = true;
+ chan->name = chan->cd->bus_id;
+ chan->src_addr = chan->cd->addr;
+ chan->dst_addr = chan->cd->addr;
+ chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
+ pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
+ chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
+ pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
+}
+
/*
* Initialise the DMAC memcpy/slave channels.
* Make a local wrapper to hold required data
@@ -1707,9 +1711,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
chan->state = PL08X_CHAN_IDLE;
if (slave) {
- chan->slave = true;
- chan->name = pl08x->pd->slave_channels[i].bus_id;
chan->cd = &pl08x->pd->slave_channels[i];
+ pl08x_dma_slave_init(chan);
} else {
chan->cd = &pl08x->pd->memcpy_channel;
chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 36144f8..6a483ea 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
atdma->dma_common.cap_mask = pdata->cap_mask;
atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
- size = io->end - io->start + 1;
+ size = resource_size(io);
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
err = -EBUSY;
goto err_kfree;
@@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
atdma->regs = NULL;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(io->start, io->end - io->start + 1);
+ release_mem_region(io->start, resource_size(io));
kfree(atdma);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a92d95e..4234f41 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -41,6 +41,8 @@ struct coh901318_desc {
struct coh901318_lli *lli;
enum dma_data_direction dir;
unsigned long flags;
+ u32 head_config;
+ u32 head_ctrl;
};
struct coh901318_base {
@@ -661,6 +663,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
coh901318_desc_submit(cohc, cohd);
+ /* Program the transaction head */
+ coh901318_set_conf(cohc, cohd->head_config);
+ coh901318_set_ctrl(cohc, cohd->head_ctrl);
coh901318_prep_linked_list(cohc, cohd->lli);
/* start dma job on this channel */
@@ -1091,8 +1096,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
} else
goto err_direction;
- coh901318_set_conf(cohc, config);
-
/* The dma only supports transmitting packets up to
* MAX_DMA_PACKET_SIZE. Calculate the total number of
* dma elements required to send the entire sg list
@@ -1129,16 +1132,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (ret)
goto err_lli_fill;
- /*
- * Set the default ctrl for the channel to the one from the lli,
- * things may have changed due to odd buffer alignment etc.
- */
- coh901318_set_ctrl(cohc, lli->control);
COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
+ cohd->head_config = config;
+ /*
+ * Set the default head ctrl for the channel to the one from the
+ * lli, things may have changed due to odd buffer alignment
+ * etc.
+ */
+ cohd->head_ctrl = lli->control;
cohd->dir = direction;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 48694c3..b48967b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -62,9 +62,9 @@
#include <linux/slab.h>
static DEFINE_MUTEX(dma_list_mutex);
+static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
-static struct idr dma_idr;
/* --- sysfs implementation --- */
@@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else if (err)
- pr_err("dmaengine: failed to get %s: (%d)\n",
- dma_chan_name(chan), err);
+ pr_debug("dmaengine: failed to get %s: (%d)\n",
+ dma_chan_name(chan), err);
else
break;
if (--device->privatecnt == 0)
@@ -1050,8 +1050,6 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
- idr_init(&dma_idr);
- mutex_init(&dma_list_mutex);
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 0766c1e..5d7a49b 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -902,7 +902,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
*
* Returns a valid DMA descriptor or %NULL in case of failure.
*/
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
dma_addr_t src, size_t len, unsigned long flags)
{
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b6d1455..7bd7e98 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -32,6 +32,8 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <asm/irq.h>
#include <mach/sdma.h>
@@ -65,8 +67,8 @@
#define SDMA_ONCE_RTB 0x060
#define SDMA_XTRIG_CONF1 0x070
#define SDMA_XTRIG_CONF2 0x074
-#define SDMA_CHNENBL0_V2 0x200
-#define SDMA_CHNENBL0_V1 0x080
+#define SDMA_CHNENBL0_IMX35 0x200
+#define SDMA_CHNENBL0_IMX31 0x080
#define SDMA_CHNPRI_0 0x100
/*
@@ -299,13 +301,18 @@ struct sdma_firmware_header {
u32 ram_code_size;
};
+enum sdma_devtype {
+ IMX31_SDMA, /* runs on i.mx31 */
+ IMX35_SDMA, /* runs on i.mx35 and later */
+};
+
struct sdma_engine {
struct device *dev;
struct device_dma_parameters dma_parms;
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
void __iomem *regs;
- unsigned int version;
+ enum sdma_devtype devtype;
unsigned int num_events;
struct sdma_context_data *context;
dma_addr_t context_phys;
@@ -314,6 +321,26 @@ struct sdma_engine {
struct sdma_script_start_addrs *script_addrs;
};
+static struct platform_device_id sdma_devtypes[] = {
+ {
+ .name = "imx31-sdma",
+ .driver_data = IMX31_SDMA,
+ }, {
+ .name = "imx35-sdma",
+ .driver_data = IMX35_SDMA,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, sdma_devtypes);
+
+static const struct of_device_id sdma_dt_ids[] = {
+ { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
+ { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sdma_dt_ids);
+
#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */
@@ -321,8 +348,8 @@ struct sdma_engine {
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
- u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1);
-
+ u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
+ SDMA_CHNENBL0_IMX35);
return chnenbl0 + event * 4;
}
@@ -1105,25 +1132,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
}
static int __init sdma_get_firmware(struct sdma_engine *sdma,
- const char *cpu_name, int to_version)
+ const char *fw_name)
{
const struct firmware *fw;
- char *fwname;
const struct sdma_firmware_header *header;
int ret;
const struct sdma_script_start_addrs *addr;
unsigned short *ram_code;
- fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin", cpu_name, to_version);
- if (!fwname)
- return -ENOMEM;
-
- ret = request_firmware(&fw, fwname, sdma->dev);
- if (ret) {
- kfree(fwname);
+ ret = request_firmware(&fw, fw_name, sdma->dev);
+ if (ret)
return ret;
- }
- kfree(fwname);
if (fw->size < sizeof(*header))
goto err_firmware;
@@ -1162,15 +1181,16 @@ static int __init sdma_init(struct sdma_engine *sdma)
int i, ret;
dma_addr_t ccb_phys;
- switch (sdma->version) {
- case 1:
+ switch (sdma->devtype) {
+ case IMX31_SDMA:
sdma->num_events = 32;
break;
- case 2:
+ case IMX35_SDMA:
sdma->num_events = 48;
break;
default:
- dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version);
+ dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
+ sdma->devtype);
return -ENODEV;
}
@@ -1239,6 +1259,10 @@ err_dma_alloc:
static int __init sdma_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(sdma_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ const char *fw_name;
int ret;
int irq;
struct resource *iores;
@@ -1254,7 +1278,7 @@ static int __init sdma_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!iores || irq < 0 || !pdata) {
+ if (!iores || irq < 0) {
ret = -EINVAL;
goto err_irq;
}
@@ -1281,10 +1305,14 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_request_irq;
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
- if (!sdma->script_addrs)
+ if (!sdma->script_addrs) {
+ ret = -ENOMEM;
goto err_alloc;
+ }
- sdma->version = pdata->sdma_version;
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ sdma->devtype = pdev->id_entry->driver_data;
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
@@ -1314,10 +1342,30 @@ static int __init sdma_probe(struct platform_device *pdev)
if (ret)
goto err_init;
- if (pdata->script_addrs)
+ if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
- sdma_get_firmware(sdma, pdata->cpu_name, pdata->to_version);
+ if (pdata) {
+ sdma_get_firmware(sdma, pdata->fw_name);
+ } else {
+ /*
+ * Because the device tree does not encode the ROM script address,
+ * the RAM script in the firmware is mandatory for device tree
+ * probe; otherwise it fails.
+ */
+ ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+ &fw_name);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get firmware name\n");
+ goto err_init;
+ }
+
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get firmware\n");
+ goto err_init;
+ }
+ }
sdma->dma_device.dev = &pdev->dev;
@@ -1365,7 +1413,9 @@ static int __exit sdma_remove(struct platform_device *pdev)
static struct platform_driver sdma_driver = {
.driver = {
.name = "imx-sdma",
+ .of_match_table = sdma_dt_ids,
},
+ .id_table = sdma_devtypes,
.remove = __exit_p(sdma_remove),
};
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index f653517..8a3fdd8 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
return -EAGAIN;
}
device->state = SUSPENDED;
- pci_set_drvdata(pci, device);
pci_save_state(pci);
pci_disable_device(pci);
pci_set_power_state(pci, PCI_D3hot);
@@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci)
}
device->state = RUNNING;
iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
- pci_set_drvdata(pci, device);
return 0;
}
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d845dc4..f519c93 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -73,10 +73,10 @@
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
*/
-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 xor_idx_to_desc = 0xe0;
+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index fab37d1..5e3a40f 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -72,6 +72,17 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index fd7d2b3..6815905 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1706,16 +1706,14 @@ static int __init ipu_probe(struct platform_device *pdev)
ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
/* Remap IPU common registers */
- ipu_data.reg_ipu = ioremap(mem_ipu->start,
- mem_ipu->end - mem_ipu->start + 1);
+ ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
if (!ipu_data.reg_ipu) {
ret = -ENOMEM;
goto err_ioremap_ipu;
}
/* Remap Image Converter and Image DMA Controller registers */
- ipu_data.reg_ic = ioremap(mem_ic->start,
- mem_ic->end - mem_ic->start + 1);
+ ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
if (!ipu_data.reg_ic) {
ret = -ENOMEM;
goto err_ioremap_ic;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 06f9f27..9a353c2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1304,7 +1304,8 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- msp->xor_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ msp->xor_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (!msp->xor_base)
return -EBUSY;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 88aad4f..be641cb 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
memset(mxs_chan->ccw, 0, PAGE_SIZE);
- ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
- 0, "mxs-dma", mxs_dma);
- if (ret)
- goto err_irq;
+ if (mxs_chan->chan_irq != NO_IRQ) {
+ ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
+ 0, "mxs-dma", mxs_dma);
+ if (ret)
+ goto err_irq;
+ }
ret = clk_enable(mxs_dma->clk);
if (ret)
@@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
mxs_dma_disable_chan(mxs_chan);
+ mxs_dma_reset_chan(mxs_chan);
break;
case DMA_PAUSE:
mxs_dma_pause_chan(mxs_chan);
@@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = {
}, {
.name = "mxs-dma-apbx",
.driver_data = MXS_DMA_APBX,
+ }, {
+ /* end of list */
}
};
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index ff5b38f..1ac8d4b 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -45,7 +45,8 @@
#define DMA_STATUS_MASK_BITS 0x3
#define DMA_STATUS_SHIFT_BITS 16
#define DMA_STATUS_IRQ(x) (0x1 << (x))
-#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8))
+#define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8))
+#define DMA_STATUS2_ERR(x) (0x1 << (x))
#define DMA_DESC_WIDTH_SHIFT_BITS 12
#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
@@ -61,6 +62,9 @@
#define MAX_CHAN_NR 8
+#define DMA_MASK_CTL0_MODE 0x33333333
+#define DMA_MASK_CTL2_MODE 0x00003333
+
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
@@ -133,6 +137,7 @@ struct pch_dma {
#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14
+#define PCH_DMA_STS2 0x18
#define dma_readl(pd, name) \
readl((pd)->membase + PCH_DMA_##name)
@@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
struct pch_dma *pd = to_pd(chan->device);
u32 val;
+ int pos;
+
+ if (chan->chan_id < 8)
+ pos = chan->chan_id;
+ else
+ pos = chan->chan_id + 8;
val = dma_readl(pd, CTL2);
if (enable)
- val |= 0x1 << chan->chan_id;
+ val |= 0x1 << pos;
else
- val &= ~(0x1 << chan->chan_id);
+ val &= ~(0x1 << pos);
dma_writel(pd, CTL2, val);
@@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan)
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
struct pch_dma *pd = to_pd(chan->device);
u32 val;
+ u32 mask_mode;
+ u32 mask_ctl;
if (chan->chan_id < 8) {
val = dma_readl(pd, CTL0);
+ mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+ mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val &= mask_mode;
if (pd_chan->dir == DMA_TO_DEVICE)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS);
@@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan)
val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS));
+ val |= mask_ctl;
dma_writel(pd, CTL0, val);
} else {
int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
val = dma_readl(pd, CTL3);
+ mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch);
+ mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch));
+ val &= mask_mode;
if (pd_chan->dir == DMA_TO_DEVICE)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS);
else
val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS));
-
+ val |= mask_ctl;
dma_writel(pd, CTL3, val);
}
@@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
struct pch_dma *pd = to_pd(chan->device);
u32 val;
+ u32 mask_ctl;
+ u32 mask_dir;
if (chan->chan_id < 8) {
+ mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
+ DMA_CTL0_DIR_SHIFT_BITS);
val = dma_readl(pd, CTL0);
-
- val &= ~(DMA_CTL0_MODE_MASK_BITS <<
- (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val &= mask_dir;
val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
-
+ val |= mask_ctl;
dma_writel(pd, CTL0, val);
} else {
int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
-
+ mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch));
+ mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
+ DMA_CTL0_DIR_SHIFT_BITS);
val = dma_readl(pd, CTL3);
-
- val &= ~(DMA_CTL0_MODE_MASK_BITS <<
- (DMA_CTL0_BITS_PER_CH * ch));
+ val &= mask_dir;
val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
-
+ val |= mask_ctl;
dma_writel(pd, CTL3, val);
-
}
dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
chan->chan_id, val);
}
-static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
+static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
struct pch_dma *pd = to_pd(pd_chan->chan.device);
u32 val;
@@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}
+static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
+{
+ struct pch_dma *pd = to_pd(pd_chan->chan.device);
+ u32 val;
+
+ val = dma_readl(pd, STS2);
+ return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
+ DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
+}
+
static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
- if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
+ u32 sts;
+
+ if (pd_chan->chan.chan_id < 8)
+ sts = pdc_get_status0(pd_chan);
+ else
+ sts = pdc_get_status2(pd_chan);
+
+
+ if (sts == DMA_STATUS_IDLE)
return true;
else
return false;
@@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &tmp_list);
}
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
list_splice(&tmp_list, &pd_chan->free_list);
pd_chan->descs_allocated = i;
pd_chan->completed_cookie = chan->cookie = 1;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
pdc_enable_irq(chan, 1);
@@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan)
BUG_ON(!list_empty(&pd_chan->active_list));
BUG_ON(!list_empty(&pd_chan->queue));
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
list_splice_init(&pd_chan->free_list, &tmp_list);
pd_chan->descs_allocated = 0;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
pci_pool_free(pd->pool, desc, desc->txd.phys);
@@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
dma_cookie_t last_completed;
int ret;
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
last_completed = pd_chan->completed_cookie;
last_used = chan->cookie;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
ret = dma_async_is_complete(cookie, last_completed, last_used);
@@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
if (cmd != DMA_TERMINATE_ALL)
return -ENXIO;
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
@@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
list_for_each_entry_safe(desc, _d, &list, desc_node)
pdc_chain_complete(pd_chan, desc);
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
return 0;
}
@@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid)
struct pch_dma *pd = (struct pch_dma *)devid;
struct pch_dma_chan *pd_chan;
u32 sts0;
+ u32 sts2;
int i;
- int ret = IRQ_NONE;
+ int ret0 = IRQ_NONE;
+ int ret2 = IRQ_NONE;
sts0 = dma_readl(pd, STS0);
+ sts2 = dma_readl(pd, STS2);
dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
for (i = 0; i < pd->dma.chancnt; i++) {
pd_chan = &pd->channels[i];
- if (sts0 & DMA_STATUS_IRQ(i)) {
- if (sts0 & DMA_STATUS_ERR(i))
- set_bit(0, &pd_chan->err_status);
+ if (i < 8) {
+ if (sts0 & DMA_STATUS_IRQ(i)) {
+ if (sts0 & DMA_STATUS0_ERR(i))
+ set_bit(0, &pd_chan->err_status);
- tasklet_schedule(&pd_chan->tasklet);
- ret = IRQ_HANDLED;
- }
+ tasklet_schedule(&pd_chan->tasklet);
+ ret0 = IRQ_HANDLED;
+ }
+ } else {
+ if (sts2 & DMA_STATUS_IRQ(i - 8)) {
+ if (sts2 & DMA_STATUS2_ERR(i))
+ set_bit(0, &pd_chan->err_status);
+ tasklet_schedule(&pd_chan->tasklet);
+ ret2 = IRQ_HANDLED;
+ }
+ }
}
/* clear interrupt bits in status register */
- dma_writel(pd, STS0, sts0);
+ if (ret0)
+ dma_writel(pd, STS0, sts0);
+ if (ret2)
+ dma_writel(pd, STS2, sts2);
- return ret;
+ return ret0 | ret2;
}
#ifdef CONFIG_PM
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 6abe1ec..00eee59 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -82,7 +82,7 @@ struct dma_pl330_dmac {
spinlock_t pool_lock;
/* Peripheral channels connected to this DMAC */
- struct dma_pl330_chan peripherals[0]; /* keep at end */
+ struct dma_pl330_chan *peripherals; /* keep at end */
};
struct dma_pl330_desc {
@@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
- desc->req.rqtype = peri->rqtype;
- desc->req.peri = peri->peri_id;
+ if (peri) {
+ desc->req.rqtype = peri->rqtype;
+ desc->req.peri = peri->peri_id;
+ } else {
+ desc->req.rqtype = MEMTOMEM;
+ desc->req.peri = 0;
+ }
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
struct pl330_info *pi;
int burst;
- if (unlikely(!pch || !len || !peri))
+ if (unlikely(!pch || !len))
return NULL;
- if (peri->rqtype != MEMTOMEM)
+ if (peri && peri->rqtype != MEMTOMEM)
return NULL;
pi = &pch->dmac->pif;
@@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
int i, burst_size;
dma_addr_t addr;
- if (unlikely(!pch || !sgl || !sg_len))
+ if (unlikely(!pch || !sgl || !sg_len || !peri))
return NULL;
/* Make sure the direction is consistent */
@@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
struct dma_device *pd;
struct resource *res;
int i, ret, irq;
+ int num_chan;
pdat = adev->dev.platform_data;
- if (!pdat || !pdat->nr_valid_peri) {
- dev_err(&adev->dev, "platform data missing\n");
- return -ENODEV;
- }
-
/* Allocate a new DMAC and its Channels */
- pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
- + sizeof(*pdmac), GFP_KERNEL);
+ pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
dev_err(&adev->dev, "unable to allocate mem\n");
return -ENOMEM;
@@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi = &pdmac->pif;
pi->dev = &adev->dev;
pi->pl330_data = NULL;
- pi->mcbufsz = pdat->mcbuf_sz;
+ pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
request_mem_region(res->start, resource_size(res), "dma-pl330");
@@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- for (i = 0; i < pdat->nr_valid_peri; i++) {
- struct dma_pl330_peri *peri = &pdat->peri[i];
- pch = &pdmac->peripherals[i];
+ num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+ pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
- switch (peri->rqtype) {
- case MEMTOMEM:
+ for (i = 0; i < num_chan; i++) {
+ pch = &pdmac->peripherals[i];
+ if (pdat) {
+ struct dma_pl330_peri *peri = &pdat->peri[i];
+
+ switch (peri->rqtype) {
+ case MEMTOMEM:
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ break;
+ case MEMTODEV:
+ case DEVTOMEM:
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ break;
+ default:
+ dev_err(&adev->dev, "DEVTODEV Not Supported\n");
+ continue;
+ }
+ pch->chan.private = peri;
+ } else {
dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- break;
- case MEMTODEV:
- case DEVTOMEM:
- dma_cap_set(DMA_SLAVE, pd->cap_mask);
- break;
- default:
- dev_err(&adev->dev, "DEVTODEV Not Supported\n");
- continue;
+ pch->chan.private = NULL;
}
INIT_LIST_HEAD(&pch->work_list);
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
- pch->chan.private = peri;
pch->chan.device = pd;
pch->chan.chan_id = i;
pch->dmac = pdmac;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 0283300..7f49235 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -70,12 +70,36 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
- return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ return __raw_readl(addr);
+ else
+ return __raw_readw(addr);
}
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
- __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ __raw_writel(data, addr);
+ else
+ __raw_writew(data, addr);
+}
+
+static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+}
+
+static u32 chcr_read(struct sh_dmae_chan *sh_dc)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}
/*
@@ -120,7 +144,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ u32 chcr = chcr_read(sh_chan);
if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
return true; /* working */
@@ -130,8 +154,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
@@ -144,8 +167,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
@@ -169,18 +191,23 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ if (shdev->pdata->needs_tend_set)
+ sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
- chcr |= CHCR_DE | CHCR_IE;
- sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
+ chcr |= CHCR_DE | shdev->chcr_ie_bit;
+ chcr_write(sh_chan, chcr & ~CHCR_TE);
}
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
- chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
- sh_dmae_writel(sh_chan, chcr, CHCR);
+ chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+ chcr_write(sh_chan, chcr);
}
static void dmae_init(struct sh_dmae_chan *sh_chan)
@@ -192,7 +219,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
LOG2_DEFAULT_XFER_SIZE);
sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
- sh_dmae_writel(sh_chan, chcr, CHCR);
+ chcr_write(sh_chan, chcr);
}
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
@@ -202,23 +229,25 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
return -EBUSY;
sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
- sh_dmae_writel(sh_chan, val, CHCR);
+ chcr_write(sh_chan, val);
return 0;
}
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
u16 __iomem *addr = shdev->dmars;
- int shift = chan_pdata->dmars_bit;
+ unsigned int shift = chan_pdata->dmars_bit;
if (dmae_is_busy(sh_chan))
return -EBUSY;
+ if (pdata->no_dmars)
+ return 0;
+
/* in the case of a missing DMARS resource use first memory window */
if (!addr)
addr = (u16 __iomem *)shdev->chan_reg;
@@ -296,9 +325,7 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
static const struct sh_dmae_slave_config *sh_dmae_find_slave(
struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
- struct dma_device *dma_dev = sh_chan->common.device;
- struct sh_dmae_device *shdev = container_of(dma_dev,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
@@ -771,10 +798,8 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
spin_lock_bh(&sh_chan->desc_lock);
/* DMA work check */
- if (dmae_is_busy(sh_chan)) {
- spin_unlock_bh(&sh_chan->desc_lock);
- return;
- }
+ if (dmae_is_busy(sh_chan))
+ goto sh_chan_xfer_ld_queue_end;
/* Find the first not transferred descriptor */
list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -788,6 +813,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
break;
}
+sh_chan_xfer_ld_queue_end:
spin_unlock_bh(&sh_chan->desc_lock);
}
@@ -846,7 +872,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
spin_lock(&sh_chan->desc_lock);
- chcr = sh_dmae_readl(sh_chan, CHCR);
+ chcr = chcr_read(sh_chan);
if (chcr & CHCR_TE) {
/* DMA stop */
@@ -1144,6 +1170,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
/* platform data */
shdev->pdata = pdata;
+ if (pdata->chcr_offset)
+ shdev->chcr_offset = pdata->chcr_offset;
+ else
+ shdev->chcr_offset = CHCR;
+
+ if (pdata->chcr_ie_bit)
+ shdev->chcr_ie_bit = pdata->chcr_ie_bit;
+ else
+ shdev->chcr_ie_bit = CHCR_IE;
+
platform_set_drvdata(pdev, shdev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 5ae9fc5..dc56576 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -47,10 +47,14 @@ struct sh_dmae_device {
struct list_head node;
u32 __iomem *chan_reg;
u16 __iomem *dmars;
+ unsigned int chcr_offset;
+ u32 chcr_ie_bit;
};
#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
+#define to_sh_dev(chan) container_of(chan->common.device,\
+ struct sh_dmae_device, common)
#endif /* __DMA_SHDMA_H */
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 29d1add..467e4dc 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/amba/bus.h>
#include <plat/ste_dma40.h>
@@ -45,9 +46,6 @@
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0
-/* Hardware designer of the block */
-#define D40_HW_DESIGNER 0x8
-
/**
* enum 40_command - The different commands and/or statuses.
*
@@ -176,8 +174,10 @@ struct d40_base;
* @tasklet: Tasklet that gets scheduled from interrupt context to complete a
* transfer and call client callback.
* @client: Client owned descriptor list.
+ * @pending_queue: Submitted jobs, to be issued by issue_pending()
* @active: Active descriptor.
* @queue: Queued jobs.
+ * @prepare_queue: Prepared jobs.
* @dma_cfg: The client configuration of this dma channel.
* @configured: whether the dma_cfg configuration is valid
* @base: Pointer to the device instance struct.
@@ -186,6 +186,8 @@ struct d40_base;
* @log_def: Default logical channel settings.
* @lcla: Space for one dst src pair for logical channel transfers.
* @lcpa: Pointer to dst and src lcpa settings.
+ * @runtime_addr: runtime configured address.
+ * @runtime_direction: runtime configured direction.
*
* This struct can either "be" a logical or a physical channel.
*/
@@ -200,8 +202,10 @@ struct d40_chan {
struct dma_chan chan;
struct tasklet_struct tasklet;
struct list_head client;
+ struct list_head pending_queue;
struct list_head active;
struct list_head queue;
+ struct list_head prepare_queue;
struct stedma40_chan_cfg dma_cfg;
bool configured;
struct d40_base *base;
@@ -476,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
list_for_each_entry_safe(d, _d, &d40c->client, node)
if (async_tx_test_ack(&d->txd)) {
- d40_pool_lli_free(d40c, d);
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
@@ -643,9 +646,25 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
return d;
}
+/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
- list_add_tail(&desc->node, &d40c->queue);
+ d40_desc_remove(desc);
+ desc->is_in_client_list = false;
+ list_add_tail(&desc->node, &d40c->pending_queue);
+}
+
+static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->pending_queue))
+ return NULL;
+
+ d = list_first_entry(&d40c->pending_queue,
+ struct d40_desc,
+ node);
+ return d;
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
@@ -789,6 +808,7 @@ done:
static void d40_term_all(struct d40_chan *d40c)
{
struct d40_desc *d40d;
+ struct d40_desc *_d;
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
@@ -802,6 +822,26 @@ static void d40_term_all(struct d40_chan *d40c)
d40_desc_free(d40c, d40d);
}
+ /* Release pending descriptors */
+ while ((d40d = d40_first_pending(d40c))) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release client owned descriptors */
+ if (!list_empty(&d40c->client))
+ list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release descriptors in prepare queue */
+ if (!list_empty(&d40c->prepare_queue))
+ list_for_each_entry_safe(d40d, _d,
+ &d40c->prepare_queue, node) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
d40c->pending_tx = 0;
d40c->busy = false;
@@ -1189,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
if (!d40d->cyclic) {
if (async_tx_test_ack(&d40d->txd)) {
- d40_pool_lli_free(d40c, d40d);
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
} else {
@@ -1576,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
u32 event;
struct d40_phy_res *phy = d40c->phy_chan;
bool is_src;
- struct d40_desc *d;
- struct d40_desc *_d;
-
/* Terminate all queued and active transfers */
d40_term_all(d40c);
- /* Release client owned descriptors */
- if (!list_empty(&d40c->client))
- list_for_each_entry_safe(d, _d, &d40c->client, node) {
- d40_pool_lli_free(d40c, d);
- d40_desc_remove(d);
- d40_desc_free(d40c, d);
- }
-
if (phy == NULL) {
chan_err(d40c, "phy == null\n");
return -EINVAL;
@@ -1892,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
goto err;
}
+ /*
+ * Add the descriptor to the prepare queue so that it can be
+ * freed later in terminate_all.
+ */
+ list_add_tail(&desc->node, &chan->prepare_queue);
+
spin_unlock_irqrestore(&chan->lock, flags);
return &desc->txd;
@@ -2092,7 +2126,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
struct scatterlist *sg;
int i;
- sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
+ sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
for (i = 0; i < periods; i++) {
sg_dma_address(&sg[i]) = dma_addr;
sg_dma_len(&sg[i]) = period_len;
@@ -2152,24 +2186,87 @@ static void d40_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&d40c->lock, flags);
- /* Busy means that pending jobs are already being processed */
+ list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
+
+ /* Busy means that queued jobs are already being processed */
if (!d40c->busy)
(void) d40_queue_start(d40c);
spin_unlock_irqrestore(&d40c->lock, flags);
}
+static int
+dma40_config_to_halfchannel(struct d40_chan *d40c,
+ struct stedma40_half_channel_info *info,
+ enum dma_slave_buswidth width,
+ u32 maxburst)
+{
+ enum stedma40_periph_data_width addr_width;
+ int psize;
+
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ addr_width = STEDMA40_BYTE_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ addr_width = STEDMA40_HALFWORD_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ addr_width = STEDMA40_WORD_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ addr_width = STEDMA40_DOUBLEWORD_WIDTH;
+ break;
+ default:
+ dev_err(d40c->base->dev,
+ "illegal peripheral address width "
+ "requested (%d)\n",
+ width);
+ return -EINVAL;
+ }
+
+ if (chan_is_logical(d40c)) {
+ if (maxburst >= 16)
+ psize = STEDMA40_PSIZE_LOG_16;
+ else if (maxburst >= 8)
+ psize = STEDMA40_PSIZE_LOG_8;
+ else if (maxburst >= 4)
+ psize = STEDMA40_PSIZE_LOG_4;
+ else
+ psize = STEDMA40_PSIZE_LOG_1;
+ } else {
+ if (maxburst >= 16)
+ psize = STEDMA40_PSIZE_PHY_16;
+ else if (maxburst >= 8)
+ psize = STEDMA40_PSIZE_PHY_8;
+ else if (maxburst >= 4)
+ psize = STEDMA40_PSIZE_PHY_4;
+ else
+ psize = STEDMA40_PSIZE_PHY_1;
+ }
+
+ info->data_width = addr_width;
+ info->psize = psize;
+ info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+
+ return 0;
+}
+
/* Runtime reconfiguration extension */
-static void d40_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+static int d40_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
{
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
- enum dma_slave_buswidth config_addr_width;
+ enum dma_slave_buswidth src_addr_width, dst_addr_width;
dma_addr_t config_addr;
- u32 config_maxburst;
- enum stedma40_periph_data_width addr_width;
- int psize;
+ u32 src_maxburst, dst_maxburst;
+ int ret;
+
+ src_addr_width = config->src_addr_width;
+ src_maxburst = config->src_maxburst;
+ dst_addr_width = config->dst_addr_width;
+ dst_maxburst = config->dst_maxburst;
if (config->direction == DMA_FROM_DEVICE) {
dma_addr_t dev_addr_rx =
@@ -2188,8 +2285,11 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dir);
cfg->dir = STEDMA40_PERIPH_TO_MEM;
- config_addr_width = config->src_addr_width;
- config_maxburst = config->src_maxburst;
+ /* Configure the memory side */
+ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ dst_addr_width = src_addr_width;
+ if (dst_maxburst == 0)
+ dst_maxburst = src_maxburst;
} else if (config->direction == DMA_TO_DEVICE) {
dma_addr_t dev_addr_tx =
@@ -2208,68 +2308,39 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dir);
cfg->dir = STEDMA40_MEM_TO_PERIPH;
- config_addr_width = config->dst_addr_width;
- config_maxburst = config->dst_maxburst;
-
+ /* Configure the memory side */
+ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ src_addr_width = dst_addr_width;
+ if (src_maxburst == 0)
+ src_maxburst = dst_maxburst;
} else {
dev_err(d40c->base->dev,
"unrecognized channel direction %d\n",
config->direction);
- return;
+ return -EINVAL;
}
- switch (config_addr_width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- addr_width = STEDMA40_BYTE_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- addr_width = STEDMA40_HALFWORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- addr_width = STEDMA40_WORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_8_BYTES:
- addr_width = STEDMA40_DOUBLEWORD_WIDTH;
- break;
- default:
+ if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
dev_err(d40c->base->dev,
- "illegal peripheral address width "
- "requested (%d)\n",
- config->src_addr_width);
- return;
+ "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
+ src_maxburst,
+ src_addr_width,
+ dst_maxburst,
+ dst_addr_width);
+ return -EINVAL;
}
- if (chan_is_logical(d40c)) {
- if (config_maxburst >= 16)
- psize = STEDMA40_PSIZE_LOG_16;
- else if (config_maxburst >= 8)
- psize = STEDMA40_PSIZE_LOG_8;
- else if (config_maxburst >= 4)
- psize = STEDMA40_PSIZE_LOG_4;
- else
- psize = STEDMA40_PSIZE_LOG_1;
- } else {
- if (config_maxburst >= 16)
- psize = STEDMA40_PSIZE_PHY_16;
- else if (config_maxburst >= 8)
- psize = STEDMA40_PSIZE_PHY_8;
- else if (config_maxburst >= 4)
- psize = STEDMA40_PSIZE_PHY_4;
- else if (config_maxburst >= 2)
- psize = STEDMA40_PSIZE_PHY_2;
- else
- psize = STEDMA40_PSIZE_PHY_1;
- }
+ ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
+ src_addr_width,
+ src_maxburst);
+ if (ret)
+ return ret;
- /* Set up all the endpoint configs */
- cfg->src_info.data_width = addr_width;
- cfg->src_info.psize = psize;
- cfg->src_info.big_endian = false;
- cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
- cfg->dst_info.data_width = addr_width;
- cfg->dst_info.psize = psize;
- cfg->dst_info.big_endian = false;
- cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+ ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
+ dst_addr_width,
+ dst_maxburst);
+ if (ret)
+ return ret;
/* Fill in register values */
if (chan_is_logical(d40c))
@@ -2282,12 +2353,14 @@ static void d40_set_runtime_config(struct dma_chan *chan,
d40c->runtime_addr = config_addr;
d40c->runtime_direction = config->direction;
dev_dbg(d40c->base->dev,
- "configured channel %s for %s, data width %d, "
- "maxburst %d bytes, LE, no flow control\n",
+ "configured channel %s for %s, data width %d/%d, "
+ "maxburst %d/%d elements, LE, no flow control\n",
dma_chan_name(chan),
(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
- config_addr_width,
- config_maxburst);
+ src_addr_width, dst_addr_width,
+ src_maxburst, dst_maxburst);
+
+ return 0;
}
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -2308,9 +2381,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
case DMA_RESUME:
return d40_resume(d40c);
case DMA_SLAVE_CONFIG:
- d40_set_runtime_config(chan,
+ return d40_set_runtime_config(chan,
(struct dma_slave_config *) arg);
- return 0;
default:
break;
}
@@ -2341,7 +2413,9 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
INIT_LIST_HEAD(&d40c->active);
INIT_LIST_HEAD(&d40c->queue);
+ INIT_LIST_HEAD(&d40c->pending_queue);
INIT_LIST_HEAD(&d40c->client);
+ INIT_LIST_HEAD(&d40c->prepare_queue);
tasklet_init(&d40c->tasklet, dma_tasklet,
(unsigned long) d40c);
@@ -2502,25 +2576,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
- static const struct d40_reg_val dma_id_regs[] = {
- /* Peripheral Id */
- { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
- { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
- /*
- * D40_DREG_PERIPHID2 Depends on HW revision:
- * DB8500ed has 0x0008,
- * ? has 0x0018,
- * DB8500v1 has 0x0028
- * DB8500v2 has 0x0038
- */
- { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
-
- /* PCell Id */
- { .reg = D40_DREG_CELLID0, .val = 0x000d},
- { .reg = D40_DREG_CELLID1, .val = 0x00f0},
- { .reg = D40_DREG_CELLID2, .val = 0x0005},
- { .reg = D40_DREG_CELLID3, .val = 0x00b1}
- };
struct stedma40_platform_data *plat_data;
struct clk *clk = NULL;
void __iomem *virtbase = NULL;
@@ -2529,8 +2584,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
int num_log_chans = 0;
int num_phy_chans;
int i;
- u32 val;
- u32 rev;
+ u32 pid;
+ u32 cid;
+ u8 rev;
clk = clk_get(&pdev->dev, NULL);
@@ -2554,32 +2610,32 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (!virtbase)
goto failure;
- /* HW version check */
- for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
- if (dma_id_regs[i].val !=
- readl(virtbase + dma_id_regs[i].reg)) {
- d40_err(&pdev->dev,
- "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
- dma_id_regs[i].val,
- dma_id_regs[i].reg,
- readl(virtbase + dma_id_regs[i].reg));
- goto failure;
- }
- }
-
- /* Get silicon revision and designer */
- val = readl(virtbase + D40_DREG_PERIPHID2);
+ /* This is just a regular AMBA PrimeCell ID actually */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
+ & 255) << (i * 8);
+ for (cid = 0, i = 0; i < 4; i++)
+ cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
+ & 255) << (i * 8);
- if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
- D40_HW_DESIGNER) {
+ if (cid != AMBA_CID) {
+ d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
+ goto failure;
+ }
+ if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
- val & D40_DREG_PERIPHID2_DESIGNER_MASK,
- D40_HW_DESIGNER);
+ AMBA_MANF_BITS(pid),
+ AMBA_VENDOR_ST);
goto failure;
}
-
- rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
- D40_DREG_PERIPHID2_REV_POS;
+ /*
+ * HW revision:
+ * DB8500ed has revision 0
+ * ? has revision 1
+ * DB8500v1 has revision 2
+ * DB8500v2 has revision 3
+ */
+ rev = AMBA_REV_BITS(pid);
/* The number of physical channels on this HW */
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
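For reference, a rough worked example of the decode above, using the DB8500v2 values that the removed dma_id_regs table listed (PERIPHID0..3 = 0x40, 0x00, 0x38, 0x00 and CELLID0..3 = 0x0d, 0xf0, 0x05, 0xb1); the loops assemble the bytes least-significant first, and the values here are shown only to illustrate the field layout:
/* hypothetical assembled words for a DB8500v2 */
u32 pid = 0x00380040; /* part 0x040, designer 0x80 (ST), revision 3 */
u32 cid = 0xb105f00d; /* equals AMBA_CID, i.e. a valid PrimeCell */
/* AMBA_MANF_BITS(pid) == 0x80 == AMBA_VENDOR_ST */
/* AMBA_REV_BITS(pid) == 3, the DB8500v2 entry in the comment above */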
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 195ee65..b44c4551 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -184,9 +184,6 @@
#define D40_DREG_PERIPHID0 0xFE0
#define D40_DREG_PERIPHID1 0xFE4
#define D40_DREG_PERIPHID2 0xFE8
-#define D40_DREG_PERIPHID2_REV_POS 4
-#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
-#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
#define D40_DREG_PERIPHID3 0xFEC
#define D40_DREG_CELLID0 0xFF0
#define D40_DREG_CELLID1 0xFF4
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
index aab9707..86ad2ee 100644
--- a/drivers/edac/edac_stub.c
+++ b/drivers/edac/edac_stub.c
@@ -14,7 +14,7 @@
*/
#include <linux/module.h>
#include <linux/edac.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/edac.h>
int edac_op_state = EDAC_OPSTATE_INVAL;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 04f1e7c..f6cf448 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
char *type, *optype, *err, *msg;
unsigned long error = m->status & 0x1ff0000l;
u32 optypenum = (m->status >> 4) & 0x07;
- u32 core_err_cnt = (m->status >> 38) && 0x7fff;
+ u32 core_err_cnt = (m->status >> 38) & 0x7fff;
u32 dimm = (m->misc >> 16) & 0x3;
u32 channel = (m->misc >> 18) & 0x3;
u32 syndrome = m->misc >> 32;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 11e1a5d..8af8e86 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -854,11 +854,11 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
if (err_detect & DDR_EDE_SBE)
- edac_mc_handle_ce(mci, pfn, err_addr & PAGE_MASK,
+ edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
syndrome, row_index, 0, mci->ctl_name);
if (err_detect & DDR_EDE_MBE)
- edac_mc_handle_ue(mci, pfn, err_addr & PAGE_MASK,
+ edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
row_index, mci->ctl_name);
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index 30da70d..cdae207 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -45,13 +45,13 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
return 0;
}
-static struct pci_device_id __initdata pci_eisa_pci_tbl[] = {
+static struct pci_device_id pci_eisa_pci_tbl[] = {
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 },
{ 0, }
};
-static struct pci_driver __initdata pci_eisa_driver = {
+static struct pci_driver __refdata pci_eisa_driver = {
.name = "pci_eisa",
.id_table = pci_eisa_pci_tbl,
.probe = pci_eisa_init,
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 29d2423..85661b0 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -32,7 +32,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "core.h"
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index e6ad3bb..4799393 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -216,15 +216,33 @@ struct inbound_phy_packet_event {
struct fw_cdev_event_phy_packet phy_packet;
};
-static inline void __user *u64_to_uptr(__u64 value)
+#ifdef CONFIG_COMPAT
+static void __user *u64_to_uptr(u64 value)
+{
+ if (is_compat_task())
+ return compat_ptr(value);
+ else
+ return (void __user *)(unsigned long)value;
+}
+
+static u64 uptr_to_u64(void __user *ptr)
+{
+ if (is_compat_task())
+ return ptr_to_compat(ptr);
+ else
+ return (u64)(unsigned long)ptr;
+}
+#else
+static inline void __user *u64_to_uptr(u64 value)
{
return (void __user *)(unsigned long)value;
}
-static inline __u64 uptr_to_u64(void __user *ptr)
+static inline u64 uptr_to_u64(void __user *ptr)
{
- return (__u64)(unsigned long)ptr;
+ return (u64)(unsigned long)ptr;
}
+#endif /* CONFIG_COMPAT */
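A minimal usage sketch, assuming a hypothetical request structure whose __u64 'buffer' field carries a user-space address; with the change above the same helper yields a pointer that is valid for both native and 32-bit compat callers:
void __user *p = u64_to_uptr(request->buffer); /* 'request' is hypothetical */

if (copy_to_user(p, &event, sizeof(event)))    /* 'event' is hypothetical too */
        return -EFAULT;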
static int fw_device_op_open(struct inode *inode, struct file *file)
{
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 95a4714..f3b890d 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -38,7 +38,7 @@
#include <linux/string.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>
@@ -455,15 +455,20 @@ static struct device_attribute fw_device_attributes[] = {
static int read_rom(struct fw_device *device,
int generation, int index, u32 *data)
{
- int rcode;
+ u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
+ int i, rcode;
/* device->node_id, accessed below, must not be older than generation */
smp_rmb();
- rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
- device->node_id, generation, device->max_speed,
- (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
- data, 4);
+ for (i = 10; i < 100; i += 10) {
+ rcode = fw_run_transaction(device->card,
+ TCODE_READ_QUADLET_REQUEST, device->node_id,
+ generation, device->max_speed, offset, data, 4);
+ if (rcode != RCODE_BUSY)
+ break;
+ msleep(i);
+ }
be32_to_cpus(data);
return rcode;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 193ed92..94d3b49 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -29,7 +29,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0fe4e4e..b45be57 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -9,7 +9,7 @@
#include <linux/slab.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct device;
struct fw_card;
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 0618145..763626b 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -37,7 +37,7 @@
#include <linux/uaccess.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "nosy.h"
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index bcf792f..57cd3a4 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2179,8 +2179,13 @@ static int ohci_enable(struct fw_card *card,
ohci_driver_name, ohci)) {
fw_error("Failed to allocate interrupt %d.\n", dev->irq);
pci_disable_msi(dev);
- dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- ohci->config_rom, ohci->config_rom_bus);
+
+ if (config_rom) {
+ dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ ohci->next_config_rom,
+ ohci->next_config_rom_bus);
+ ohci->next_config_rom = NULL;
+ }
return -EIO;
}
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 41841a3..17cef86 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1198,6 +1198,10 @@ static int sbp2_remove(struct device *dev)
{
struct fw_unit *unit = fw_unit(dev);
struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
+ struct sbp2_logical_unit *lu;
+
+ list_for_each_entry(lu, &tgt->lu_list, link)
+ cancel_delayed_work_sync(&lu->work);
sbp2_target_put(tgt);
return 0;
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 5f29aaf..eb80b54 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -78,6 +78,7 @@
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/pstore.h>
#include <asm/uaccess.h>
@@ -89,6 +90,8 @@ MODULE_DESCRIPTION("sysfs interface to EFI Variables");
MODULE_LICENSE("GPL");
MODULE_VERSION(EFIVARS_VERSION);
+#define DUMP_NAME_LEN 52
+
/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
@@ -119,6 +122,10 @@ struct efivar_attribute {
ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
};
+#define PSTORE_EFI_ATTRIBUTES \
+ (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+ EFI_VARIABLE_RUNTIME_ACCESS)
#define EFIVAR_ATTR(_name, _mode, _show, _store) \
struct efivar_attribute efivar_attr_##_name = { \
@@ -141,38 +148,72 @@ efivar_create_sysfs_entry(struct efivars *efivars,
/* Return the number of unicode characters in data */
static unsigned long
-utf8_strlen(efi_char16_t *data, unsigned long maxlength)
+utf16_strnlen(efi_char16_t *s, size_t maxlength)
{
unsigned long length = 0;
- while (*data++ != 0 && length < maxlength)
+ while (*s++ != 0 && length < maxlength)
length++;
return length;
}
+static inline unsigned long
+utf16_strlen(efi_char16_t *s)
+{
+ return utf16_strnlen(s, ~0UL);
+}
+
/*
* Return the length of this string, in bytes
* Note: this is NOT the same as the number of unicode characters
*/
static inline unsigned long
-utf8_strsize(efi_char16_t *data, unsigned long maxlength)
+utf16_strsize(efi_char16_t *data, unsigned long maxlength)
{
- return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
+ return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
+}
+
+static inline int
+utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
+{
+ while (1) {
+ if (len == 0)
+ return 0;
+ if (*a < *b)
+ return -1;
+ if (*a > *b)
+ return 1;
+ if (*a == 0) /* implies *b == 0 */
+ return 0;
+ a++;
+ b++;
+ len--;
+ }
}
static efi_status_t
-get_var_data(struct efivars *efivars, struct efi_variable *var)
+get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
{
efi_status_t status;
- spin_lock(&efivars->lock);
var->DataSize = 1024;
status = efivars->ops->get_variable(var->VariableName,
&var->VendorGuid,
&var->Attributes,
&var->DataSize,
var->Data);
+ return status;
+}
+
+static efi_status_t
+get_var_data(struct efivars *efivars, struct efi_variable *var)
+{
+ efi_status_t status;
+
+ spin_lock(&efivars->lock);
+ status = get_var_data_locked(efivars, var);
spin_unlock(&efivars->lock);
+
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
status);
@@ -387,12 +428,180 @@ static struct kobj_type efivar_ktype = {
.default_attrs = def_attrs,
};
+static struct pstore_info efi_pstore_info;
+
static inline void
efivar_unregister(struct efivar_entry *var)
{
kobject_put(&var->kobj);
}
+#ifdef CONFIG_PSTORE
+
+static int efi_pstore_open(struct pstore_info *psi)
+{
+ struct efivars *efivars = psi->data;
+
+ spin_lock(&efivars->lock);
+ efivars->walk_entry = list_first_entry(&efivars->list,
+ struct efivar_entry, list);
+ return 0;
+}
+
+static int efi_pstore_close(struct pstore_info *psi)
+{
+ struct efivars *efivars = psi->data;
+
+ spin_unlock(&efivars->lock);
+ return 0;
+}
+
+static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+ struct timespec *timespec, struct pstore_info *psi)
+{
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ struct efivars *efivars = psi->data;
+ char name[DUMP_NAME_LEN];
+ int i;
+ unsigned int part, size;
+ unsigned long time;
+
+ while (&efivars->walk_entry->list != &efivars->list) {
+ if (!efi_guidcmp(efivars->walk_entry->var.VendorGuid,
+ vendor)) {
+ for (i = 0; i < DUMP_NAME_LEN; i++) {
+ name[i] = efivars->walk_entry->var.VariableName[i];
+ }
+ if (sscanf(name, "dump-type%u-%u-%lu", type, &part, &time) == 3) {
+ *id = part;
+ timespec->tv_sec = time;
+ timespec->tv_nsec = 0;
+ get_var_data_locked(efivars, &efivars->walk_entry->var);
+ size = efivars->walk_entry->var.DataSize;
+ memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+ efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
+ struct efivar_entry, list);
+ return size;
+ }
+ }
+ efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
+ struct efivar_entry, list);
+ }
+ return 0;
+}
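For clarity, the variable-name format shared by the read path above and the write path below, sketched with hypothetical values (a type-0 dump, part 1, recorded at 1316000000 seconds):
char name[DUMP_NAME_LEN];

sprintf(name, "dump-type%u-%u-%lu", 0u, 1u, 1316000000ul);
/* name is now "dump-type0-1-1316000000"; the sscanf() in
 * efi_pstore_read() recovers type, part and timestamp from it. */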
+
+static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi)
+{
+ char name[DUMP_NAME_LEN];
+ char stub_name[DUMP_NAME_LEN];
+ efi_char16_t efi_name[DUMP_NAME_LEN];
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ struct efivars *efivars = psi->data;
+ struct efivar_entry *entry, *found = NULL;
+ int i;
+
+ sprintf(stub_name, "dump-type%u-%u-", type, part);
+ sprintf(name, "%s%lu", stub_name, get_seconds());
+
+ spin_lock(&efivars->lock);
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = stub_name[i];
+
+ /*
+ * Clean up any entries with the same name
+ */
+
+ list_for_each_entry(entry, &efivars->list, list) {
+ get_var_data_locked(efivars, &entry->var);
+
+ if (efi_guidcmp(entry->var.VendorGuid, vendor))
+ continue;
+ if (utf16_strncmp(entry->var.VariableName, efi_name,
+ utf16_strlen(efi_name)))
+ continue;
+ /* Needs to be a prefix */
+ if (entry->var.VariableName[utf16_strlen(efi_name)] == 0)
+ continue;
+
+ /* found */
+ found = entry;
+ efivars->ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ PSTORE_EFI_ATTRIBUTES,
+ 0, NULL);
+ }
+
+ if (found)
+ list_del(&found->list);
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = name[i];
+
+ efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
+ size, psi->buf);
+
+ spin_unlock(&efivars->lock);
+
+ if (found)
+ efivar_unregister(found);
+
+ if (size)
+ efivar_create_sysfs_entry(efivars,
+ utf16_strsize(efi_name,
+ DUMP_NAME_LEN * 2),
+ efi_name, &vendor);
+
+ return part;
+};
+
+static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi)
+{
+ efi_pstore_write(type, id, 0, psi);
+
+ return 0;
+}
+#else
+static int efi_pstore_open(struct pstore_info *psi)
+{
+ return 0;
+}
+
+static int efi_pstore_close(struct pstore_info *psi)
+{
+ return 0;
+}
+
+static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+ struct timespec *time, struct pstore_info *psi)
+{
+ return -1;
+}
+
+static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi)
+{
+ return 0;
+}
+
+static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi)
+{
+ return 0;
+}
+#endif
+
+static struct pstore_info efi_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = "efi",
+ .open = efi_pstore_open,
+ .close = efi_pstore_close,
+ .read = efi_pstore_read,
+ .write = efi_pstore_write,
+ .erase = efi_pstore_erase,
+};
static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -414,8 +623,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = utf8_strsize(new_var->VariableName, 1024);
+ strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
+ strsize2 = utf16_strsize(new_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
new_var->VariableName, strsize1) &&
@@ -447,8 +656,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
/* Create the entry in sysfs. Locking is not required here */
status = efivar_create_sysfs_entry(efivars,
- utf8_strsize(new_var->VariableName,
- 1024),
+ utf16_strsize(new_var->VariableName,
+ 1024),
new_var->VariableName,
&new_var->VendorGuid);
if (status) {
@@ -477,8 +686,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = utf8_strsize(del_var->VariableName, 1024);
+ strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
+ strsize2 = utf16_strsize(del_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
del_var->VariableName, strsize1) &&
@@ -763,6 +972,16 @@ int register_efivars(struct efivars *efivars,
if (error)
unregister_efivars(efivars);
+ efivars->efi_pstore_info = efi_pstore_info;
+
+ efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+ if (efivars->efi_pstore_info.buf) {
+ efivars->efi_pstore_info.bufsize = 1024;
+ efivars->efi_pstore_info.data = efivars;
+ mutex_init(&efivars->efi_pstore_info.buf_mutex);
+ pstore_register(&efivars->efi_pstore_info);
+ }
+
out:
kfree(variable_name);
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 68810fd..aa83de9 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -420,7 +420,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
static efi_status_t gsmi_set_variable(efi_char16_t *name,
efi_guid_t *vendor,
- unsigned long attr,
+ u32 attr,
unsigned long data_size,
void *data)
{
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3634986..d539efd 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -103,6 +103,22 @@ config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
+config GPIO_MSM_V1
+ tristate "Qualcomm MSM GPIO v1"
+ depends on GPIOLIB && ARCH_MSM
+ help
+ Say yes here to support the GPIO interface on ARM v6 based
+ Qualcomm MSM chips. Most of the pins on the MSM can be
+ selected for GPIO, and are controlled by this driver.
+
+config GPIO_MSM_V2
+ tristate "Qualcomm MSM GPIO v2"
+ depends on GPIOLIB && ARCH_MSM
+ help
+ Say yes here to support the GPIO interface on ARM v7 based
+ Qualcomm MSM chips. Most of the pins on the MSM can be
+ selected for GPIO, and are controlled by this driver.
+
config GPIO_MXC
def_bool y
depends on ARCH_MXC
@@ -280,6 +296,12 @@ config GPIO_TC3589X
This enables support for the GPIOs found on the TC3589X
I/O Expander.
+config GPIO_TPS65912
+ tristate "TI TPS65912 GPIO"
+ depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
+ help
+ This driver supports the TPS65912 GPIO chip.
+
config GPIO_TWL4030
tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
depends on TWL4030_CORE
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 7207112..9588948 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -27,6 +27,8 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
+obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o
+obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
@@ -48,6 +50,7 @@ obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
+obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_MACH_U300) += gpio-u300.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
index ed795e6..050c05d 100644
--- a/drivers/gpio/gpio-ab8500.c
+++ b/drivers/gpio/gpio-ab8500.c
@@ -516,5 +516,5 @@ module_exit(ab8500_gpio_exit);
MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO");
-MODULE_ALIAS("AB8500 GPIO driver");
+MODULE_ALIAS("platform:ab8500-gpio");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 231714d..4e24436 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,
return 0;
}
-int __devexit bgpio_remove(struct bgpio_chip *bgc)
+int bgpio_remove(struct bgpio_chip *bgc)
{
int err = gpiochip_remove(&bgc->gc);
@@ -361,15 +361,10 @@ int __devexit bgpio_remove(struct bgpio_chip *bgc)
}
EXPORT_SYMBOL_GPL(bgpio_remove);
-int __devinit bgpio_init(struct bgpio_chip *bgc,
- struct device *dev,
- unsigned long sz,
- void __iomem *dat,
- void __iomem *set,
- void __iomem *clr,
- void __iomem *dirout,
- void __iomem *dirin,
- bool big_endian)
+int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
+ unsigned long sz, void __iomem *dat, void __iomem *set,
+ void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
+ bool big_endian)
{
int ret;
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
new file mode 100644
index 0000000..52a4d42
--- /dev/null
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -0,0 +1,636 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <mach/cpu.h>
+#include <mach/msm_gpiomux.h>
+#include <mach/msm_iomap.h>
+
+/* see 80-VA736-2 Rev C pp 695-751
+**
+** These are actually the *shadow* gpio registers, since the
+** real ones (which allow full access) are only available to the
+** ARM9 side of the world.
+**
+** Since the _BASE addresses need to be page-aligned when we're mapping them
+** to virtual addresses, adjust for the additional offset in these
+** macros.
+*/
+
+#define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + (off))
+#define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off))
+#define MSM_GPIO1_SHADOW_REG(off) (MSM_GPIO1_BASE + 0x800 + (off))
+#define MSM_GPIO2_SHADOW_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off))
+
+/*
+ * MSM7X00 registers
+ */
+/* output value */
+#define MSM7X00_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */
+#define MSM7X00_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */
+#define MSM7X00_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */
+#define MSM7X00_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */
+#define MSM7X00_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 106-95 */
+#define MSM7X00_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x50) /* gpio 107-121 */
+
+/* same pin map as above, output enable */
+#define MSM7X00_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x10)
+#define MSM7X00_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08)
+#define MSM7X00_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x14)
+#define MSM7X00_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x18)
+#define MSM7X00_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x1C)
+#define MSM7X00_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x54)
+
+/* same pin map as above, input read */
+#define MSM7X00_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x34)
+#define MSM7X00_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20)
+#define MSM7X00_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x38)
+#define MSM7X00_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x3C)
+#define MSM7X00_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x40)
+#define MSM7X00_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x44)
+
+/* same pin map as above, 1=edge 0=level interrupt */
+#define MSM7X00_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x60)
+#define MSM7X00_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50)
+#define MSM7X00_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x64)
+#define MSM7X00_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x68)
+#define MSM7X00_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x6C)
+#define MSM7X00_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0xC0)
+
+/* same pin map as above, 1=positive 0=negative */
+#define MSM7X00_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x70)
+#define MSM7X00_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58)
+#define MSM7X00_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x74)
+#define MSM7X00_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x78)
+#define MSM7X00_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x7C)
+#define MSM7X00_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xBC)
+
+/* same pin map as above, interrupt enable */
+#define MSM7X00_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0x80)
+#define MSM7X00_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60)
+#define MSM7X00_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0x84)
+#define MSM7X00_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0x88)
+#define MSM7X00_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0x8C)
+#define MSM7X00_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xB8)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define MSM7X00_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0x90)
+#define MSM7X00_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68)
+#define MSM7X00_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0x94)
+#define MSM7X00_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0x98)
+#define MSM7X00_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0x9C)
+#define MSM7X00_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xB4)
+
+/* same pin map as above, 1=interrupt pending */
+#define MSM7X00_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xA0)
+#define MSM7X00_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70)
+#define MSM7X00_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xA4)
+#define MSM7X00_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xA8)
+#define MSM7X00_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xAC)
+#define MSM7X00_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0xB0)
+
+/*
+ * QSD8X50 registers
+ */
+/* output value */
+#define QSD8X50_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */
+#define QSD8X50_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */
+#define QSD8X50_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */
+#define QSD8X50_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */
+#define QSD8X50_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 103-95 */
+#define QSD8X50_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x10) /* gpio 121-104 */
+#define QSD8X50_GPIO_OUT_6 MSM_GPIO1_SHADOW_REG(0x14) /* gpio 152-122 */
+#define QSD8X50_GPIO_OUT_7 MSM_GPIO1_SHADOW_REG(0x18) /* gpio 164-153 */
+
+/* same pin map as above, output enable */
+#define QSD8X50_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x20)
+#define QSD8X50_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08)
+#define QSD8X50_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x24)
+#define QSD8X50_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x28)
+#define QSD8X50_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x2C)
+#define QSD8X50_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x30)
+#define QSD8X50_GPIO_OE_6 MSM_GPIO1_SHADOW_REG(0x34)
+#define QSD8X50_GPIO_OE_7 MSM_GPIO1_SHADOW_REG(0x38)
+
+/* same pin map as above, input read */
+#define QSD8X50_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x50)
+#define QSD8X50_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20)
+#define QSD8X50_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x54)
+#define QSD8X50_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x58)
+#define QSD8X50_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x5C)
+#define QSD8X50_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x60)
+#define QSD8X50_GPIO_IN_6 MSM_GPIO1_SHADOW_REG(0x64)
+#define QSD8X50_GPIO_IN_7 MSM_GPIO1_SHADOW_REG(0x68)
+
+/* same pin map as above, 1=edge 0=level interrupt */
+#define QSD8X50_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x70)
+#define QSD8X50_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50)
+#define QSD8X50_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x74)
+#define QSD8X50_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x78)
+#define QSD8X50_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x7C)
+#define QSD8X50_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0x80)
+#define QSD8X50_GPIO_INT_EDGE_6 MSM_GPIO1_SHADOW_REG(0x84)
+#define QSD8X50_GPIO_INT_EDGE_7 MSM_GPIO1_SHADOW_REG(0x88)
+
+/* same pin map as above, 1=positive 0=negative */
+#define QSD8X50_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x90)
+#define QSD8X50_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58)
+#define QSD8X50_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x94)
+#define QSD8X50_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x98)
+#define QSD8X50_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x9C)
+#define QSD8X50_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xA0)
+#define QSD8X50_GPIO_INT_POS_6 MSM_GPIO1_SHADOW_REG(0xA4)
+#define QSD8X50_GPIO_INT_POS_7 MSM_GPIO1_SHADOW_REG(0xA8)
+
+/* same pin map as above, interrupt enable */
+#define QSD8X50_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0xB0)
+#define QSD8X50_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60)
+#define QSD8X50_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0xB4)
+#define QSD8X50_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0xB8)
+#define QSD8X50_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0xBC)
+#define QSD8X50_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xC0)
+#define QSD8X50_GPIO_INT_EN_6 MSM_GPIO1_SHADOW_REG(0xC4)
+#define QSD8X50_GPIO_INT_EN_7 MSM_GPIO1_SHADOW_REG(0xC8)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define QSD8X50_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0xD0)
+#define QSD8X50_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68)
+#define QSD8X50_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0xD4)
+#define QSD8X50_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0xD8)
+#define QSD8X50_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0xDC)
+#define QSD8X50_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xE0)
+#define QSD8X50_GPIO_INT_CLEAR_6 MSM_GPIO1_SHADOW_REG(0xE4)
+#define QSD8X50_GPIO_INT_CLEAR_7 MSM_GPIO1_SHADOW_REG(0xE8)
+
+/* same pin map as above, 1=interrupt pending */
+#define QSD8X50_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xF0)
+#define QSD8X50_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70)
+#define QSD8X50_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xF4)
+#define QSD8X50_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xF8)
+#define QSD8X50_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xFC)
+#define QSD8X50_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0x100)
+#define QSD8X50_GPIO_INT_STATUS_6 MSM_GPIO1_SHADOW_REG(0x104)
+#define QSD8X50_GPIO_INT_STATUS_7 MSM_GPIO1_SHADOW_REG(0x108)
+
+/*
+ * MSM7X30 registers
+ */
+/* output value */
+#define MSM7X30_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
+#define MSM7X30_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 43-16 */
+#define MSM7X30_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-44 */
+#define MSM7X30_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
+#define MSM7X30_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */
+#define MSM7X30_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 133-107 */
+#define MSM7X30_GPIO_OUT_6 MSM_GPIO1_REG(0xC4) /* gpio 150-134 */
+#define MSM7X30_GPIO_OUT_7 MSM_GPIO1_REG(0x214) /* gpio 181-151 */
+
+/* same pin map as above, output enable */
+#define MSM7X30_GPIO_OE_0 MSM_GPIO1_REG(0x10)
+#define MSM7X30_GPIO_OE_1 MSM_GPIO2_REG(0x08)
+#define MSM7X30_GPIO_OE_2 MSM_GPIO1_REG(0x14)
+#define MSM7X30_GPIO_OE_3 MSM_GPIO1_REG(0x18)
+#define MSM7X30_GPIO_OE_4 MSM_GPIO1_REG(0x1C)
+#define MSM7X30_GPIO_OE_5 MSM_GPIO1_REG(0x54)
+#define MSM7X30_GPIO_OE_6 MSM_GPIO1_REG(0xC8)
+#define MSM7X30_GPIO_OE_7 MSM_GPIO1_REG(0x218)
+
+/* same pin map as above, input read */
+#define MSM7X30_GPIO_IN_0 MSM_GPIO1_REG(0x34)
+#define MSM7X30_GPIO_IN_1 MSM_GPIO2_REG(0x20)
+#define MSM7X30_GPIO_IN_2 MSM_GPIO1_REG(0x38)
+#define MSM7X30_GPIO_IN_3 MSM_GPIO1_REG(0x3C)
+#define MSM7X30_GPIO_IN_4 MSM_GPIO1_REG(0x40)
+#define MSM7X30_GPIO_IN_5 MSM_GPIO1_REG(0x44)
+#define MSM7X30_GPIO_IN_6 MSM_GPIO1_REG(0xCC)
+#define MSM7X30_GPIO_IN_7 MSM_GPIO1_REG(0x21C)
+
+/* same pin map as above, 1=edge 0=level interrupt */
+#define MSM7X30_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60)
+#define MSM7X30_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
+#define MSM7X30_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64)
+#define MSM7X30_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68)
+#define MSM7X30_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C)
+#define MSM7X30_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0)
+#define MSM7X30_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0xD0)
+#define MSM7X30_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x240)
+
+/* same pin map as above, 1=positive 0=negative */
+#define MSM7X30_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70)
+#define MSM7X30_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
+#define MSM7X30_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74)
+#define MSM7X30_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78)
+#define MSM7X30_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C)
+#define MSM7X30_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC)
+#define MSM7X30_GPIO_INT_POS_6 MSM_GPIO1_REG(0xD4)
+#define MSM7X30_GPIO_INT_POS_7 MSM_GPIO1_REG(0x228)
+
+/* same pin map as above, interrupt enable */
+#define MSM7X30_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80)
+#define MSM7X30_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
+#define MSM7X30_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84)
+#define MSM7X30_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88)
+#define MSM7X30_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C)
+#define MSM7X30_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8)
+#define MSM7X30_GPIO_INT_EN_6 MSM_GPIO1_REG(0xD8)
+#define MSM7X30_GPIO_INT_EN_7 MSM_GPIO1_REG(0x22C)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define MSM7X30_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90)
+#define MSM7X30_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
+#define MSM7X30_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94)
+#define MSM7X30_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98)
+#define MSM7X30_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C)
+#define MSM7X30_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4)
+#define MSM7X30_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xDC)
+#define MSM7X30_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0x230)
+
+/* same pin map as above, 1=interrupt pending */
+#define MSM7X30_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0)
+#define MSM7X30_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
+#define MSM7X30_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4)
+#define MSM7X30_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8)
+#define MSM7X30_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC)
+#define MSM7X30_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0)
+#define MSM7X30_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0xE0)
+#define MSM7X30_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x234)
+
+#define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0)
+
+#define MSM_GPIO_BANK(soc, bank, first, last) \
+ { \
+ .regs = { \
+ .out = soc##_GPIO_OUT_##bank, \
+ .in = soc##_GPIO_IN_##bank, \
+ .int_status = soc##_GPIO_INT_STATUS_##bank, \
+ .int_clear = soc##_GPIO_INT_CLEAR_##bank, \
+ .int_en = soc##_GPIO_INT_EN_##bank, \
+ .int_edge = soc##_GPIO_INT_EDGE_##bank, \
+ .int_pos = soc##_GPIO_INT_POS_##bank, \
+ .oe = soc##_GPIO_OE_##bank, \
+ }, \
+ .chip = { \
+ .base = (first), \
+ .ngpio = (last) - (first) + 1, \
+ .get = msm_gpio_get, \
+ .set = msm_gpio_set, \
+ .direction_input = msm_gpio_direction_input, \
+ .direction_output = msm_gpio_direction_output, \
+ .to_irq = msm_gpio_to_irq, \
+ .request = msm_gpio_request, \
+ .free = msm_gpio_free, \
+ } \
+ }
+
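Roughly what one entry of the bank tables further down expands to; the expansion is abbreviated and only meant to show the token pasting:
/*
 * MSM_GPIO_BANK(MSM7X00, 0, 0, 15) becomes approximately:
 *
 * {
 *   .regs = {
 *     .out = MSM7X00_GPIO_OUT_0,
 *     .in = MSM7X00_GPIO_IN_0,
 *     .int_status = MSM7X00_GPIO_INT_STATUS_0,
 *     ...
 *   },
 *   .chip = {
 *     .base = 0,
 *     .ngpio = 16, (= (15) - (0) + 1)
 *     .get = msm_gpio_get,
 *     ...
 *   },
 * }
 */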
+#define MSM_GPIO_BROKEN_INT_CLEAR 1
+
+struct msm_gpio_regs {
+ void __iomem *out;
+ void __iomem *in;
+ void __iomem *int_status;
+ void __iomem *int_clear;
+ void __iomem *int_en;
+ void __iomem *int_edge;
+ void __iomem *int_pos;
+ void __iomem *oe;
+};
+
+struct msm_gpio_chip {
+ spinlock_t lock;
+ struct gpio_chip chip;
+ struct msm_gpio_regs regs;
+#if MSM_GPIO_BROKEN_INT_CLEAR
+ unsigned int_status_copy;
+#endif
+ unsigned int both_edge_detect;
+ unsigned int int_enable[2]; /* 0: awake, 1: sleep */
+};
+
+static int msm_gpio_write(struct msm_gpio_chip *msm_chip,
+ unsigned offset, unsigned on)
+{
+ unsigned mask = BIT(offset);
+ unsigned val;
+
+ val = readl(msm_chip->regs.out);
+ if (on)
+ writel(val | mask, msm_chip->regs.out);
+ else
+ writel(val & ~mask, msm_chip->regs.out);
+ return 0;
+}
+
+static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip)
+{
+ int loop_limit = 100;
+ unsigned pol, val, val2, intstat;
+ do {
+ val = readl(msm_chip->regs.in);
+ pol = readl(msm_chip->regs.int_pos);
+ pol = (pol & ~msm_chip->both_edge_detect) |
+ (~val & msm_chip->both_edge_detect);
+ writel(pol, msm_chip->regs.int_pos);
+ intstat = readl(msm_chip->regs.int_status);
+ val2 = readl(msm_chip->regs.in);
+ if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0)
+ return;
+ } while (loop_limit-- > 0);
+ printk(KERN_ERR "msm_gpio_update_both_edge_detect, "
+ "failed to reach stable state %x != %x\n", val, val2);
+}
+
+static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip,
+ unsigned offset)
+{
+ unsigned bit = BIT(offset);
+
+#if MSM_GPIO_BROKEN_INT_CLEAR
+ /* Save interrupts that already triggered before we lose them. */
+ /* Any interrupt that triggers between the read of int_status */
+ /* and the write to int_clear will still be lost though. */
+ msm_chip->int_status_copy |= readl(msm_chip->regs.int_status);
+ msm_chip->int_status_copy &= ~bit;
+#endif
+ writel(bit, msm_chip->regs.int_clear);
+ msm_gpio_update_both_edge_detect(msm_chip);
+ return 0;
+}
+
+static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct msm_gpio_chip *msm_chip;
+ unsigned long irq_flags;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ writel(readl(msm_chip->regs.oe) & ~BIT(offset), msm_chip->regs.oe);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static int
+msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct msm_gpio_chip *msm_chip;
+ unsigned long irq_flags;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ msm_gpio_write(msm_chip, offset, value);
+ writel(readl(msm_chip->regs.oe) | BIT(offset), msm_chip->regs.oe);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct msm_gpio_chip *msm_chip;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ return (readl(msm_chip->regs.in) & (1U << offset)) ? 1 : 0;
+}
+
+static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct msm_gpio_chip *msm_chip;
+ unsigned long irq_flags;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ msm_gpio_write(msm_chip, offset, value);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return MSM_GPIO_TO_INT(chip->base + offset);
+}
+
+#ifdef CONFIG_MSM_GPIOMUX
+static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return msm_gpiomux_get(chip->base + offset);
+}
+
+static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ msm_gpiomux_put(chip->base + offset);
+}
+#else
+#define msm_gpio_request NULL
+#define msm_gpio_free NULL
+#endif
+
+static struct msm_gpio_chip *msm_gpio_chips;
+static int msm_gpio_count;
+
+static struct msm_gpio_chip msm_gpio_chips_msm7x01[] = {
+ MSM_GPIO_BANK(MSM7X00, 0, 0, 15),
+ MSM_GPIO_BANK(MSM7X00, 1, 16, 42),
+ MSM_GPIO_BANK(MSM7X00, 2, 43, 67),
+ MSM_GPIO_BANK(MSM7X00, 3, 68, 94),
+ MSM_GPIO_BANK(MSM7X00, 4, 95, 106),
+ MSM_GPIO_BANK(MSM7X00, 5, 107, 121),
+};
+
+static struct msm_gpio_chip msm_gpio_chips_msm7x30[] = {
+ MSM_GPIO_BANK(MSM7X30, 0, 0, 15),
+ MSM_GPIO_BANK(MSM7X30, 1, 16, 43),
+ MSM_GPIO_BANK(MSM7X30, 2, 44, 67),
+ MSM_GPIO_BANK(MSM7X30, 3, 68, 94),
+ MSM_GPIO_BANK(MSM7X30, 4, 95, 106),
+ MSM_GPIO_BANK(MSM7X30, 5, 107, 133),
+ MSM_GPIO_BANK(MSM7X30, 6, 134, 150),
+ MSM_GPIO_BANK(MSM7X30, 7, 151, 181),
+};
+
+static struct msm_gpio_chip msm_gpio_chips_qsd8x50[] = {
+ MSM_GPIO_BANK(QSD8X50, 0, 0, 15),
+ MSM_GPIO_BANK(QSD8X50, 1, 16, 42),
+ MSM_GPIO_BANK(QSD8X50, 2, 43, 67),
+ MSM_GPIO_BANK(QSD8X50, 3, 68, 94),
+ MSM_GPIO_BANK(QSD8X50, 4, 95, 103),
+ MSM_GPIO_BANK(QSD8X50, 5, 104, 121),
+ MSM_GPIO_BANK(QSD8X50, 6, 122, 152),
+ MSM_GPIO_BANK(QSD8X50, 7, 153, 164),
+};
+
+static void msm_gpio_irq_ack(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ msm_gpio_clear_detect_status(msm_chip,
+ d->irq - gpio_to_irq(msm_chip->chip.base));
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static void msm_gpio_irq_mask(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ /* level triggered interrupts are also latched */
+ if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
+ msm_gpio_clear_detect_status(msm_chip, offset);
+ msm_chip->int_enable[0] &= ~BIT(offset);
+ writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static void msm_gpio_irq_unmask(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ /* level triggered interrupts are also latched */
+ if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
+ msm_gpio_clear_detect_status(msm_chip, offset);
+ msm_chip->int_enable[0] |= BIT(offset);
+ writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+
+ if (on)
+ msm_chip->int_enable[1] |= BIT(offset);
+ else
+ msm_chip->int_enable[1] &= ~BIT(offset);
+
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+ unsigned val, mask = BIT(offset);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ val = readl(msm_chip->regs.int_edge);
+ if (flow_type & IRQ_TYPE_EDGE_BOTH) {
+ writel(val | mask, msm_chip->regs.int_edge);
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ } else {
+ writel(val & ~mask, msm_chip->regs.int_edge);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ }
+ if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+ msm_chip->both_edge_detect |= mask;
+ msm_gpio_update_both_edge_detect(msm_chip);
+ } else {
+ msm_chip->both_edge_detect &= ~mask;
+ val = readl(msm_chip->regs.int_pos);
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
+ writel(val | mask, msm_chip->regs.int_pos);
+ else
+ writel(val & ~mask, msm_chip->regs.int_pos);
+ }
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ int i, j, mask;
+ unsigned val;
+
+ for (i = 0; i < msm_gpio_count; i++) {
+ struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
+ val = readl(msm_chip->regs.int_status);
+ val &= msm_chip->int_enable[0];
+ while (val) {
+ mask = val & -val;
+ j = fls(mask) - 1;
+ /* printk("%s %08x %08x bit %d gpio %d irq %d\n",
+ __func__, val, mask, j, msm_chip->chip.base + j,
+ FIRST_GPIO_IRQ + msm_chip->chip.base + j); */
+ val &= ~mask;
+ generic_handle_irq(FIRST_GPIO_IRQ +
+ msm_chip->chip.base + j);
+ }
+ }
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+}
+
+static struct irq_chip msm_gpio_irq_chip = {
+ .name = "msmgpio",
+ .irq_ack = msm_gpio_irq_ack,
+ .irq_mask = msm_gpio_irq_mask,
+ .irq_unmask = msm_gpio_irq_unmask,
+ .irq_set_wake = msm_gpio_irq_set_wake,
+ .irq_set_type = msm_gpio_irq_set_type,
+};
+
+static int __init msm_init_gpio(void)
+{
+ int i, j = 0;
+
+ if (cpu_is_msm7x01()) {
+ msm_gpio_chips = msm_gpio_chips_msm7x01;
+ msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x01);
+ } else if (cpu_is_msm7x30()) {
+ msm_gpio_chips = msm_gpio_chips_msm7x30;
+ msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x30);
+ } else if (cpu_is_qsd8x50()) {
+ msm_gpio_chips = msm_gpio_chips_qsd8x50;
+ msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_qsd8x50);
+ } else {
+ return 0;
+ }
+
+ for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
+ if (i - FIRST_GPIO_IRQ >=
+ msm_gpio_chips[j].chip.base +
+ msm_gpio_chips[j].chip.ngpio)
+ j++;
+ irq_set_chip_data(i, &msm_gpio_chips[j]);
+ irq_set_chip_and_handler(i, &msm_gpio_irq_chip,
+ handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+
+ for (i = 0; i < msm_gpio_count; i++) {
+ spin_lock_init(&msm_gpio_chips[i].lock);
+ writel(0, msm_gpio_chips[i].regs.int_en);
+ gpiochip_add(&msm_gpio_chips[i].chip);
+ }
+
+ irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
+ irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
+ irq_set_irq_wake(INT_GPIO_GROUP1, 1);
+ irq_set_irq_wake(INT_GPIO_GROUP2, 2);
+ return 0;
+}
+
+postcore_initcall(msm_init_gpio);
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
new file mode 100644
index 0000000..5cb1227
--- /dev/null
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -0,0 +1,433 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <asm/mach/irq.h>
+
+#include <mach/msm_gpiomux.h>
+#include <mach/msm_iomap.h>
+
+/* Bits of interest in the GPIO_IN_OUT register.
+ */
+enum {
+ GPIO_IN = 0,
+ GPIO_OUT = 1
+};
+
+/* Bits of interest in the GPIO_INTR_STATUS register.
+ */
+enum {
+ INTR_STATUS = 0,
+};
+
+/* Bits of interest in the GPIO_CFG register.
+ */
+enum {
+ GPIO_OE = 9,
+};
+
+/* Bits of interest in the GPIO_INTR_CFG register.
+ * When a GPIO triggers, two separate decisions are made, controlled
+ * by two separate flags.
+ *
+ * - First, INTR_RAW_STATUS_EN controls whether or not the GPIO_INTR_STATUS
+ * register for that GPIO will be updated to reflect the triggering of that
+ * gpio. If this bit is 0, this register will not be updated.
+ * - Second, INTR_ENABLE controls whether an interrupt is triggered.
+ *
+ * If INTR_ENABLE is set and INTR_RAW_STATUS_EN is NOT set, an interrupt
+ * can be triggered but the status register will not reflect it.
+ */
+enum {
+ INTR_ENABLE = 0,
+ INTR_POL_CTL = 1,
+ INTR_DECT_CTL = 2,
+ INTR_RAW_STATUS_EN = 3,
+};
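A small sketch of how the two control bits described above would typically be driven together when arming a line (the unmask and ack handlers further down do the equivalent); 'gpio' here is a hypothetical line number:
/* enable the interrupt and let GPIO_INTR_STATUS track it */
set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));

/* later, acknowledge by writing the status bit back */
writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));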
+
+/* Codes of interest in GPIO_INTR_CFG_SU.
+ */
+enum {
+ TARGET_PROC_SCORPION = 4,
+ TARGET_PROC_NONE = 7,
+};
+
+
+#define GPIO_INTR_CFG_SU(gpio) (MSM_TLMM_BASE + 0x0400 + (0x04 * (gpio)))
+#define GPIO_CONFIG(gpio) (MSM_TLMM_BASE + 0x1000 + (0x10 * (gpio)))
+#define GPIO_IN_OUT(gpio) (MSM_TLMM_BASE + 0x1004 + (0x10 * (gpio)))
+#define GPIO_INTR_CFG(gpio) (MSM_TLMM_BASE + 0x1008 + (0x10 * (gpio)))
+#define GPIO_INTR_STATUS(gpio) (MSM_TLMM_BASE + 0x100c + (0x10 * (gpio)))
+
+/**
+ * struct msm_gpio_dev: the MSM8660 SoC GPIO device structure
+ *
+ * @enabled_irqs: a bitmap used to optimize the summary-irq handler. By
+ * keeping track of which gpios are unmasked as irq sources, we avoid
+ * having to do readl calls on hundreds of iomapped registers each time
+ * the summary interrupt fires in order to locate the active interrupts.
+ *
+ * @wake_irqs: a bitmap for tracking which interrupt lines are enabled
+ * as wakeup sources. When the device is suspended, interrupts which are
+ * not wakeup sources are disabled.
+ *
+ * @dual_edge_irqs: a bitmap used to track which irqs are configured
+ * as dual-edge, as this is not supported by the hardware and requires
+ * some special handling in the driver.
+ */
+struct msm_gpio_dev {
+ struct gpio_chip gpio_chip;
+ DECLARE_BITMAP(enabled_irqs, NR_GPIO_IRQS);
+ DECLARE_BITMAP(wake_irqs, NR_GPIO_IRQS);
+ DECLARE_BITMAP(dual_edge_irqs, NR_GPIO_IRQS);
+};
+
+static DEFINE_SPINLOCK(tlmm_lock);
+
+static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip)
+{
+ return container_of(chip, struct msm_gpio_dev, gpio_chip);
+}
+
+static inline void set_gpio_bits(unsigned n, void __iomem *reg)
+{
+ writel(readl(reg) | n, reg);
+}
+
+static inline void clear_gpio_bits(unsigned n, void __iomem *reg)
+{
+ writel(readl(reg) & ~n, reg);
+}
+
+static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ return readl(GPIO_IN_OUT(offset)) & BIT(GPIO_IN);
+}
+
+static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ writel(val ? BIT(GPIO_OUT) : 0, GPIO_IN_OUT(offset));
+}
+
+static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ clear_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+ return 0;
+}
+
+static int msm_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset,
+ int val)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ msm_gpio_set(chip, offset, val);
+ set_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+ return 0;
+}
+
+static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return msm_gpiomux_get(chip->base + offset);
+}
+
+static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ msm_gpiomux_put(chip->base + offset);
+}
+
+static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return MSM_GPIO_TO_INT(chip->base + offset);
+}
+
+static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
+{
+ return irq - MSM_GPIO_TO_INT(chip->base);
+}
+
+static struct msm_gpio_dev msm_gpio = {
+ .gpio_chip = {
+ .base = 0,
+ .ngpio = NR_GPIO_IRQS,
+ .direction_input = msm_gpio_direction_input,
+ .direction_output = msm_gpio_direction_output,
+ .get = msm_gpio_get,
+ .set = msm_gpio_set,
+ .to_irq = msm_gpio_to_irq,
+ .request = msm_gpio_request,
+ .free = msm_gpio_free,
+ },
+};
+
+/* For dual-edge interrupts in software, since the hardware has no
+ * such support:
+ *
+ * At appropriate moments, this function may be called to flip the polarity
+ * settings of both-edge irq lines to try and catch the next edge.
+ *
+ * The attempt is considered successful if:
+ * - the status bit goes high, indicating that an edge was caught, or
+ * - the input value of the gpio doesn't change during the attempt.
+ * If the value changes twice during the process, that would cause the first
+ * test to fail but would force the second, as two opposite
+ * transitions would cause a detection no matter the polarity setting.
+ *
+ * The do-loop tries to sledge-hammer closed the timing hole between
+ * the initial value-read and the polarity-write - if the line value changes
+ * during that window, an interrupt is lost, the new polarity setting is
+ * incorrect, and the first success test will fail, causing a retry.
+ *
+ * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c.
+ */
+static void msm_gpio_update_dual_edge_pos(unsigned gpio)
+{
+ int loop_limit = 100;
+ unsigned val, val2, intstat;
+
+ do {
+ val = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
+ if (val)
+ clear_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
+ else
+ set_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
+ val2 = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
+ intstat = readl(GPIO_INTR_STATUS(gpio)) & BIT(INTR_STATUS);
+ if (intstat || val == val2)
+ return;
+ } while (loop_limit-- > 0);
+ pr_err("dual-edge irq failed to stabilize, "
+ "interrupts dropped. %#08x != %#08x\n",
+ val, val2);
+}
+
+static void msm_gpio_irq_ack(struct irq_data *d)
+{
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+
+ writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
+ if (test_bit(gpio, msm_gpio.dual_edge_irqs))
+ msm_gpio_update_dual_edge_pos(gpio);
+}
+
+static void msm_gpio_irq_mask(struct irq_data *d)
+{
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
+ clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
+ __clear_bit(gpio, msm_gpio.enabled_irqs);
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+}
+
+static void msm_gpio_irq_unmask(struct irq_data *d)
+{
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ __set_bit(gpio, msm_gpio.enabled_irqs);
+ set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
+ writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+}
+
+static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+ unsigned long irq_flags;
+ uint32_t bits;
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+
+ bits = readl(GPIO_INTR_CFG(gpio));
+
+ if (flow_type & IRQ_TYPE_EDGE_BOTH) {
+ bits |= BIT(INTR_DECT_CTL);
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ __set_bit(gpio, msm_gpio.dual_edge_irqs);
+ else
+ __clear_bit(gpio, msm_gpio.dual_edge_irqs);
+ } else {
+ bits &= ~BIT(INTR_DECT_CTL);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ __clear_bit(gpio, msm_gpio.dual_edge_irqs);
+ }
+
+ if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
+ bits |= BIT(INTR_POL_CTL);
+ else
+ bits &= ~BIT(INTR_POL_CTL);
+
+ writel(bits, GPIO_INTR_CFG(gpio));
+
+ if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ msm_gpio_update_dual_edge_pos(gpio);
+
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+
+ return 0;
+}
+
+/*
+ * When the summary IRQ is raised, any number of GPIO lines may be high.
+ * It is the job of the summary handler to find all those GPIO lines
+ * which have been set as summary IRQ lines and which are triggered,
+ * and to call their interrupt handlers.
+ */
+static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned long i;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
+ i < NR_GPIO_IRQS;
+ i = find_next_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS, i + 1)) {
+ if (readl(GPIO_INTR_STATUS(i)) & BIT(INTR_STATUS))
+ generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip,
+ i));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+
+ if (on) {
+ if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS))
+ irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1);
+ set_bit(gpio, msm_gpio.wake_irqs);
+ } else {
+ clear_bit(gpio, msm_gpio.wake_irqs);
+ if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS))
+ irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0);
+ }
+
+ return 0;
+}
+
+static struct irq_chip msm_gpio_irq_chip = {
+ .name = "msmgpio",
+ .irq_mask = msm_gpio_irq_mask,
+ .irq_unmask = msm_gpio_irq_unmask,
+ .irq_ack = msm_gpio_irq_ack,
+ .irq_set_type = msm_gpio_irq_set_type,
+ .irq_set_wake = msm_gpio_irq_set_wake,
+};
+
+static int __devinit msm_gpio_probe(struct platform_device *dev)
+{
+ int i, irq, ret;
+
+ bitmap_zero(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
+ bitmap_zero(msm_gpio.wake_irqs, NR_GPIO_IRQS);
+ bitmap_zero(msm_gpio.dual_edge_irqs, NR_GPIO_IRQS);
+ msm_gpio.gpio_chip.label = dev->name;
+ ret = gpiochip_add(&msm_gpio.gpio_chip);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) {
+ irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i);
+ irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
+ handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+
+ irq_set_chained_handler(TLMM_SCSS_SUMMARY_IRQ,
+ msm_summary_irq_handler);
+ return 0;
+}
+
+static int __devexit msm_gpio_remove(struct platform_device *dev)
+{
+ int ret = gpiochip_remove(&msm_gpio.gpio_chip);
+
+ if (ret < 0)
+ return ret;
+
+ irq_set_handler(TLMM_SCSS_SUMMARY_IRQ, NULL);
+
+ return 0;
+}
+
+static struct platform_driver msm_gpio_driver = {
+ .probe = msm_gpio_probe,
+ .remove = __devexit_p(msm_gpio_remove),
+ .driver = {
+ .name = "msmgpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct platform_device msm_device_gpio = {
+ .name = "msmgpio",
+ .id = -1,
+};
+
+static int __init msm_gpio_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&msm_gpio_driver);
+ if (!rc) {
+ rc = platform_device_register(&msm_device_gpio);
+ if (rc)
+ platform_driver_unregister(&msm_gpio_driver);
+ }
+
+ return rc;
+}
+
+static void __exit msm_gpio_exit(void)
+{
+ platform_device_unregister(&msm_device_gpio);
+ platform_driver_unregister(&msm_gpio_driver);
+}
+
+postcore_initcall(msm_gpio_init);
+module_exit(msm_gpio_exit);
+
+MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
+MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:msmgpio");
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
new file mode 100644
index 0000000..79e66c0
--- /dev/null
+++ b/drivers/gpio/gpio-tps65912.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on the wm8350 implementation.

+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/mfd/tps65912.h>
+
+struct tps65912_gpio_data {
+ struct tps65912 *tps65912;
+ struct gpio_chip gpio_chip;
+};
+
+static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ int val;
+
+ val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
+
+ if (val & GPIO_STS_MASK)
+ return 1;
+
+ return 0;
+}
+
+static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+
+ if (value)
+ tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK);
+ else
+ tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK);
+}
+
+static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+
+ /* Set the initial value */
+ tps65912_gpio_set(gc, offset, value);
+
+ return tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK);
+}
+
+static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+
+ return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK);
+
+}
+
+static struct gpio_chip template_chip = {
+ .label = "tps65912",
+ .owner = THIS_MODULE,
+ .direction_input = tps65912_gpio_input,
+ .direction_output = tps65912_gpio_output,
+ .get = tps65912_gpio_get,
+ .set = tps65912_gpio_set,
+ .can_sleep = 1,
+ .ngpio = 5,
+ .base = -1,
+};
+
+static int __devinit tps65912_gpio_probe(struct platform_device *pdev)
+{
+ struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65912_board *pdata = tps65912->dev->platform_data;
+ struct tps65912_gpio_data *tps65912_gpio;
+ int ret;
+
+ tps65912_gpio = kzalloc(sizeof(*tps65912_gpio), GFP_KERNEL);
+ if (tps65912_gpio == NULL)
+ return -ENOMEM;
+
+ tps65912_gpio->tps65912 = tps65912;
+ tps65912_gpio->gpio_chip = template_chip;
+ tps65912_gpio->gpio_chip.dev = &pdev->dev;
+ if (pdata && pdata->gpio_base)
+ tps65912_gpio->gpio_chip.base = pdata->gpio_base;
+
+ ret = gpiochip_add(&tps65912_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, tps65912_gpio);
+
+ return ret;
+
+err:
+ kfree(tps65912_gpio);
+ return ret;
+}
+
+static int __devexit tps65912_gpio_remove(struct platform_device *pdev)
+{
+ struct tps65912_gpio_data *tps65912_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&tps65912_gpio->gpio_chip);
+ if (ret == 0)
+ kfree(tps65912_gpio);
+
+ return ret;
+}
+
+static struct platform_driver tps65912_gpio_driver = {
+ .driver = {
+ .name = "tps65912-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_gpio_probe,
+ .remove = __devexit_p(tps65912_gpio_remove),
+};
+
+static int __init tps65912_gpio_init(void)
+{
+ return platform_driver_register(&tps65912_gpio_driver);
+}
+subsys_initcall(tps65912_gpio_init);
+
+static void __exit tps65912_gpio_exit(void)
+{
+ platform_driver_unregister(&tps65912_gpio_driver);
+}
+module_exit(tps65912_gpio_exit);
+
+MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("GPIO interface for TPS65912 PMICs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65912-gpio");
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 82db185..fe738f0 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -499,6 +499,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
+ dev->mode_config.num_connector--;
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -529,6 +530,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &encoder->base);
list_del(&encoder->head);
+ dev->mode_config.num_encoder--;
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_encoder_cleanup);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 9236965..f88a9b2 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -560,6 +560,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
+ } else if (set->fb->depth != set->crtc->fb->depth) {
+ mode_changed = true;
+ } else if (set->fb->bits_per_pixel !=
+ set->crtc->fb->bits_per_pixel) {
+ mode_changed = true;
} else
fb_changed = true;
}
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9d8c892..9d2668a 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -90,7 +90,6 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
struct drm_device *dev = minor->dev;
struct dentry *ent;
struct drm_info_node *tmp;
- char name[64];
int i, ret;
for (i = 0; i < count; i++) {
@@ -108,6 +107,9 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
root, tmp, &drm_debugfs_fops);
if (!ent) {
+ char name[64];
+ strncpy(name, root->d_name.name,
+ min(root->d_name.len, 64U));
DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
name, files[i].name);
kfree(tmp);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0929219..7425e5c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -127,6 +127,23 @@ static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
+ /*
+ * Sanity check the header of the base EDID block. Return 8 if the header
+ * is perfect, down to 0 if it's totally wrong.
+ */
+int drm_edid_header_is_valid(const u8 *raw_edid)
+{
+ int i, score = 0;
+
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ return score;
+}
+EXPORT_SYMBOL(drm_edid_header_is_valid);
+
+
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
@@ -139,12 +156,7 @@ drm_edid_block_valid(u8 *raw_edid)
struct edid *edid = (struct edid *)raw_edid;
if (raw_edid[0] == 0x00) {
- int score = 0;
-
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
-
+ int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= 6) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
@@ -185,8 +197,8 @@ drm_edid_block_valid(u8 *raw_edid)
bad:
if (raw_edid) {
printk(KERN_ERR "Raw EDID:\n");
- print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
- printk(KERN_ERR "\n");
+ print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+ raw_edid, EDID_LENGTH, false);
}
return 0;
}
@@ -1439,6 +1451,8 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
static void drm_add_display_info(struct edid *edid,
struct drm_display_info *info)
{
+ u8 *edid_ext;
+
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
@@ -1483,6 +1497,13 @@ static void drm_add_display_info(struct edid *edid,
info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
+
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return;
+
+ info->cea_rev = edid_ext[1];
}
/**
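drm_edid_header_is_valid() scores the block against the fixed 8-byte signature (0x00 0xff ... 0x00). A hedged sketch of how a caller could act on that score, mirroring the repair logic drm_edid_block_valid() keeps above; it assumes file-local visibility of edid_header, and the helper name is invented:

static bool edid_header_usable(u8 *raw_edid)
{
	int score = drm_edid_header_is_valid(raw_edid);

	if (score == 8)
		return true;		/* perfect header */
	if (score >= 6) {
		/* a few corrupt bytes: repair in place, as the core does */
		memcpy(raw_edid, edid_header, sizeof(edid_header));
		return true;
	}
	return false;			/* too damaged to trust this block */
}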
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 802b61a..f7c6854 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
{
printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
- return 0;
}
EXPORT_SYMBOL(drm_fb_helper_panic);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4012fe4..186d62e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -129,7 +129,7 @@ drm_gem_destroy(struct drm_device *dev)
}
/**
- * Initialize an already allocate GEM object of the specified size with
+ * Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
*/
int drm_gem_object_init(struct drm_device *dev,
@@ -151,6 +151,27 @@ int drm_gem_object_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_object_init);
/**
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM-provided backing store. Instead, the caller is responsible for
+ * backing the object and handling it.
+ */
+int drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+ obj->filp = NULL;
+
+ kref_init(&obj->refcount);
+ atomic_set(&obj->handle_count, 0);
+ obj->size = size;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_private_object_init);
+
+/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
@@ -211,6 +232,8 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, filp);
drm_gem_object_handle_unreference_unlocked(obj);
return 0;
@@ -227,7 +250,8 @@ drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
- int ret;
+ struct drm_device *dev = obj->dev;
+ int ret;
/*
* Get the user-visible handle using idr.
@@ -248,6 +272,15 @@ again:
return ret;
drm_gem_object_handle_reference(obj);
+
+ if (dev->driver->gem_open_object) {
+ ret = dev->driver->gem_open_object(obj, file_priv);
+ if (ret) {
+ drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
@@ -402,7 +435,12 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
+ struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
+ struct drm_device *dev = obj->dev;
+
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
drm_gem_object_handle_unreference_unlocked(obj);
@@ -418,7 +456,7 @@ void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
idr_for_each(&file_private->object_idr,
- &drm_gem_object_release_handle, NULL);
+ &drm_gem_object_release_handle, file_private);
idr_remove_all(&file_private->object_idr);
idr_destroy(&file_private->object_idr);
@@ -427,7 +465,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
- fput(obj->filp);
+ if (obj->filp)
+ fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
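A sketch of the intended consumer of drm_gem_private_object_init(): a driver object whose backing store is managed by the driver itself rather than by shmem. The struct, helper name and CMA remark are hypothetical, and the usual drmP.h/slab.h includes are assumed.

struct example_bo {
	struct drm_gem_object base;
	dma_addr_t paddr;		/* invented: driver-managed backing */
};

static struct example_bo *example_bo_create(struct drm_device *dev, size_t size)
{
	struct example_bo *bo;

	size = PAGE_ALIGN(size);	/* the init helper insists on page alignment */

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	drm_gem_private_object_init(dev, &bo->base, size);
	/* ... allocate the bo->paddr backing here, e.g. from a CMA pool ... */
	return bo;
}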
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2022a5c..3830e9e 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -291,11 +291,14 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
if (!dev->irq_enabled)
return;
- if (state)
- dev->driver->irq_uninstall(dev);
- else {
- dev->driver->irq_preinstall(dev);
- dev->driver->irq_postinstall(dev);
+ if (state) {
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+ } else {
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_postinstall)
+ dev->driver->irq_postinstall(dev);
}
}
@@ -338,7 +341,8 @@ int drm_irq_install(struct drm_device *dev)
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
/* Before installing handler */
- dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
/* Install handler */
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
@@ -363,11 +367,16 @@ int drm_irq_install(struct drm_device *dev)
vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
/* After installing handler */
- ret = dev->driver->irq_postinstall(dev);
+ if (dev->driver->irq_postinstall)
+ ret = dev->driver->irq_postinstall(dev);
+
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = 0;
mutex_unlock(&dev->struct_mutex);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+ free_irq(drm_dev_to_irq(dev), dev);
}
return ret;
@@ -413,7 +422,8 @@ int drm_irq_uninstall(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
- dev->driver->irq_uninstall(dev);
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
free_irq(drm_dev_to_irq(dev), dev);
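With the NULL checks added above, the three irq_* hooks become genuinely optional. A sketch of a driver relying on that; the names are invented and the handler uses the DRM_IRQ_ARGS convention of this era:

static irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
{
	/* handle and clear the device interrupt here */
	return IRQ_HANDLED;
}

static struct drm_driver example_driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
	.irq_handler	 = example_irq_handler,
	/*
	 * irq_preinstall/irq_postinstall/irq_uninstall deliberately left NULL;
	 * drm_irq_install() and drm_irq_uninstall() now cope with that.
	 */
};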
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c2d32f2..ad74fb4 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -994,9 +994,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
{
const char *name;
unsigned int namelen;
- int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
+ bool res_specified = false, bpp_specified = false, refresh_specified = false;
unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
- int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
+ bool yres_specified = false, cvt = false, rb = false;
+ bool interlace = false, margins = false, was_digit = false;
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
@@ -1015,54 +1016,65 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
for (i = namelen-1; i >= 0; i--) {
switch (name[i]) {
case '@':
- namelen = i;
if (!refresh_specified && !bpp_specified &&
- !yres_specified) {
+ !yres_specified && !cvt && !rb && was_digit) {
refresh = simple_strtol(&name[i+1], NULL, 10);
- refresh_specified = 1;
- if (cvt || rb)
- cvt = 0;
+ refresh_specified = true;
+ was_digit = false;
} else
goto done;
break;
case '-':
- namelen = i;
- if (!bpp_specified && !yres_specified) {
+ if (!bpp_specified && !yres_specified && !cvt &&
+ !rb && was_digit) {
bpp = simple_strtol(&name[i+1], NULL, 10);
- bpp_specified = 1;
- if (cvt || rb)
- cvt = 0;
+ bpp_specified = true;
+ was_digit = false;
} else
goto done;
break;
case 'x':
- if (!yres_specified) {
+ if (!yres_specified && was_digit) {
yres = simple_strtol(&name[i+1], NULL, 10);
- yres_specified = 1;
+ yres_specified = true;
+ was_digit = false;
} else
goto done;
case '0' ... '9':
+ was_digit = true;
break;
case 'M':
- if (!yres_specified)
- cvt = 1;
+ if (yres_specified || cvt || was_digit)
+ goto done;
+ cvt = true;
break;
case 'R':
- if (cvt)
- rb = 1;
+ if (yres_specified || cvt || rb || was_digit)
+ goto done;
+ rb = true;
break;
case 'm':
- if (!cvt)
- margins = 1;
+ if (cvt || yres_specified || was_digit)
+ goto done;
+ margins = true;
break;
case 'i':
- if (!cvt)
- interlace = 1;
+ if (cvt || yres_specified || was_digit)
+ goto done;
+ interlace = true;
break;
case 'e':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
force = DRM_FORCE_ON;
break;
case 'D':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
(connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
force = DRM_FORCE_ON;
@@ -1070,17 +1082,37 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
force = DRM_FORCE_ON_DIGITAL;
break;
case 'd':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
force = DRM_FORCE_OFF;
break;
default:
goto done;
}
}
+
if (i < 0 && yres_specified) {
- xres = simple_strtol(name, NULL, 10);
- res_specified = 1;
+ char *ch;
+ xres = simple_strtol(name, &ch, 10);
+ if ((ch != NULL) && (*ch == 'x'))
+ res_specified = true;
+ else
+ i = ch - name;
+ } else if (!yres_specified && was_digit) {
+ /* catch mode that begins with digits but has no 'x' */
+ i = 0;
}
done:
+ if (i >= 0) {
+ printk(KERN_WARNING
+ "parse error at position %i in video mode '%s'\n",
+ i, name);
+ mode->specified = false;
+ return false;
+ }
+
if (res_specified) {
mode->specified = true;
mode->xres = xres;
@@ -1096,9 +1128,10 @@ done:
mode->bpp_specified = true;
mode->bpp = bpp;
}
- mode->rb = rb ? true : false;
- mode->cvt = cvt ? true : false;
- mode->interlace = interlace ? true : false;
+ mode->rb = rb;
+ mode->cvt = cvt;
+ mode->interlace = interlace;
+ mode->margins = margins;
mode->force = force;
return true;
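For reference, the strings this parser accepts follow the usual video=/modedb convention, roughly <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]. For example, "1024x768-24@60m" selects 1024x768 at 24 bpp and 60 Hz with margins, "1280x720M@60" asks for a CVT-computed 720p mode, and a bare "e" or "d" merely forces the connector on or off. With the stricter checks added above, a string that does not fit this shape (say, a stray letter inside the resolution) now fails the parse and logs the new "parse error at position ..." warning instead of being half-applied.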
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 7223f06..2a8b626 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -123,14 +123,15 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas
{
int len, ret;
- master->unique_len = 10 + strlen(dev->platformdev->name);
+ master->unique_len = 13 + strlen(dev->platformdev->name);
+ master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
- "platform:%s", dev->platformdev->name);
+ "platform:%s:%02d", dev->platformdev->name, dev->platformdev->id);
if (len > master->unique_len) {
DRM_ERROR("Unique buffer overflowed\n");
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0a893f7..3c395a5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
for (i = 0; i < I915_NUM_RINGS; i++) {
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
dev_priv->ring[i].name,
I915_READ_IMR(&dev_priv->ring[i]));
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if (IS_GEN6(dev)) {
+ } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1123,6 +1123,44 @@ static int i915_emon_status(struct seq_file *m, void *unused)
return 0;
}
+static int i915_ring_freq_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ int gpu_freq, ia_freq;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+ seq_printf(m, "unsupported on this chipset\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
+
+ for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+ gpu_freq++) {
+ I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_READ_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode read of freq table timed out\n");
+ continue;
+ }
+ ia_freq = I915_READ(GEN6_PCODE_DATA);
+ seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static int i915_gfxec(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1300,6 +1338,155 @@ static const struct file_operations i915_wedged_fops = {
.llseek = default_llseek,
};
+static int
+i915_max_freq_open(struct inode *inode,
+ struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+i915_max_freq_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof (buf),
+ "max freq: %d\n", dev_priv->max_delay * 50);
+
+ if (len > sizeof (buf))
+ len = sizeof (buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_max_freq_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof (buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
+
+ /*
+ * Turbo will still be enabled, but won't go above the set value.
+ */
+ dev_priv->max_delay = val / 50;
+
+ gen6_set_rps(dev, val / 50);
+
+ return cnt;
+}
+
+static const struct file_operations i915_max_freq_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_max_freq_open,
+ .read = i915_max_freq_read,
+ .write = i915_max_freq_write,
+ .llseek = default_llseek,
+};
+
+static int
+i915_cache_sharing_open(struct inode *inode,
+ struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+i915_cache_sharing_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ u32 snpcr;
+ int len;
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+
+ len = snprintf(buf, sizeof (buf),
+ "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
+ GEN6_MBC_SNPCR_SHIFT);
+
+ if (len > sizeof (buf))
+ len = sizeof (buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_cache_sharing_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ u32 snpcr;
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof (buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ if (val < 0 || val > 3)
+ return -EINVAL;
+
+ DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
+
+ /* Update the cache sharing policy here as well */
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ return cnt;
+}
+
+static const struct file_operations i915_cache_sharing_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_cache_sharing_open,
+ .read = i915_cache_sharing_read,
+ .write = i915_cache_sharing_write,
+ .llseek = default_llseek,
+};
+
/* As the drm_debugfs_init() routines are called before dev->dev_private is
* allocated we need to hook into the minor for release. */
static int
@@ -1399,6 +1586,36 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
+static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_max_freq",
+ S_IRUGO | S_IWUSR,
+ root, dev,
+ &i915_max_freq_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
+}
+
+static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_cache_sharing",
+ S_IRUGO | S_IWUSR,
+ root, dev,
+ &i915_cache_sharing_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
+}
+
static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@@ -1430,6 +1647,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
+ {"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_sr_status", i915_sr_status, 0},
@@ -1451,6 +1669,12 @@ int i915_debugfs_init(struct drm_minor *minor)
ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
+ ret = i915_max_freq_create(minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+ ret = i915_cache_sharing_create(minor->debugfs_root, minor);
+ if (ret)
+ return ret;
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
@@ -1465,6 +1689,10 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
+ 1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
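A note on units for the new debugfs files above: i915_ring_freq_table prints the GPU frequency as gpu_freq * 50 MHz against an effective CPU frequency of ia_freq * 100 MHz, and i915_max_freq works in MHz as well, dividing by 50 before storing the value. So, for example, writing 900 to /sys/kernel/debug/dri/<minor>/i915_max_freq is stored as max_delay = 900 / 50 = 18, which gen6_set_rps() then programs.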
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 296fbd6..8a3942c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -61,7 +61,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
@@ -71,10 +70,9 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr =
- (void __force __iomem *)dev_priv->status_page_dmah->vaddr;
- memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
+ memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
+ 0, PAGE_SIZE);
i915_write_hws_pga(dev);
@@ -1073,6 +1071,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
unsigned long cfb_base;
unsigned long ll_base = 0;
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
+
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
@@ -1099,7 +1100,6 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->cfb_size = size;
- intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index eb91e2d..ce045a8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -37,38 +37,70 @@
#include <linux/console.h>
#include "drm_crtc_helper.h"
-static int i915_modeset = -1;
+static int i915_modeset __read_mostly = -1;
module_param_named(modeset, i915_modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+ "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+ "1=on, -1=force vga console preference [default])");
-unsigned int i915_fbpercrtc = 0;
+unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-int i915_panel_ignore_lid = 0;
+int i915_panel_ignore_lid __read_mostly = 0;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+ "Override lid status (0=autodetect [default], 1=lid open, "
+ "-1=lid closed)");
-unsigned int i915_powersave = 1;
+unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+ "Enable powersavings, fbc, downclocking, etc. (default: true)");
-unsigned int i915_semaphores = 0;
+unsigned int i915_semaphores __read_mostly = 0;
module_param_named(semaphores, i915_semaphores, int, 0600);
+MODULE_PARM_DESC(semaphores,
+ "Use semaphores for inter-ring sync (default: false)");
-unsigned int i915_enable_rc6 = 0;
+unsigned int i915_enable_rc6 __read_mostly = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+MODULE_PARM_DESC(i915_enable_rc6,
+ "Enable power-saving render C-state 6 (default: true)");
-unsigned int i915_enable_fbc = 0;
+unsigned int i915_enable_fbc __read_mostly = 1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
+MODULE_PARM_DESC(i915_enable_fbc,
+ "Enable frame buffer compression for power savings "
+ "(default: false)");
-unsigned int i915_lvds_downclock = 0;
+unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+ "Use panel (LVDS/eDP) downclocking for power savings "
+ "(default: false)");
-unsigned int i915_panel_use_ssc = 1;
+unsigned int i915_panel_use_ssc __read_mostly = 1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+ "Use Spread Spectrum Clock with panels [LVDS/eDP] "
+ "(default: true)");
-int i915_vbt_sdvo_panel_type = -1;
+int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+ "Override selection of SDVO panel mode in the VBT "
+ "(default: auto)");
-static bool i915_try_reset = true;
+static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+bool i915_enable_hangcheck __read_mostly = true;
+module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+ "Periodically check GPU activity for detecting hangs. "
+ "WARNING: Disabling this can cause system wide hangs. "
+ "(default: true)");
static struct drm_driver driver;
extern int intel_agp_enabled;
@@ -345,12 +377,17 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
- int loop = 500;
- u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- while (fifo < 20 && loop--) {
- udelay(10);
- fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES ) {
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+ WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
+ dev_priv->gt_fifo_count = fifo;
}
+ dev_priv->gt_fifo_count--;
}
static int i915_drm_freeze(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ce7914c..7916bd9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -36,6 +36,7 @@
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <drm/intel-gtt.h>
+#include <linux/backlight.h>
/* General customization:
*/
@@ -214,6 +215,8 @@ struct drm_i915_display_funcs {
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
+ int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -265,6 +268,7 @@ enum intel_pch {
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
struct intel_fbdev;
+struct intel_fbc_work;
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -275,6 +279,7 @@ typedef struct drm_i915_private {
int relative_constants_mode;
void __iomem *regs;
+ u32 gt_fifo_count;
struct intel_gmbus {
struct i2c_adapter adapter;
@@ -329,11 +334,10 @@ typedef struct drm_i915_private {
uint32_t last_instdone1;
unsigned long cfb_size;
- unsigned long cfb_pitch;
- unsigned long cfb_offset;
- int cfb_fence;
- int cfb_plane;
+ unsigned int cfb_fb;
+ enum plane cfb_plane;
int cfb_y;
+ struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
@@ -541,6 +545,7 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_M1;
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
+ u32 savePCH_PORT_HOTPLUG;
struct {
/** Bridge to intel-gtt-ko */
@@ -686,6 +691,7 @@ typedef struct drm_i915_private {
int child_dev_num;
struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
+ struct drm_connector *int_edp_connector;
bool mchbar_need_disable;
@@ -719,6 +725,8 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+ struct backlight_device *backlight;
+
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
@@ -986,15 +994,16 @@ struct drm_i915_file_private {
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc;
-extern int i915_panel_ignore_lid;
-extern unsigned int i915_powersave;
-extern unsigned int i915_semaphores;
-extern unsigned int i915_lvds_downclock;
-extern unsigned int i915_panel_use_ssc;
-extern int i915_vbt_sdvo_panel_type;
-extern unsigned int i915_enable_rc6;
-extern unsigned int i915_enable_fbc;
+extern unsigned int i915_fbpercrtc __always_unused;
+extern int i915_panel_ignore_lid __read_mostly;
+extern unsigned int i915_powersave __read_mostly;
+extern unsigned int i915_semaphores __read_mostly;
+extern unsigned int i915_lvds_downclock __read_mostly;
+extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_vbt_sdvo_panel_type __read_mostly;
+extern unsigned int i915_enable_rc6 __read_mostly;
+extern unsigned int i915_enable_fbc __read_mostly;
+extern bool i915_enable_hangcheck __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1164,7 +1173,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
@@ -1183,7 +1192,8 @@ int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -1199,9 +1209,14 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
uint32_t size,
int tiling_mode);
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
/* i915_gem_evict.c */
@@ -1283,12 +1298,8 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void i8xx_disable_fbc(struct drm_device *dev);
-extern void g4x_disable_fbc(struct drm_device *dev);
-extern void ironlake_disable_fbc(struct drm_device *dev);
-extern void intel_disable_fbc(struct drm_device *dev);
-extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a087e1b..a546a71 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1763,8 +1763,11 @@ i915_add_request(struct intel_ring_buffer *ring,
ring->outstanding_lazy_request = false;
if (!dev_priv->mm.suspended) {
- mod_timer(&dev_priv->hangcheck_timer,
- jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ if (i915_enable_hangcheck) {
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies +
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
if (was_empty)
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
@@ -2135,6 +2138,30 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
return 0;
}
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+ u32 old_write_domain, old_read_domains;
+
+ /* Act as a barrier for all accesses through the GTT */
+ mb();
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ return;
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+}
+
/**
* Unbinds an object from the GTT aperture.
*/
@@ -2151,23 +2178,28 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return -EINVAL;
}
- /* blow away mappings if mapped through GTT */
- i915_gem_release_mmap(obj);
-
- /* Move the object to the CPU domain to ensure that
- * any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it. This will
- * also ensure that all pending GPU writes are finished
- * before we unbind.
- */
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ ret = i915_gem_object_finish_gpu(obj);
if (ret == -ERESTARTSYS)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe and we need to cleanup or else we might
* cause memory corruption through use-after-free.
*/
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* Move the object to the CPU domain to ensure that
+ * any possible CPU writes while it's not in the GTT
+ * are flushed when we go to remap it.
+ */
+ if (ret == 0)
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret == -ERESTARTSYS)
+ return ret;
if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
i915_gem_clflush_object(obj);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2996,51 +3028,139 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ int ret;
+
+ if (obj->cache_level == cache_level)
+ return 0;
+
+ if (obj->pin_count) {
+ DRM_DEBUG("can not change the cache level of pinned objects\n");
+ return -EBUSY;
+ }
+
+ if (obj->gtt_space) {
+ ret = i915_gem_object_finish_gpu(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* Before SandyBridge, you could not use tiling or fence
+ * registers with snooped memory, so relinquish any fences
+ * currently pointing to our region in the aperture.
+ */
+ if (INTEL_INFO(obj->base.dev)->gen < 6) {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+ }
+
+ i915_gem_gtt_rebind_object(obj, cache_level);
+ }
+
+ if (cache_level == I915_CACHE_NONE) {
+ u32 old_read_domains, old_write_domain;
+
+ /* If we're coming from LLC cached, then we haven't
+ * actually been tracking whether the data is in the
+ * CPU cache or not, since we only allow one bit set
+ * in obj->write_domain and have been skipping the clflushes.
+ * Just set it to the CPU cache for now.
+ */
+ WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+ }
+
+ obj->cache_level = cache_level;
+ return 0;
+}
+
/*
- * Prepare buffer for display plane. Use uninterruptible for possible flush
- * wait, as in modesetting process we're not supposed to be interrupted.
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ *
+ * For the display plane, we want to be in the GTT but out of any write
+ * domains. So in many ways this looks like set_to_gtt_domain() apart from the
+ * ability to pipeline the waits, pinning and any additional subtleties
+ * that may differentiate the display plane from ordinary buffers.
*/
int
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
struct intel_ring_buffer *pipelined)
{
- uint32_t old_read_domains;
+ u32 old_read_domains, old_write_domain;
int ret;
- /* Not valid to be called on unbound objects. */
- if (obj->gtt_space == NULL)
- return -EINVAL;
-
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
-
- /* Currently, we are always called from an non-interruptible context. */
if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj);
- if (ret)
+ if (ret == -ERESTARTSYS)
return ret;
}
+ /* The display engine is not coherent with the LLC cache on gen6. As
+ * a result, we make sure that the pinning that is about to occur is
+ * done with uncached PTEs. This is the lowest common denominator for all
+ * chipsets.
+ *
+ * However for gen6+, we could do better by using the GFDT bit instead
+ * of uncaching, which would allow us to flush all the LLC-cached data
+ * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+ */
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret)
+ return ret;
+
+ /* As the user may map the buffer once pinned in the display plane
+ * (e.g. libkms for the bootup splash), we have to ensure that we
+ * always use map_and_fenceable for all scanout buffers.
+ */
+ ret = i915_gem_object_pin(obj, alignment, true);
+ if (ret)
+ return ret;
+
i915_gem_object_flush_cpu_write_domain(obj);
+ old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->base.write_domain);
+ old_write_domain);
return 0;
}
int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
int ret;
- if (!obj->active)
+ if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
@@ -3049,6 +3169,9 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
return ret;
}
+ /* Ensure that we invalidate the GPU's caches and TLBs. */
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
return i915_gem_object_wait_rendering(obj);
}
@@ -3575,7 +3698,23 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->cache_level = I915_CACHE_NONE;
+ if (IS_GEN6(dev)) {
+ /* On Gen6, we can have the GPU use the LLC (the CPU
+ * cache) for about a 10% performance improvement
+ * compared to uncached. Graphics requests other than
+ * display scanout are coherent with the CPU in
+ * accessing this cache. This means in this mode we
+ * don't need to clflush on the CPU side, and on the
+ * GPU side we only need to flush internal caches to
+ * get data visible to the CPU.
+ *
+ * However, we maintain the display planes as UC, and so
+ * need to rebind when first used as such.
+ */
+ obj->cache_level = I915_CACHE_LLC;
+ } else
+ obj->cache_level = I915_CACHE_NONE;
+
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
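A sketch of a modeset-path caller of the renamed helper, following the prototype declared in i915_drv.h above (object, required alignment, optional pipelined ring). The wrapper name and the plane-programming comment are placeholders:

static int example_pin_scanout(struct drm_i915_gem_object *obj,
			       u32 alignment,
			       struct intel_ring_buffer *pipelined)
{
	int ret;

	/*
	 * Pins the object uncached (I915_CACHE_NONE) and map_and_fenceable,
	 * flushing or pipelining outstanding GPU writes as described above.
	 */
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		return ret;

	/* ... program the plane registers with obj->gtt_offset ... */
	return 0;
}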
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e46b645..7a709cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -59,24 +59,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
- unsigned int agp_type =
- cache_level_to_agp_type(dev, obj->cache_level);
-
i915_gem_clflush_object(obj);
-
- if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start
- >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
+ i915_gem_gtt_rebind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
@@ -110,6 +94,27 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
return 0;
}
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ BUG_ON(!obj->sg_list);
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ agp_type);
+}
+
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3b03f85..9cbb0cd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -306,12 +306,15 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
+ mutex_unlock(&mode_config->mutex);
+
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
@@ -361,10 +364,12 @@ static void notify_ring(struct drm_device *dev,
ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
-
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer,
- jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ if (i915_enable_hangcheck) {
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies +
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
}
static void gen6_pm_rps_work(struct work_struct *work)
@@ -1664,6 +1669,9 @@ void i915_hangcheck_elapsed(unsigned long data)
uint32_t acthd, instdone, instdone1;
bool err = false;
+ if (!i915_enable_hangcheck)
+ return;
+
/* If all work is done then ACTHD clearly hasn't advanced. */
if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
@@ -2050,8 +2058,10 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
-
- dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ else
+ dev->driver->get_vblank_timestamp = NULL;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5d5def7..542453f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,14 @@
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
+#define GEN6_MBC_SNPCR_SHIFT 21
+#define GEN6_MBC_SNPCR_MASK (3<<21)
+#define GEN6_MBC_SNPCR_MAX (0<<21)
+#define GEN6_MBC_SNPCR_MED (1<<21)
+#define GEN6_MBC_SNPCR_LOW (2<<21)
+#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+
#define GEN6_GDRST 0x941c
#define GEN6_GRDOM_FULL (1 << 0)
#define GEN6_GRDOM_RENDER (1 << 1)
@@ -367,6 +375,7 @@
# define MI_FLUSH_ENABLE (1 << 11)
#define GFX_MODE 0x02520
+#define GFX_MODE_GEN7 0x0229c
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
@@ -374,6 +383,9 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
+#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
+#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
+
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -579,6 +591,7 @@
#define DPFC_CTL_PLANEA (0<<30)
#define DPFC_CTL_PLANEB (1<<30)
#define DPFC_CTL_FENCE_EN (1<<29)
+#define DPFC_CTL_PERSISTENT_MODE (1<<25)
#define DPFC_SR_EN (1<<10)
#define DPFC_CTL_LIMIT_1X (0<<6)
#define DPFC_CTL_LIMIT_2X (1<<6)
@@ -1309,6 +1322,7 @@
#define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30)
+#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1451,6 +1465,7 @@
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
#define LVDS_PIPE_MASK (1 << 30)
+#define LVDS_PIPE(pipe) ((pipe) << 30)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -1490,9 +1505,6 @@
#define LVDS_B0B3_POWER_DOWN (0 << 2)
#define LVDS_B0B3_POWER_UP (3 << 2)
-#define LVDS_PIPE_ENABLED(V, P) \
- (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
-
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
@@ -1505,6 +1517,7 @@
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_SELECT_MASK (3 << 19)
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
@@ -2083,9 +2096,6 @@
#define DP_PIPEB_SELECT (1 << 30)
#define DP_PIPE_MASK (1 << 30)
-#define DP_PIPE_ENABLED(V, P) \
- (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
-
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
#define DP_LINK_TRAIN_PAT_2 (1 << 28)
@@ -3023,6 +3033,20 @@
#define _TRANSA_DP_LINK_M2 0xe0048
#define _TRANSA_DP_LINK_N2 0xe004c
+/* Per-transcoder DIP controls */
+
+#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_GCP_A 0xe0210
+
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define _VIDEO_DIP_GCP_B 0xe1210
+
+#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
@@ -3075,6 +3099,16 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
+#define _TRANSA_CHICKEN2 0xf0064
+#define _TRANSB_CHICKEN2 0xf1064
+#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+
+#define SOUTH_CHICKEN1 0xc2000
+#define FDIA_PHASE_SYNC_SHIFT_OVR 19
+#define FDIA_PHASE_SYNC_SHIFT_EN 18
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define SOUTH_CHICKEN2 0xc2004
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
@@ -3225,14 +3259,12 @@
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-#define ADPA_PIPE_ENABLED(V, P) \
- (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
-
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER_A (0)
#define TRANSCODER_B (1 << 30)
+#define TRANSCODER(pipe) ((pipe) << 30)
#define TRANSCODER_MASK (1 << 30)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
@@ -3249,9 +3281,6 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
-#define HDMI_PIPE_ENABLED(V, P) \
- (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
-
/* PCH SDVOB multiplex with HDMIB */
#define PCH_SDVOB HDMIB
@@ -3318,6 +3347,7 @@
#define PORT_TRANS_B_SEL_CPT (1<<29)
#define PORT_TRANS_C_SEL_CPT (2<<29)
#define PORT_TRANS_SEL_MASK (3<<29)
+#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
@@ -3360,6 +3390,7 @@
#define FORCEWAKE_ACK 0x130090
#define GT_FIFO_FREE_ENTRIES 0x120008
+#define GT_FIFO_NUM_RESERVED_ENTRIES 20
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
@@ -3434,7 +3465,9 @@
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
-#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_DATA 0x138128
+#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#endif /* _I915_REG_H_ */
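The new GFX_MODE_ENABLE()/GFX_MODE_DISABLE() helpers appear to follow the masked-write convention used for this register, where the upper 16 bits select which bits the write may change and the lower 16 bits carry the new value. An illustrative use (not taken from this patch) with a bit defined above:

I915_WRITE(GFX_MODE_GEN7, GFX_MODE_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
I915_WRITE(GFX_MODE_GEN7, GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS));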
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5257cfc..f107423 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -760,15 +760,13 @@ static void i915_restore_display(struct drm_device *dev)
/* FIXME: restore TV & SDVO state */
/* only restore FBC info on the platform that supports FBC*/
+ intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- ironlake_disable_fbc(dev);
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else {
- i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
@@ -814,6 +812,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
dev_priv->saveMCHBAR_RENDER_STANDBY =
I915_READ(RSTDBYCTL);
+ dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
} else {
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
@@ -865,21 +864,25 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(GTIMR, dev_priv->saveGTIMR);
I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
} else {
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
mutex_unlock(&dev->struct_mutex);
- intel_init_clock_gating(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_init_clock_gating(dev);
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev)) {
gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 927442a..61abef8 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -74,7 +74,7 @@ get_blocksize(void *p)
static void
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
- struct lvds_dvo_timing *dvo_timing)
+ const struct lvds_dvo_timing *dvo_timing)
{
panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
dvo_timing->hactive_lo;
@@ -115,20 +115,75 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
drm_mode_set_name(panel_fixed_mode);
}
+static bool
+lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
+ const struct lvds_dvo_timing *b)
+{
+ if (a->hactive_hi != b->hactive_hi ||
+ a->hactive_lo != b->hactive_lo)
+ return false;
+
+ if (a->hsync_off_hi != b->hsync_off_hi ||
+ a->hsync_off_lo != b->hsync_off_lo)
+ return false;
+
+ if (a->hsync_pulse_width != b->hsync_pulse_width)
+ return false;
+
+ if (a->hblank_hi != b->hblank_hi ||
+ a->hblank_lo != b->hblank_lo)
+ return false;
+
+ if (a->vactive_hi != b->vactive_hi ||
+ a->vactive_lo != b->vactive_lo)
+ return false;
+
+ if (a->vsync_off != b->vsync_off)
+ return false;
+
+ if (a->vsync_pulse_width != b->vsync_pulse_width)
+ return false;
+
+ if (a->vblank_hi != b->vblank_hi ||
+ a->vblank_lo != b->vblank_lo)
+ return false;
+
+ return true;
+}
+
+static const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+ int index)
+{
+ /*
+ * The size of fp_timing varies across platforms, so calculate the
+ * DVO timing's relative offset within the LVDS data entry to get
+ * at the DVO timing entry.
+ */
+
+ int lfp_data_size =
+ lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ int dvo_timing_offset =
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+ char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
+
+ return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+}
+
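To make the pointer arithmetic in get_lvds_dvo_timing() above concrete, here is a standalone sketch of the same offset computation. The struct and the numeric offsets are hypothetical example values; in the driver they come from the VBT's LFP data pointer table.

#include <stdio.h>

struct ptr_entry {
	int fp_timing_offset;
	int dvo_timing_offset;
};

int main(void)
{
	/* Hypothetical pointer-table entries for panels 0 and 1. */
	struct ptr_entry ptr[2] = {
		{ .fp_timing_offset = 0,  .dvo_timing_offset = 46 },
		{ .fp_timing_offset = 66, .dvo_timing_offset = 112 },
	};
	int index = 3;	/* e.g. lvds_options->panel_type */

	/* Entry stride: distance between consecutive DVO timing offsets. */
	int lfp_data_size =
		ptr[1].dvo_timing_offset - ptr[0].dvo_timing_offset;
	/* Offset of the DVO timing within a single LFP data entry. */
	int dvo_timing_offset =
		ptr[0].dvo_timing_offset - ptr[0].fp_timing_offset;

	printf("entry %d starts at byte %d, its DVO timing at byte %d\n",
	       index, lfp_data_size * index,
	       lfp_data_size * index + dvo_timing_offset);
	return 0;
}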
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- struct bdb_lvds_options *lvds_options;
- struct bdb_lvds_lfp_data *lvds_lfp_data;
- struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
- struct bdb_lvds_lfp_data_entry *entry;
- struct lvds_dvo_timing *dvo_timing;
+ const struct bdb_lvds_options *lvds_options;
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
struct drm_display_mode *panel_fixed_mode;
- int lfp_data_size, dvo_timing_offset;
- int i, temp_downclock;
- struct drm_display_mode *temp_mode;
+ int i, downclock;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
@@ -150,75 +205,44 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lvds_vbt = 1;
- lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
- lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
- entry = (struct bdb_lvds_lfp_data_entry *)
- ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
- lvds_options->panel_type));
- dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
- lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
-
- /*
- * the size of fp_timing varies on the different platform.
- * So calculate the DVO timing relative offset in LVDS data
- * entry to get the DVO timing entry
- */
- dvo_timing = (struct lvds_dvo_timing *)
- ((unsigned char *)entry + dvo_timing_offset);
+ panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+ fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
- temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
- temp_downclock = panel_fixed_mode->clock;
/*
- * enumerate the LVDS panel timing info entry in VBT to check whether
- * the LVDS downclock is found.
+ * Iterate over the LVDS panel timing info to find the lowest clock
+ * for the native resolution.
*/
+ downclock = panel_dvo_timing->clock;
for (i = 0; i < 16; i++) {
- entry = (struct bdb_lvds_lfp_data_entry *)
- ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
- dvo_timing = (struct lvds_dvo_timing *)
- ((unsigned char *)entry + dvo_timing_offset);
-
- fill_detail_timing_data(temp_mode, dvo_timing);
-
- if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
- temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
- temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
- temp_mode->htotal == panel_fixed_mode->htotal &&
- temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
- temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
- temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
- temp_mode->vtotal == panel_fixed_mode->vtotal &&
- temp_mode->clock < temp_downclock) {
- /*
- * downclock is already found. But we expect
- * to find the lower downclock.
- */
- temp_downclock = temp_mode->clock;
- }
- /* clear it to zero */
- memset(temp_mode, 0, sizeof(*temp_mode));
+ const struct lvds_dvo_timing *dvo_timing;
+
+ dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ i);
+ if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
+ dvo_timing->clock < downclock)
+ downclock = dvo_timing->clock;
}
- kfree(temp_mode);
- if (temp_downclock < panel_fixed_mode->clock &&
- i915_lvds_downclock) {
+
+ if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
- dev_priv->lvds_downclock = temp_downclock;
+ dev_priv->lvds_downclock = downclock * 10;
DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
"Normal Clock %dKHz, downclock %dKHz\n",
- temp_downclock, panel_fixed_mode->clock);
+ panel_fixed_mode->clock, 10*downclock);
}
- return;
}
/* Try to find sdvo panel data */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0f1c799..56a8554 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,7 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -877,7 +878,7 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
int pp_reg, lvds_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
- bool locked = locked;
+ bool locked = true;
if (HAS_PCH_SPLIT(dev_priv->dev)) {
pp_reg = PCH_PP_CONTROL;
@@ -979,11 +980,76 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
+static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 port_sel, u32 val)
+{
+ if ((val & DP_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
+ u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
+ if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
+ return false;
+ } else {
+ if ((val & DP_PIPE_MASK) != (pipe << 30))
+ return false;
+ }
+ return true;
+}
+
+static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & PORT_ENABLE) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & LVDS_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & ADPA_DAC_ENABLE) == 0)
+ return false;
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
+ return false;
+ }
+ return true;
+}
+
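A standalone sketch of the two selection schemes the helpers above distinguish: on IBX-style ports the pipe select is a single bit at position 30 (TRANSCODER(pipe), DP_PIPE_MASK), while on CPT the transcoder select is a two-bit field at bits 30:29 (PORT_TRANS_SEL_CPT). The register values below are hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned int ibx_val = (1u << 31) | (1u << 30); /* port enabled, pipe B */
	unsigned int cpt_val = (1u << 31) | (2u << 29); /* port enabled, transcoder C */

	printf("IBX pipe select       = %u\n", (ibx_val >> 30) & 1); /* 1 -> pipe B */
	printf("CPT transcoder select = %u\n", (cpt_val >> 29) & 3); /* 2 -> transcoder C */
	return 0;
}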
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
+ enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
- WARN(DP_PIPE_ENABLED(val, pipe),
+ WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
@@ -992,7 +1058,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- WARN(HDMI_PIPE_ENABLED(val, pipe),
+ WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
@@ -1003,19 +1069,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
reg = PCH_ADPA;
val = I915_READ(reg);
- WARN(ADPA_PIPE_ENABLED(val, pipe),
+ WARN(adpa_pipe_enabled(dev_priv, pipe, val),
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
reg = PCH_LVDS;
val = I915_READ(reg);
- WARN(LVDS_PIPE_ENABLED(val, pipe),
+ WARN(lvds_pipe_enabled(dev_priv, pipe, val),
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
@@ -1157,12 +1223,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
reg = TRANSCONF(pipe);
val = I915_READ(reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- val &= ~PIPE_BPC_MASK;
- val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ val &= ~PIPE_BPC_MASK;
+ val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ }
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
@@ -1272,6 +1341,17 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+}
+
/**
* intel_enable_plane - enable a display plane on a given pipe
* @dev_priv: i915 private structure
@@ -1295,20 +1375,10 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv,
return;
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
-/*
- * Plane regs are double buffered, going from enabled->disabled needs a
- * trigger in order to latch. The display address reg provides this.
- */
-static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
- enum plane plane)
-{
- u32 reg = DSPADDR(plane);
- I915_WRITE(reg, I915_READ(reg));
-}
-
/**
* intel_disable_plane - disable a display plane
* @dev_priv: i915 private structure
@@ -1334,19 +1404,24 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
}
static void disable_pch_dp(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
+ enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
- if (DP_PIPE_ENABLED(val, pipe))
+ if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
+ DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
I915_WRITE(reg, val & ~DP_PORT_EN);
+ }
}
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- if (HDMI_PIPE_ENABLED(val, pipe))
+ if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
+ DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
+ reg, pipe);
I915_WRITE(reg, val & ~PORT_ENABLE);
+ }
}
/* Disable any ports connected to this transcoder */
@@ -1358,18 +1433,19 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
val = I915_READ(PCH_PP_CONTROL);
I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
- disable_pch_dp(dev_priv, pipe, PCH_DP_B);
- disable_pch_dp(dev_priv, pipe, PCH_DP_C);
- disable_pch_dp(dev_priv, pipe, PCH_DP_D);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
reg = PCH_ADPA;
val = I915_READ(reg);
- if (ADPA_PIPE_ENABLED(val, pipe))
+ if (adpa_pipe_enabled(dev_priv, pipe, val))
I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
reg = PCH_LVDS;
val = I915_READ(reg);
- if (LVDS_PIPE_ENABLED(val, pipe)) {
+ if (lvds_pipe_enabled(dev_priv, pipe, val)) {
+ DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
I915_WRITE(reg, val & ~LVDS_PORT_EN);
POSTING_READ(reg);
udelay(100);
@@ -1380,6 +1456,28 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
disable_pch_hdmi(dev_priv, pipe, HDMID);
}
+static void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
@@ -1388,36 +1486,25 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
int plane, i;
u32 fbc_ctl, fbc_ctl2;
- if (fb->pitch == dev_priv->cfb_pitch &&
- obj->fence_reg == dev_priv->cfb_fence &&
- intel_crtc->plane == dev_priv->cfb_plane &&
- I915_READ(FBC_CONTROL) & FBC_CTL_EN)
- return;
-
- i8xx_disable_fbc(dev);
-
- dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
-
- if (fb->pitch < dev_priv->cfb_pitch)
- dev_priv->cfb_pitch = fb->pitch;
+ cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ if (fb->pitch < cfb_pitch)
+ cfb_pitch = fb->pitch;
/* FBC_CTL wants 64B units */
- dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj->fence_reg;
- dev_priv->cfb_plane = intel_crtc->plane;
- plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+ cfb_pitch = (cfb_pitch / 64) - 1;
+ plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
/* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
- if (obj->tiling_mode != I915_TILING_NONE)
- fbc_ctl2 |= FBC_CTL_CPU_FENCE;
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1425,36 +1512,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- if (obj->tiling_mode != I915_TILING_NONE)
- fbc_ctl |= dev_priv->cfb_fence;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
- dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
-}
-
-void i8xx_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
-
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- if ((fbc_ctl & FBC_CTL_EN) == 0)
- return;
-
- fbc_ctl &= ~FBC_CTL_EN;
+ fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
- /* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
- }
-
- DRM_DEBUG_KMS("disabled FBC\n");
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
+ cfb_pitch, crtc->y, intel_crtc->plane);
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
@@ -1476,30 +1540,9 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
- dpfc_ctl = I915_READ(DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj->fence_reg &&
- dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_y == crtc->y)
- return;
-
- I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
- dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj->fence_reg;
- dev_priv->cfb_plane = intel_crtc->plane;
- dev_priv->cfb_y = crtc->y;
-
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- if (obj->tiling_mode != I915_TILING_NONE) {
- dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
- I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
- } else {
- I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
- }
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1512,7 +1555,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
-void g4x_disable_fbc(struct drm_device *dev)
+static void g4x_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
@@ -1567,32 +1610,12 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
u32 dpfc_ctl;
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj->fence_reg &&
- dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_offset == obj->gtt_offset &&
- dev_priv->cfb_y == crtc->y)
- return;
-
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
- dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj->fence_reg;
- dev_priv->cfb_plane = intel_crtc->plane;
- dev_priv->cfb_offset = obj->gtt_offset;
- dev_priv->cfb_y = crtc->y;
-
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- if (obj->tiling_mode != I915_TILING_NONE) {
- dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
- I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
- } else {
- I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
- }
+ /* Set persistent mode for front-buffer rendering, ala X. */
+ dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1604,7 +1627,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
if (IS_GEN6(dev)) {
I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
sandybridge_blit_fbc_update(dev);
}
@@ -1612,7 +1635,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
-void ironlake_disable_fbc(struct drm_device *dev)
+static void ironlake_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
@@ -1644,24 +1667,109 @@ bool intel_fbc_enabled(struct drm_device *dev)
return dev_priv->display.fbc_enabled(dev);
}
-void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc,
+ work->interval);
+
+ dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->cfb_fb = work->crtc->fb->base.id;
+ dev_priv->cfb_y = work->crtc->y;
+ }
+
+ dev_priv->fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work);
+}
+
+static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (cancel_delayed_work(&dev_priv->fbc_work->work))
+ /* tasklet was killed before being run, clean up */
+ kfree(dev_priv->fbc_work);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake-up (because the work was already running and waiting
+ * for our mutex), it will discover that it is no longer
+ * necessary to run.
+ */
+ dev_priv->fbc_work = NULL;
+}
+
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->display.enable_fbc)
return;
- dev_priv->display.enable_fbc(crtc, interval);
+ intel_cancel_fbc_work(dev_priv);
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL) {
+ dev_priv->display.enable_fbc(crtc, interval);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->fb;
+ work->interval = interval;
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+ dev_priv->fbc_work = work;
+
+ DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ */
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_cancel_fbc_work(dev_priv);
+
if (!dev_priv->display.disable_fbc)
return;
dev_priv->display.disable_fbc(dev);
+ dev_priv->cfb_plane = -1;
}
/**
@@ -1760,8 +1868,13 @@ static void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
- if (obj->tiling_mode != I915_TILING_X) {
- DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
@@ -1770,6 +1883,44 @@ static void intel_update_fbc(struct drm_device *dev)
if (in_dbg_master())
goto out_disable;
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_fb == fb->base.id &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_disable_fbc(dev);
+ }
+
intel_enable_fbc(crtc, 500);
return;
@@ -1812,14 +1963,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
}
dev_priv->mm.interruptible = false;
- ret = i915_gem_object_pin(obj, alignment, true);
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
if (ret)
goto err_interruptible;
- ret = i915_gem_object_set_to_display_plane(obj, pipelined);
- if (ret)
- goto err_unpin;
-
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always install
@@ -1841,10 +1988,8 @@ err_interruptible:
return ret;
}
-/* Assume fb object is pinned & idle & fenced and just update base pointers */
-static int
-intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
+static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1887,7 +2032,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
- DRM_ERROR("Unknown color depth\n");
+ DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1897,10 +2042,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr &= ~DISPPLANE_TILED;
}
- if (HAS_PCH_SPLIT(dev))
- /* must disable */
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
@@ -1917,6 +2058,99 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(DSPADDR(plane), Start + Offset);
POSTING_READ(reg);
+ return 0;
+}
+
+static int ironlake_update_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long Start, Offset;
+ u32 dspcntr;
+ u32 reg;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (fb->depth != 16)
+ return -EINVAL;
+
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ if (fb->depth == 24)
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ else if (fb->depth == 30)
+ dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
+ else
+ return -EINVAL;
+ break;
+ default:
+ DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+
+ /* must disable */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ I915_WRITE(reg, dspcntr);
+
+ Start = obj->gtt_offset;
+ Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, fb->pitch);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ I915_WRITE(DSPSURF(plane), Start);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPADDR(plane), Offset);
+ POSTING_READ(reg);
+
+ return 0;
+}
+
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = dev_priv->display.update_plane(crtc, fb, x, y);
+ if (ret)
+ return ret;
+
intel_update_fbc(dev);
intel_increase_pllclock(crtc);
@@ -1934,7 +2168,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
+ DRM_ERROR("No FB bound\n");
return 0;
}
@@ -1943,6 +2177,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
case 1:
break;
default:
+ DRM_ERROR("no plane for crtc\n");
return -EINVAL;
}
@@ -1952,6 +2187,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("pin & fence failed\n");
return ret;
}
@@ -1971,7 +2207,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- ret = i915_gem_object_flush_gpu(obj);
+ ret = i915_gem_object_finish_gpu(obj);
(void) ret;
}
@@ -1980,6 +2216,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("failed to update base address\n");
return ret;
}
@@ -2086,6 +2323,18 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
+static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 flags = I915_READ(SOUTH_CHICKEN1);
+
+ flags |= FDI_PHASE_SYNC_OVR(pipe);
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
+ flags |= FDI_PHASE_SYNC_EN(pipe);
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -2236,6 +2485,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
+ if (HAS_PCH_CPT(dev))
+ cpt_phase_pointer_enable(dev, pipe);
+
for (i = 0; i < 4; i++ ) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2352,6 +2604,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
+ if (HAS_PCH_CPT(dev))
+ cpt_phase_pointer_enable(dev, pipe);
+
for (i = 0; i < 4; i++ ) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2461,6 +2716,17 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
}
}
+static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 flags = I915_READ(SOUTH_CHICKEN1);
+
+ flags &= ~(FDI_PHASE_SYNC_EN(pipe));
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
+ flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
+ POSTING_READ(SOUTH_CHICKEN1);
+}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2490,6 +2756,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_CHICKEN(pipe),
I915_READ(FDI_RX_CHICKEN(pipe) &
~FDI_RX_PHASE_SYNC_POINTER_EN));
+ } else if (HAS_PCH_CPT(dev)) {
+ cpt_phase_pointer_disable(dev, pipe);
}
/* still set train pattern 1 */
@@ -2622,6 +2890,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -2629,7 +2898,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
TRANS_DP_BPC_MASK);
temp |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
- temp |= TRANS_DP_8BPC;
+ temp |= bpc << 9; /* same format but at 11:9 */
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2699,14 +2968,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
intel_enable_pipe(dev_priv, pipe, is_pch_port);
intel_enable_plane(dev_priv, plane, pipe);
if (is_pch_port)
ironlake_pch_enable(crtc);
- intel_crtc_load_lut(crtc);
-
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
@@ -2732,9 +3005,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_disable_plane(dev_priv, plane, pipe);
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
intel_disable_pipe(dev_priv, pipe);
@@ -2898,9 +3170,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
@@ -4309,6 +4580,135 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
+/**
+ * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
+ * @crtc: CRTC structure
+ *
+ * A pipe may be connected to one or more outputs. Based on the depth of the
+ * attached framebuffer, choose a good color depth to use on the pipe.
+ *
+ * If possible, match the pipe depth to the fb depth. In some cases, this
+ * isn't ideal, because the connected output supports a lesser or restricted
+ * set of depths. Resolve that here:
+ * LVDS typically supports only 6bpc, so clamp down in that case
+ * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
+ * Displays may support a restricted set as well, check EDID and clamp as
+ * appropriate.
+ *
+ * RETURNS:
+ * Dithering requirement (i.e. false if display bpc and pipe bpc match,
+ * true if they don't match).
+ */
+static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ unsigned int *pipe_bpp)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned int display_bpc = UINT_MAX, bpc;
+
+ /* Walk the encoders & connectors on this crtc, get min bpc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
+ unsigned int lvds_bpc;
+
+ if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
+ LVDS_A3_POWER_UP)
+ lvds_bpc = 8;
+ else
+ lvds_bpc = 6;
+
+ if (lvds_bpc < display_bpc) {
+ DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+ display_bpc = lvds_bpc;
+ }
+ continue;
+ }
+
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ /* Use VBT settings if we have an eDP panel */
+ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+ if (edp_bpc < display_bpc) {
+ DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ display_bpc = edp_bpc;
+ }
+ continue;
+ }
+
+ /* Not one of the known troublemakers, check the EDID */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ /* Don't use an invalid EDID bpc value */
+ if (connector->display_info.bpc &&
+ connector->display_info.bpc < display_bpc) {
+ DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+ display_bpc = connector->display_info.bpc;
+ }
+ }
+
+ /*
+ * HDMI is either 12 or 8, so if the display lets 10bpc sneak
+ * through, clamp it down. (Note: >12bpc will be caught below.)
+ */
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+ if (display_bpc > 8 && display_bpc < 12) {
+ DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
+ display_bpc = 12;
+ } else {
+ DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
+ display_bpc = 8;
+ }
+ }
+ }
+
+ /*
+ * We could just drive the pipe at the highest bpc all the time and
+ * enable dithering as needed, but that costs bandwidth. So choose
+ * the minimum value that expresses the full color range of the fb but
+ * also stays within the max display bpc discovered above.
+ */
+
+ switch (crtc->fb->depth) {
+ case 8:
+ bpc = 8; /* since we go through a colormap */
+ break;
+ case 15:
+ case 16:
+ bpc = 6; /* min is 18bpp */
+ break;
+ case 24:
+ bpc = min((unsigned int)8, display_bpc);
+ break;
+ case 30:
+ bpc = min((unsigned int)10, display_bpc);
+ break;
+ case 48:
+ bpc = min((unsigned int)12, display_bpc);
+ break;
+ default:
+ DRM_DEBUG("unsupported depth, assuming 24 bits\n");
+ bpc = min((unsigned int)8, display_bpc);
+ break;
+ }
+
+ DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
+ bpc, display_bpc);
+
+ *pipe_bpp = bpc * 3;
+
+ return display_bpc != bpc;
+}
+
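As a quick worked example of the helper above (hypothetical numbers): a 16bpp framebuffer driving a display whose EDID reports 8bpc gives display_bpc = 8 but bpc = 6 (the 18bpp minimum for 5:6:5), so *pipe_bpp becomes 18 and the function returns true, requesting dithering; a 24bpp framebuffer on the same display gives bpc = 8, *pipe_bpp = 24, and no dithering.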
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4697,6 +5097,81 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
+static void ironlake_update_pch_refclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_encoder *has_edp_encoder = NULL;
+ u32 temp;
+ bool has_lvds = false;
+
+ /* We need to take the global config into account */
+ list_for_each_entry(crtc, &mode_config->crtc_list, head) {
+ if (!crtc->enabled)
+ continue;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list,
+ base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ has_lvds = true;
+ case INTEL_OUTPUT_EDP:
+ has_edp_encoder = encoder;
+ break;
+ }
+ }
+ }
+
+ /* Ironlake: try to set up the display ref clock before DPLL
+ * enabling. This is only under the driver's control after
+ * PCH B stepping; earlier chipset steppings ignore
+ * this setting.
+ */
+ temp = I915_READ(PCH_DREF_CONTROL);
+ /* Always enable nonspread source */
+ temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+ temp |= DREF_NONSPREAD_SOURCE_ENABLE;
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+
+ if (has_edp_encoder) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ temp |= DREF_SSC1_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (intel_panel_use_ssc(dev_priv))
+ temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ else
+ temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else {
+ /* Enable SSC on PCH eDP if needed */
+ if (intel_panel_use_ssc(dev_priv)) {
+ DRM_ERROR("enabling SSC on PCH\n");
+ temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
+ }
+ }
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
+}
+
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4721,7 +5196,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct fdi_m_n m_n = {0};
u32 temp;
u32 lvds_sync = 0;
- int target_clock, pixel_multiplier, lane, link_bw, bpp, factor;
+ int target_clock, pixel_multiplier, lane, link_bw, factor;
+ unsigned int pipe_bpp;
+ bool dither;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -4848,56 +5325,38 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- if (is_lvds) {
- /* the BPC will be 6 if it is 18-bit LVDS panel */
- if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
- temp |= PIPE_8BPC;
- else
- temp |= PIPE_6BPC;
- } else if (has_edp_encoder) {
- switch (dev_priv->edp.bpp/3) {
- case 8:
- temp |= PIPE_8BPC;
- break;
- case 10:
- temp |= PIPE_10BPC;
- break;
- case 6:
- temp |= PIPE_6BPC;
- break;
- case 12:
- temp |= PIPE_12BPC;
- break;
- }
- } else
- temp |= PIPE_8BPC;
- I915_WRITE(PIPECONF(pipe), temp);
-
- switch (temp & PIPE_BPC_MASK) {
- case PIPE_8BPC:
- bpp = 24;
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+ switch (pipe_bpp) {
+ case 18:
+ temp |= PIPE_6BPC;
break;
- case PIPE_10BPC:
- bpp = 30;
+ case 24:
+ temp |= PIPE_8BPC;
break;
- case PIPE_6BPC:
- bpp = 18;
+ case 30:
+ temp |= PIPE_10BPC;
break;
- case PIPE_12BPC:
- bpp = 36;
+ case 36:
+ temp |= PIPE_12BPC;
break;
default:
- DRM_ERROR("unknown pipe bpc value\n");
- bpp = 24;
+ WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
+ pipe_bpp);
+ temp |= PIPE_8BPC;
+ pipe_bpp = 24;
+ break;
}
+ intel_crtc->bpp = pipe_bpp;
+ I915_WRITE(PIPECONF(pipe), temp);
+
if (!lane) {
/*
* Account for spread spectrum to avoid
* oversubscribing the link. Max center spread
* is 2.5%; use 5% for safety's sake.
*/
- u32 bps = target_clock * bpp * 21 / 20;
+ u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
lane = bps / (link_bw * 8) + 1;
}
@@ -4905,51 +5364,10 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (pixel_multiplier > 1)
link_bw *= pixel_multiplier;
- ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
-
- /* Ironlake: try to setup display ref clock before DPLL
- * enabling. This is only under driver's control after
- * PCH B stepping, previous chipset stepping should be
- * ignoring this setting.
- */
- temp = I915_READ(PCH_DREF_CONTROL);
- /* Always enable nonspread source */
- temp &= ~DREF_NONSPREAD_SOURCE_MASK;
- temp |= DREF_NONSPREAD_SOURCE_ENABLE;
- temp &= ~DREF_SSC_SOURCE_MASK;
- temp |= DREF_SSC_SOURCE_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
-
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
-
- if (has_edp_encoder) {
- if (intel_panel_use_ssc(dev_priv)) {
- temp |= DREF_SSC1_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
-
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
- }
- temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
+ &m_n);
- /* Enable CPU source on CPU attached eDP */
- if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- if (intel_panel_use_ssc(dev_priv))
- temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- else
- temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- } else {
- /* Enable SSC on PCH eDP if needed */
- if (intel_panel_use_ssc(dev_priv)) {
- DRM_ERROR("enabling SSC on PCH\n");
- temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
- }
- }
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
- }
+ ironlake_update_pch_refclk(dev);
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
@@ -4966,7 +5384,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else if (is_sdvo && is_tv)
factor = 20;
- if (clock.m1 < factor * clock.n)
+ if (clock.m < factor * clock.n)
fp |= FP_CB_TUNE;
dpll = 0;
@@ -5108,14 +5526,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PCH_LVDS, temp);
}
- /* set the dithering flag and clear for anything other than a panel. */
pipeconf &= ~PIPECONF_DITHER_EN;
pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
- if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
+ if ((is_lvds && dev_priv->lvds_dither) || dither) {
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_ST1;
}
-
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
@@ -5246,6 +5662,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_post_modeset(dev, pipe);
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+
return ret;
}
@@ -5435,21 +5853,15 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_locked;
}
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
- if (ret) {
- DRM_ERROR("failed to pin cursor bo\n");
- goto fail_locked;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, 0);
+ ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
- goto fail_unpin;
+ goto fail_locked;
}
ret = i915_gem_object_put_fence(obj);
if (ret) {
- DRM_ERROR("failed to move cursor bo into the GTT\n");
+ DRM_ERROR("failed to release fence for cursor");
goto fail_unpin;
}
@@ -6152,6 +6564,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
+ intel_update_fbc(work->dev);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -6516,6 +6929,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_pending;
+ intel_disable_fbc(dev);
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6644,6 +7058,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc_reset(&intel_crtc->base);
intel_crtc->active = true; /* force the pipe off on setup_init_config */
+ intel_crtc->bpp = 24; /* default for pre-Ironlake */
if (HAS_PCH_SPLIT(dev)) {
intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6823,8 +7238,6 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_encoder_clones(dev, encoder->clone_mask);
}
- intel_panel_setup_backlight(dev);
-
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
}
@@ -6870,6 +7283,11 @@ int intel_framebuffer_init(struct drm_device *dev,
switch (mode_cmd->bpp) {
case 8:
case 16:
+ /* Only pre-ILK can handle 5:5:5 */
+ if (mode_cmd->depth == 15 && HAS_PCH_SPLIT(dev))
+ return -EINVAL;
+ break;
+
case 24:
case 32:
break;
@@ -7284,6 +7702,59 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+{
+ int min_freq = 15;
+ int gpu_freq, ia_freq, max_ia_freq;
+ int scaling_factor = 180;
+
+ max_ia_freq = cpufreq_quick_get_max(0);
+ /*
+ * Default to measured freq if none found, PCU will ensure we don't go
+ * over
+ */
+ if (!max_ia_freq)
+ max_ia_freq = tsc_khz;
+
+ /* Convert from kHz to MHz */
+ max_ia_freq /= 1000;
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ gpu_freq--) {
+ int diff = dev_priv->max_delay - gpu_freq;
+
+ /*
+ * For GPU frequencies less than 750MHz, just use the lowest
+ * ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+
+ I915_WRITE(GEN6_PCODE_DATA,
+ (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+ gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode write of freq table timed out\n");
+ continue;
+ }
+ }
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
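For a feel for the table gen6_update_ring_freq() programs, here is a standalone sketch of the same arithmetic with made-up max_delay/min_delay codes and a hypothetical 3.4 GHz CPU; the 180/2 scaling, the min_freq cutoff, and the 800 MHz floor are taken from the function above.

#include <stdio.h>

int main(void)
{
	int max_ia_freq = 3400;			/* MHz, hypothetical CPU max */
	int max_delay = 22, min_delay = 7;	/* hypothetical GPU freq codes */
	int min_freq = 15, scaling_factor = 180;

	for (int gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
		int diff = max_delay - gpu_freq;
		int ia_freq;

		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = (ia_freq + 50) / 100;	/* DIV_ROUND_CLOSEST(ia_freq, 100) */

		printf("gpu code %2d -> IA ratio %d\n", gpu_freq, ia_freq);
	}
	return 0;
}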
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7389,10 +7860,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
- for_each_pipe(pipe)
+ for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
@@ -7409,10 +7882,12 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
- for_each_pipe(pipe)
+ for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -7495,6 +7970,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -7504,6 +7980,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
static void ironlake_teardown_rc6(struct drm_device *dev)
@@ -7640,9 +8119,11 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.update_plane = ironlake_update_plane;
} else {
dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.update_plane = i9xx_update_plane;
}
if (I915_HAS_FBC(dev)) {
@@ -7851,6 +8332,9 @@ struct intel_quirk intel_quirks[] = {
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -7939,8 +8423,10 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_emon(dev);
}
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
@@ -7976,12 +8462,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_increase_pllclock(crtc);
}
- if (dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ intel_disable_fbc(dev);
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev) || IS_GEN7(dev))
gen6_disable_rps(dev);
if (IS_IRONLAKE_M(dev))
@@ -7994,6 +8479,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
+ /* flush any delayed tasks or pending work */
+ flush_scheduled_work();
+
/* Shut off idle work before the crtcs get freed. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
intel_crtc = to_intel_crtc(crtc);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index e2aced6..44fef5e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -50,9 +50,10 @@ struct intel_dp {
bool has_audio;
int force_audio;
uint32_t color_range;
+ int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
- uint8_t dpcd[4];
+ uint8_t dpcd[8];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
@@ -178,12 +179,14 @@ intel_dp_link_clock(uint8_t link_bw)
static int
intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int bpp = 24;
- if (is_edp(intel_dp))
- return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
- else
- return pixel_clock * 3;
+ if (intel_crtc)
+ bpp = intel_crtc->bpp;
+
+ return (pixel_clock * bpp + 7) / 8;
}
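As a worked example of the new per-CRTC calculation (hypothetical mode): a 1920x1080@60 mode has pixel_clock = 148500, so with the default 24bpp pipe the required budget is (148500 * 24 + 7) / 8 = 445500, whereas a pipe configured for 30bpp would need 556875; this is why intel_dp_link_required() now reads the bpp from the attached CRTC instead of assuming a fixed value.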
static int
@@ -314,9 +317,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
else
precharge = 5;
- if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
- DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
- I915_READ(ch_ctl));
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (try == 3) {
+ WARN(1, "dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
return -EBUSY;
}
@@ -681,7 +692,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int lane_count = 4, bpp = 24;
+ int lane_count = 4;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
@@ -700,7 +711,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
break;
} else if (is_edp(intel_dp)) {
lane_count = dev_priv->edp.lanes;
- bpp = dev_priv->edp.bpp;
break;
}
}
@@ -710,7 +720,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
- intel_dp_compute_m_n(bpp, lane_count,
+ intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
if (HAS_PCH_SPLIT(dev)) {
@@ -769,6 +779,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
+ intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
/*
* Check for DPCD version > 1.1 and enhanced framing support
@@ -1010,6 +1021,8 @@ static void intel_dp_commit(struct drm_encoder *encoder)
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
+
+ intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
}
static void
@@ -1044,6 +1057,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
}
+ intel_dp->dpms_mode = mode;
}
/*
@@ -1333,10 +1347,16 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
u32 reg;
uint32_t DP = intel_dp->DP;
- /* Enable output, wait for it to become active */
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ /*
+ * On CPT we have to enable the port in training pattern 1, which
+ * will happen below in intel_dp_set_link_train. Otherwise, enable
+ * the port and wait for it to become active.
+ */
+ if (!HAS_PCH_CPT(dev)) {
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1369,7 +1389,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_1))
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE))
break;
/* Set training pattern 1 */
@@ -1444,7 +1465,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_2))
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE))
break;
udelay(400);
@@ -1558,6 +1580,18 @@ intel_dp_link_down(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
}
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+ if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd)) &&
+ (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
+ return true;
+ }
+
+ return false;
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -1570,36 +1604,44 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
- int ret;
+ if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
+ return;
if (!intel_dp->base.base.crtc)
return;
+ /* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
- /* Try to read receiver status if the link appears to be up */
- ret = intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd));
- if (ret != sizeof(intel_dp->dpcd)) {
+ /* Now read the DPCD to see if it's actually running */
+ if (!intel_dp_get_dpcd(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
if (!intel_channel_eq_ok(intel_dp)) {
+ DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+ drm_get_encoder_name(&intel_dp->base.base));
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
}
static enum drm_connector_status
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+{
+ if (intel_dp_get_dpcd(intel_dp))
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
enum drm_connector_status status;
- bool ret;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
@@ -1609,15 +1651,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
return status;
}
- status = connector_status_disconnected;
- ret = intel_dp_aux_native_read_retry(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd));
- if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
- status = connector_status_connected;
- DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
- intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
- return status;
+ return intel_dp_detect_dpcd(intel_dp);
}
static enum drm_connector_status
@@ -1625,7 +1659,6 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum drm_connector_status status;
uint32_t temp, bit;
switch (intel_dp->output_reg) {
@@ -1647,15 +1680,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
if ((temp & bit) == 0)
return connector_status_disconnected;
- status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
- {
- if (intel_dp->dpcd[DP_DPCD_REV] != 0)
- status = connector_status_connected;
- }
-
- return status;
+ return intel_dp_detect_dpcd(intel_dp);
}
/**
@@ -1678,6 +1703,12 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = ironlake_dp_detect(intel_dp);
else
status = g4x_dp_detect(intel_dp);
+
+ DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
+ intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
+ intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
+ intel_dp->dpcd[6], intel_dp->dpcd[7]);
+
if (status != connector_status_connected)
return status;
@@ -1810,6 +1841,11 @@ done:
static void
intel_dp_destroy (struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+
+ if (intel_dpd_is_edp(dev))
+ intel_panel_destroy_backlight(dev);
+
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -1923,6 +1959,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
return;
intel_dp->output_reg = output_reg;
+ intel_dp->dpms_mode = -1;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
@@ -1999,7 +2036,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
- int ret;
+ bool ret;
u32 pp_on, pp_div;
pp_on = I915_READ(PCH_PP_ON_DELAYS);
@@ -2012,11 +2049,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
ironlake_edp_panel_vdd_on(intel_dp);
- ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
- intel_dp->dpcd,
- sizeof(intel_dp->dpcd));
+ ret = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp);
- if (ret == sizeof(intel_dp->dpcd)) {
+ if (ret) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake =
intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -2042,6 +2077,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DRM_MODE_TYPE_PREFERRED;
}
}
+ dev_priv->int_edp_connector = connector;
+ intel_panel_setup_backlight(dev);
}
intel_dp_add_properties(intel_dp, connector);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9ffa61e..0b2ee9d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -170,6 +170,7 @@ struct intel_crtc {
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
bool cursor_visible;
+ unsigned int bpp;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -177,10 +178,28 @@ struct intel_crtc {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define DIP_HEADER_SIZE 5
+
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
+#define DIP_TYPE_SPD 0x3
+#define DIP_VERSION_SPD 0x1
+#define DIP_LEN_SPD 25
+#define DIP_SPD_UNKNOWN 0
+#define DIP_SPD_DSTB 0x1
+#define DIP_SPD_DVDP 0x2
+#define DIP_SPD_DVHS 0x3
+#define DIP_SPD_HDDVR 0x4
+#define DIP_SPD_DVC 0x5
+#define DIP_SPD_DSC 0x6
+#define DIP_SPD_VCD 0x7
+#define DIP_SPD_GAME 0x8
+#define DIP_SPD_PC 0x9
+#define DIP_SPD_BD 0xa
+#define DIP_SPD_SCD 0xb
+
struct dip_infoframe {
uint8_t type; /* HB0 */
uint8_t ver; /* HB1 */
@@ -205,6 +224,11 @@ struct dip_infoframe {
uint16_t left_bar_end;
uint16_t right_bar_start;
} avi;
+ struct {
+ uint8_t vn[8];
+ uint8_t pd[16];
+ uint8_t sdi;
+ } spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
@@ -233,6 +257,13 @@ struct intel_unpin_work {
bool enable_stall_check;
};
+struct intel_fbc_work {
+ struct delayed_work work;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int interval;
+};
+
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
@@ -266,9 +297,10 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern void intel_panel_setup_backlight(struct drm_device *dev);
+extern int intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev);
extern void intel_panel_disable_backlight(struct drm_device *dev);
+extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
@@ -317,6 +349,7 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index aa0a8e8..226ba83 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -45,6 +45,8 @@ struct intel_hdmi {
bool has_hdmi_sink;
bool has_audio;
int force_audio;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ struct dip_infoframe *frame);
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
@@ -58,37 +60,70 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
struct intel_hdmi, base);
}
-void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
+void intel_dip_infoframe_csum(struct dip_infoframe *frame)
{
- uint8_t *data = (uint8_t *)avi_if;
+ uint8_t *data = (uint8_t *)frame;
uint8_t sum = 0;
unsigned i;
- avi_if->checksum = 0;
- avi_if->ecc = 0;
+ frame->checksum = 0;
+ frame->ecc = 0;
- for (i = 0; i < sizeof(*avi_if); i++)
+ /* Header isn't part of the checksum */
+ for (i = 5; i < frame->len; i++)
sum += data[i];
- avi_if->checksum = 0x100 - sum;
+ frame->checksum = 0x100 - sum;
}
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+static u32 intel_infoframe_index(struct dip_infoframe *frame)
{
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
- uint32_t *data = (uint32_t *)&avi_if;
+ u32 flags = 0;
+
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ flags |= VIDEO_DIP_SELECT_AVI;
+ break;
+ case DIP_TYPE_SPD:
+ flags |= VIDEO_DIP_SELECT_SPD;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ break;
+ }
+
+ return flags;
+}
+
+static u32 intel_infoframe_flags(struct dip_infoframe *frame)
+{
+ u32 flags = 0;
+
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
+ break;
+ case DIP_TYPE_SPD:
+ flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ break;
+ }
+
+ return flags;
+}
+
+static void i9xx_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 port;
- unsigned i;
+ u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
- if (!intel_hdmi->has_hdmi_sink)
- return;
/* XXX first guess at handling video port, is this correct? */
if (intel_hdmi->sdvox_reg == SDVOB)
@@ -98,18 +133,87 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
else
return;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
- VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
+ flags = intel_infoframe_index(frame);
+
+ val &= ~VIDEO_DIP_SELECT_MASK;
- intel_dip_infoframe_csum(&avi_if);
- for (i = 0; i < sizeof(avi_if); i += 4) {
+ I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
+
+ for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
- VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
- VIDEO_DIP_ENABLE_AVI);
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+}
+
+static void ironlake_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 flags, val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ flags = intel_infoframe_index(frame);
+
+ val &= ~VIDEO_DIP_SELECT_MASK;
+
+ I915_WRITE(reg, val | flags);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+}
+static void intel_set_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (!intel_hdmi->has_hdmi_sink)
+ return;
+
+ intel_dip_infoframe_csum(frame);
+ intel_hdmi->write_infoframe(encoder, frame);
+}
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+
+ intel_set_infoframe(encoder, &avi_if);
+}
+
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe spd_if;
+
+ memset(&spd_if, 0, sizeof(spd_if));
+ spd_if.type = DIP_TYPE_SPD;
+ spd_if.ver = DIP_VERSION_SPD;
+ spd_if.len = DIP_LEN_SPD;
+ strcpy(spd_if.body.spd.vn, "Intel");
+ strcpy(spd_if.body.spd.pd, "Integrated gfx");
+ spd_if.body.spd.sdi = DIP_SPD_PC;
+
+ intel_set_infoframe(encoder, &spd_if);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -124,12 +228,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
- sdvox |= intel_hdmi->color_range;
+ if (!HAS_PCH_SPLIT(dev))
+ sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+ if (intel_crtc->bpp > 24)
+ sdvox |= COLOR_FORMAT_12bpc;
+ else
+ sdvox |= COLOR_FORMAT_8bpc;
+
/* Required on CPT */
if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
@@ -150,6 +260,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
POSTING_READ(intel_hdmi->sdvox_reg);
intel_hdmi_set_avi_infoframe(encoder);
+ intel_hdmi_set_spd_infoframe(encoder);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -427,6 +538,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_hdmi->sdvox_reg = sdvox_reg;
+ if (!HAS_PCH_SPLIT(dev))
+ intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ else
+ intel_hdmi->write_infoframe = ironlake_write_infoframe;
+
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b28f7bd..31da77f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,14 +72,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg;
+ u32 ctl_reg, lvds_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
+ stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
lvds_reg = LVDS;
+ stat_reg = PP_STATUS;
}
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
@@ -94,17 +96,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
intel_lvds->pfit_control,
intel_lvds->pfit_pgm_ratios);
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
- DRM_ERROR("timed out waiting for panel to power off\n");
- } else {
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
- }
+
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+ intel_lvds->pfit_dirty = false;
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
POSTING_READ(lvds_reg);
+ if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(dev);
}
@@ -113,24 +114,25 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg;
+ u32 ctl_reg, lvds_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
+ stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
lvds_reg = LVDS;
+ stat_reg = PP_STATUS;
}
intel_panel_disable_backlight(dev);
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+ if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
if (intel_lvds->pfit_control) {
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
-
I915_WRITE(PFIT_CONTROL, 0);
intel_lvds->pfit_dirty = true;
}
@@ -398,53 +400,21 @@ out:
static void intel_lvds_prepare(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- /* We try to do the minimum that is necessary in order to unlock
- * the registers for mode setting.
- *
- * On Ironlake, this is quite simple as we just set the unlock key
- * and ignore all subtleties. (This may cause some issues...)
- *
+ /*
* Prior to Ironlake, we must disable the pipe if we want to adjust
* the panel fitter. However at all other times we can just reset
* the registers regardless.
*/
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_PP_CONTROL,
- I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
- } else if (intel_lvds->pfit_dirty) {
- I915_WRITE(PP_CONTROL,
- (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
- & ~POWER_TARGET_ON);
- } else {
- I915_WRITE(PP_CONTROL,
- I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
- }
+ if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
+ intel_lvds_disable(intel_lvds);
}
static void intel_lvds_commit(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- /* Undo any unlocking done in prepare to prevent accidental
- * adjustment of the registers.
- */
- if (HAS_PCH_SPLIT(dev)) {
- u32 val = I915_READ(PCH_PP_CONTROL);
- if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
- I915_WRITE(PCH_PP_CONTROL, val & 0x3);
- } else {
- u32 val = I915_READ(PP_CONTROL);
- if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
- I915_WRITE(PP_CONTROL, val & 0x3);
- }
-
/* Always do a full power on as we do not know what state
* we were left in.
*/
@@ -582,6 +552,8 @@ static void intel_lvds_destroy(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_panel_destroy_backlight(dev);
+
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
@@ -690,6 +662,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell OptiPlex FX170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "AOpen Mini PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
@@ -1032,6 +1012,19 @@ out:
pwm = I915_READ(BLC_PWM_PCH_CTL1);
pwm |= PWM_PCH_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else {
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
dev_priv->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
@@ -1041,6 +1034,9 @@ out:
/* keep the LVDS connector */
dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
+
+ intel_panel_setup_backlight(dev);
+
return true;
failed:
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d2c7104..b8e8158b 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -227,7 +227,6 @@ void intel_opregion_asle_intr(struct drm_device *dev)
asle->aslc = asle_stat;
}
-/* Only present on Ironlake+ */
void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -297,19 +296,26 @@ static int intel_opregion_video_event(struct notifier_block *nb,
/* The only video events relevant to opregion are 0x80. These indicate
either a docking event, lid switch or display switch request. In
Linux, these are handled by the dock, button and video drivers.
- We might want to fix the video driver to be opregion-aware in
- future, but right now we just indicate to the firmware that the
- request has been handled */
+ */
struct opregion_acpi *acpi;
+ struct acpi_bus_event *event = data;
+ int ret = NOTIFY_OK;
+
+ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+ return NOTIFY_DONE;
if (!system_opregion)
return NOTIFY_DONE;
acpi = system_opregion->acpi;
+
+ if (event->type == 0x80 && !(acpi->cevt & 0x1))
+ ret = NOTIFY_BAD;
+
acpi->csts = 0;
- return NOTIFY_OK;
+ return ret;
}
static struct notifier_block intel_opregion_notifier = {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9e2959b..d360380 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -773,14 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
if (ret != 0)
return ret;
- ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
- if (ret != 0)
- goto out_unpin;
-
ret = i915_gem_object_put_fence(new_bo);
if (ret)
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a06ff07..a9e0c7b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -83,11 +83,15 @@ intel_pch_panel_fitting(struct drm_device *dev,
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
if (scaled_width > scaled_height) { /* pillar */
width = scaled_height / mode->vdisplay;
+ if (width & 1)
+ width++;
x = (adjusted_mode->hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
height = scaled_width / mode->hdisplay;
+ if (height & 1)
+ height++;
y = (adjusted_mode->vdisplay - height + 1) / 2;
x = 0;
width = adjusted_mode->hdisplay;
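For illustration only, and not part of the patch: the pillarbox branch above, including the new round-up-to-even step, worked through for a hypothetical 1024x768 mode centred on a 1920x1080 panel.

#include <stdio.h>

int main(void)
{
	int mode_h = 1024, mode_v = 768;	/* requested mode */
	int panel_h = 1920, panel_v = 1080;	/* adjusted (native) mode */

	/* pillarbox: scale by height, centre horizontally */
	int width = (mode_h * panel_v) / mode_v;	/* 1440 */
	if (width & 1)		/* the new check: keep the fitter width even */
		width++;
	int x = (panel_h - width + 1) / 2;		/* 240 */

	printf("width=%d x=%d height=%d y=0\n", width, x, panel_v);
	return 0;
}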
@@ -273,7 +277,7 @@ void intel_panel_enable_backlight(struct drm_device *dev)
dev_priv->backlight_enabled = true;
}
-void intel_panel_setup_backlight(struct drm_device *dev)
+static void intel_panel_init_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -305,3 +309,73 @@ intel_panel_detect(struct drm_device *dev)
return connector_status_unknown;
}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+static int intel_panel_update_status(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ intel_panel_set_backlight(dev, bd->props.brightness);
+ return 0;
+}
+
+static int intel_panel_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ return intel_panel_get_backlight(dev);
+}
+
+static const struct backlight_ops intel_panel_bl_ops = {
+ .update_status = intel_panel_update_status,
+ .get_brightness = intel_panel_get_brightness,
+};
+
+int intel_panel_setup_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct backlight_properties props;
+ struct drm_connector *connector;
+
+ intel_panel_init_backlight(dev);
+
+ if (dev_priv->int_lvds_connector)
+ connector = dev_priv->int_lvds_connector;
+ else if (dev_priv->int_edp_connector)
+ connector = dev_priv->int_edp_connector;
+ else
+ return -ENODEV;
+
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = intel_panel_get_max_backlight(dev);
+ dev_priv->backlight =
+ backlight_device_register("intel_backlight",
+ &connector->kdev, dev,
+ &intel_panel_bl_ops, &props);
+
+ if (IS_ERR(dev_priv->backlight)) {
+ DRM_ERROR("Failed to register backlight: %ld\n",
+ PTR_ERR(dev_priv->backlight));
+ dev_priv->backlight = NULL;
+ return -ENODEV;
+ }
+ dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
+ return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev_priv->backlight)
+ backlight_device_unregister(dev_priv->backlight);
+}
+#else
+int intel_panel_setup_backlight(struct drm_device *dev)
+{
+ intel_panel_init_backlight(dev);
+ return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_device *dev)
+{
+ return;
+}
+#endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 95c4b14..c30626e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -236,7 +236,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
- obj->cache_level = I915_CACHE_LLC;
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true);
if (ret)
@@ -289,6 +290,10 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (IS_GEN6(dev) || IS_GEN7(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ GFX_MODE_ENABLE(GFX_REPLAY_MODE));
}
if (INTEL_INFO(dev)->gen >= 6) {
@@ -776,7 +781,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
- obj->cache_level = I915_CACHE_LLC;
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
@@ -1319,6 +1325,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->get_seqno = pc_render_get_seqno;
}
+ if (!I915_NEED_GFX_HWS(dev))
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 113e4e7..210d570 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1236,6 +1236,8 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
struct drm_connector *connector)
{
struct drm_encoder *encoder = &intel_tv->base.base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -1258,6 +1260,10 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
/* Poll for TV detection */
tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ if (intel_crtc->pipe == 1)
+ tv_ctl |= TV_ENC_PIPEB_SELECT;
+ else
+ tv_ctl &= ~TV_ENC_PIPEB_SELECT;
tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
@@ -1277,26 +1283,26 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
to_intel_crtc(intel_tv->base.base.crtc)->pipe);
type = -1;
- if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
- DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
- /*
- * A B C
- * 0 1 1 Composite
- * 1 0 X svideo
- * 0 0 0 Component
- */
- if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG_KMS("Detected Composite TV connection\n");
- type = DRM_MODE_CONNECTOR_Composite;
- } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG_KMS("Detected S-Video TV connection\n");
- type = DRM_MODE_CONNECTOR_SVIDEO;
- } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG_KMS("Detected Component TV connection\n");
- type = DRM_MODE_CONNECTOR_Component;
- } else {
- DRM_DEBUG_KMS("Unrecognised TV connection\n");
- }
+ tv_dac = I915_READ(TV_DAC);
+ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ type = -1;
}
I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 729d5fd..b311fab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -135,13 +135,14 @@ static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
int i;
if (dev_priv->card_type >= NV_50) {
- uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
-
- if (!vbios_vram)
- vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
+ u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8;
+ if (!addr) {
+ addr = (u64)nv_rd32(dev, 0x1700) << 16;
+ addr += 0xf0000;
+ }
old_bar0_pramin = nv_rd32(dev, 0x1700);
- nv_wr32(dev, 0x1700, vbios_vram >> 16);
+ nv_wr32(dev, 0x1700, addr >> 16);
}
/* bail if no rom signature */
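For illustration only, and not part of the patch: why load_vbios_pramin() now widens the shift to 64 bits. With the old u32 intermediate, a VBIOS image placed above 4GiB of VRAM had its address silently truncated; the register value below is hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0x01400000;	/* hypothetical 0x619f04 readback */

	uint32_t narrow = (reg & ~0xffu) << 8;		   /* old: wraps at 4GiB */
	uint64_t wide = (uint64_t)(reg & 0xffffff00) << 8; /* new: intact */

	printf("narrow=0x%08x wide=0x%010llx\n",
	       narrow, (unsigned long long)wide);
	return 0;
}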
@@ -5186,7 +5187,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
load_table_ptr = ROM16(bios->data[bitentry->offset]);
if (load_table_ptr == 0x0) {
- NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
+ NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n");
return -EINVAL;
}
@@ -5965,6 +5966,12 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
if (cte->type == DCB_CONNECTOR_HDMI_1)
cte->type = DCB_CONNECTOR_DVI_I;
}
+
+ /* Gigabyte GV-NX86T512H */
+ if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
+ if (cte->type == DCB_CONNECTOR_HDMI_1)
+ cte->type = DCB_CONNECTOR_DVI_I;
+ }
}
static const u8 hpd_gpio[16] = {
@@ -6377,6 +6384,37 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
}
}
+ /* Some other twisted XFX board (rhbz#694914)
+ *
+ * The DVI/VGA encoder combo that's supposed to represent the
+ * DVI-I connector actually points at two different ones, and
+ * the HDMI connector ends up paired with the VGA instead.
+ *
+ * The connector table is missing an entry for VGA entirely, pointing it
+ * at an invalid conntab entry 2, so we figure it out ourselves.
+ */
+ if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) {
+ if (idx == 0) {
+ *conn = 0x02002300; /* VGA, connector 2 */
+ *conf = 0x00000028;
+ } else
+ if (idx == 1) {
+ *conn = 0x01010312; /* DVI, connector 0 */
+ *conf = 0x00020030;
+ } else
+ if (idx == 2) {
+ *conn = 0x04020310; /* VGA, connector 0 */
+ *conf = 0x00000028;
+ } else
+ if (idx == 3) {
+ *conn = 0x02021322; /* HDMI, connector 1 */
+ *conf = 0x00020010;
+ } else {
+ *conn = 0x0000000e; /* EOL */
+ *conf = 0x00000000;
+ }
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2ad49cb..890d50e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,16 +49,12 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
- if (nvbo->vma.node) {
- nouveau_vm_unmap(&nvbo->vma);
- nouveau_vm_put(&nvbo->vma);
- }
kfree(nvbo);
}
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
- int *align, int *size, int *page_shift)
+ int *align, int *size)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
@@ -82,67 +78,51 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
}
}
} else {
- if (likely(dev_priv->chan_vm)) {
- if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
- *page_shift = dev_priv->chan_vm->lpg_shift;
- else
- *page_shift = dev_priv->chan_vm->spg_shift;
- } else {
- *page_shift = 12;
- }
-
- *size = roundup(*size, (1 << *page_shift));
- *align = max((1 << *page_shift), *align);
+ *size = roundup(*size, (1 << nvbo->page_shift));
+ *align = max((1 << nvbo->page_shift), *align);
}
*size = roundup(*size, PAGE_SIZE);
}
int
-nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
- int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, struct nouveau_bo **pnvbo)
+nouveau_bo_new(struct drm_device *dev, int size, int align,
+ uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
- int ret = 0, page_shift = 0;
+ int ret;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
return -ENOMEM;
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
+ INIT_LIST_HEAD(&nvbo->vma_list);
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
- align >>= PAGE_SHIFT;
-
- if (dev_priv->chan_vm) {
- ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
- NV_MEM_ACCESS_RW, &nvbo->vma);
- if (ret) {
- kfree(nvbo);
- return ret;
- }
+ nvbo->page_shift = 12;
+ if (dev_priv->bar1_vm) {
+ if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
+ nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
}
+ nouveau_bo_fixup_align(nvbo, flags, &align, &size);
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
- nvbo->channel = chan;
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
- ttm_bo_type_device, &nvbo->placement, align, 0,
- false, NULL, size, nouveau_bo_del_ttm);
+ ttm_bo_type_device, &nvbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, size,
+ nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
- nvbo->channel = NULL;
- if (nvbo->vma.node)
- nvbo->bo.offset = nvbo->vma.offset;
*pnvbo = nvbo;
return 0;
}
@@ -312,8 +292,6 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
- if (nvbo->vma.node)
- nvbo->bo.offset = nvbo->vma.offset;
return 0;
}
@@ -440,7 +418,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
- man->gpu_offset = dev_priv->gart_info.aper_base;
break;
default:
NV_ERROR(dev, "Unknown GART type: %d\n",
@@ -501,19 +478,12 @@ static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *old_node = old_mem->mm_node;
- struct nouveau_mem *new_node = new_mem->mm_node;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_mem *node = old_mem->mm_node;
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
u32 page_count = new_mem->num_pages;
- u64 src_offset, dst_offset;
int ret;
- src_offset = old_node->tmp_vma.offset;
- if (new_node->tmp_vma.node)
- dst_offset = new_node->tmp_vma.offset;
- else
- dst_offset = nvbo->vma.offset;
-
page_count = new_mem->num_pages;
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -547,19 +517,13 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *old_node = old_mem->mm_node;
- struct nouveau_mem *new_node = new_mem->mm_node;
+ struct nouveau_mem *node = old_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
- u64 src_offset, dst_offset;
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
int ret;
- src_offset = old_node->tmp_vma.offset;
- if (new_node->tmp_vma.node)
- dst_offset = new_node->tmp_vma.offset;
- else
- dst_offset = nvbo->vma.offset;
-
while (length) {
u32 amount, stride, height;
@@ -695,6 +659,27 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
}
static int
+nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
+ struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+{
+ struct nouveau_mem *node = mem->mm_node;
+ int ret;
+
+ ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
+ node->page_shift, NV_MEM_ACCESS_RO, vma);
+ if (ret)
+ return ret;
+
+ if (mem->mem_type == TTM_PL_VRAM)
+ nouveau_vm_map(vma, node);
+ else
+ nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+
+ return 0;
+}
+
+static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
@@ -711,31 +696,20 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
}
- /* create temporary vma for old memory, this will get cleaned
- * up after ttm destroys the ttm_mem_reg
+ /* create temporary vmas for the transfer and attach them to the
+ * old nouveau_mem node; these will get cleaned up after ttm has
+ * destroyed the ttm_mem_reg
*/
if (dev_priv->card_type >= NV_50) {
struct nouveau_mem *node = old_mem->mm_node;
- if (!node->tmp_vma.node) {
- u32 page_shift = nvbo->vma.node->type;
- if (old_mem->mem_type == TTM_PL_TT)
- page_shift = nvbo->vma.vm->spg_shift;
-
- ret = nouveau_vm_get(chan->vm,
- old_mem->num_pages << PAGE_SHIFT,
- page_shift, NV_MEM_ACCESS_RO,
- &node->tmp_vma);
- if (ret)
- goto out;
- }
- if (old_mem->mem_type == TTM_PL_VRAM)
- nouveau_vm_map(&node->tmp_vma, node);
- else {
- nouveau_vm_map_sg(&node->tmp_vma, 0,
- old_mem->num_pages << PAGE_SHIFT,
- node, node->pages);
- }
+ ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
+ if (ret)
+ goto out;
+
+ ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+ if (ret)
+ goto out;
}
if (dev_priv->card_type < NV_50)
@@ -762,7 +736,6 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
@@ -782,23 +755,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- if (dev_priv->card_type >= NV_50) {
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_mem *node = tmp_mem.mm_node;
- struct nouveau_vma *vma = &nvbo->vma;
- if (vma->node->type != vma->vm->spg_shift)
- vma = &node->tmp_vma;
- nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
- node, node->pages);
- }
-
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
-
- if (dev_priv->card_type >= NV_50) {
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- nouveau_vm_unmap(&nvbo->vma);
- }
-
if (ret)
goto out;
@@ -844,30 +801,22 @@ out:
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_mem *node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_vma *vma = &nvbo->vma;
- struct nouveau_vm *vm = vma->vm;
-
- if (dev_priv->card_type < NV_50)
- return;
-
- switch (new_mem->mem_type) {
- case TTM_PL_VRAM:
- nouveau_vm_map(vma, node);
- break;
- case TTM_PL_TT:
- if (vma->node->type != vm->spg_shift) {
+ struct nouveau_vma *vma;
+
+ list_for_each_entry(vma, &nvbo->vma_list, head) {
+ if (new_mem->mem_type == TTM_PL_VRAM) {
+ nouveau_vm_map(vma, new_mem->mm_node);
+ } else
+ if (new_mem->mem_type == TTM_PL_TT &&
+ nvbo->page_shift == vma->vm->spg_shift) {
+ nouveau_vm_map_sg(vma, 0, new_mem->
+ num_pages << PAGE_SHIFT,
+ node, node->pages);
+ } else {
nouveau_vm_unmap(vma);
- vma = &node->tmp_vma;
}
- nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
- node, node->pages);
- break;
- default:
- nouveau_vm_unmap(&nvbo->vma);
- break;
}
}
@@ -1113,3 +1062,54 @@ struct ttm_bo_driver nouveau_bo_driver = {
.io_mem_free = &nouveau_ttm_io_mem_free,
};
+struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
+{
+ struct nouveau_vma *vma;
+ list_for_each_entry(vma, &nvbo->vma_list, head) {
+ if (vma->vm == vm)
+ return vma;
+ }
+
+ return NULL;
+}
+
+int
+nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
+ struct nouveau_vma *vma)
+{
+ const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ struct nouveau_mem *node = nvbo->bo.mem.mm_node;
+ int ret;
+
+ ret = nouveau_vm_get(vm, size, nvbo->page_shift,
+ NV_MEM_ACCESS_RW, vma);
+ if (ret)
+ return ret;
+
+ if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+ nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
+ else
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ nouveau_vm_map_sg(vma, 0, size, node, node->pages);
+
+ list_add_tail(&vma->head, &nvbo->vma_list);
+ vma->refcount = 1;
+ return 0;
+}
+
+void
+nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+ if (vma->node) {
+ if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
+ nouveau_vm_unmap(vma);
+ }
+
+ nouveau_vm_put(vma);
+ list_del(&vma->head);
+ }
+}
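For illustration only, and not part of the patch: a simplified, self-contained model of the data-structure change above, where a buffer object now carries a list of per-VM mappings instead of a single embedded vma. The struct names and the malloc-based list are stand-ins, not the nouveau types.

#include <stdio.h>
#include <stdlib.h>

struct vm { int id; };

struct vma {
	struct vm *vm;
	unsigned long offset;
	int refcount;
	struct vma *next;
};

struct bo { struct vma *vma_list; };

/* mirrors nouveau_bo_vma_find(): at most one mapping per VM, looked up by VM */
static struct vma *bo_vma_find(struct bo *bo, struct vm *vm)
{
	for (struct vma *v = bo->vma_list; v; v = v->next)
		if (v->vm == vm)
			return v;
	return NULL;
}

/* mirrors nouveau_bo_vma_add(): create and link a mapping for this VM */
static struct vma *bo_vma_add(struct bo *bo, struct vm *vm, unsigned long offset)
{
	struct vma *v = calloc(1, sizeof(*v));

	if (!v)
		return NULL;
	v->vm = vm;
	v->offset = offset;
	v->refcount = 1;
	v->next = bo->vma_list;
	bo->vma_list = v;
	return v;
}

int main(void)
{
	struct vm vm_a = { 0 }, vm_b = { 1 };
	struct bo bo = { NULL };

	bo_vma_add(&bo, &vm_a, 0x100000);
	bo_vma_add(&bo, &vm_b, 0x200000);

	printf("vm_b offset: 0x%lx\n", bo_vma_find(&bo, &vm_b)->offset);
	return 0;
}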
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a7583a8..b0d753f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,40 +27,63 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
static int
-nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
+nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
{
+ u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_bo *pb = chan->pushbuf_bo;
- struct nouveau_gpuobj *pushbuf = NULL;
- int ret = 0;
+ int ret;
+
+ /* allocate buffer object */
+ ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_map(chan->pushbuf_bo);
+ if (ret)
+ goto out;
+ /* create a DMA object covering the entire memtype where the push
+ * buffer resides; userspace can submit its own push buffers from
+ * anywhere within the same memtype.
+ */
+ chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
if (dev_priv->card_type >= NV_50) {
+ ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
+ &chan->pushbuf_vma);
+ if (ret)
+ goto out;
+
if (dev_priv->card_type < NV_C0) {
ret = nouveau_gpuobj_dma_new(chan,
NV_CLASS_DMA_IN_MEMORY, 0,
(1ULL << 40),
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_VM,
- &pushbuf);
+ &chan->pushbuf);
}
- chan->pushbuf_base = pb->bo.offset;
+ chan->pushbuf_base = chan->pushbuf_vma.offset;
} else
- if (pb->bo.mem.mem_type == TTM_PL_TT) {
+ if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->gart_info.aper_size,
NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_GART, &pushbuf);
- chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+ NV_MEM_TARGET_GART,
+ &chan->pushbuf);
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_VRAM, &pushbuf);
- chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+ NV_MEM_TARGET_VRAM,
+ &chan->pushbuf);
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of its
* exact reason for existing :) PCI access to cmdbuf in
@@ -70,47 +93,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
pci_resource_start(dev->pdev, 1),
dev_priv->fb_available_size,
NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_PCI, &pushbuf);
- chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+ NV_MEM_TARGET_PCI,
+ &chan->pushbuf);
}
- nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
- nouveau_gpuobj_ref(NULL, &pushbuf);
- return ret;
-}
-
-static struct nouveau_bo *
-nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
-{
- struct nouveau_bo *pushbuf = NULL;
- int location, ret;
-
- if (nouveau_vram_pushbuf)
- location = TTM_PL_FLAG_VRAM;
- else
- location = TTM_PL_FLAG_TT;
-
- ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
- if (ret) {
- NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
- return NULL;
- }
-
- ret = nouveau_bo_pin(pushbuf, location);
- if (ret) {
- NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
- nouveau_bo_ref(NULL, &pushbuf);
- return NULL;
- }
-
- ret = nouveau_bo_map(pushbuf);
+out:
if (ret) {
- nouveau_bo_unpin(pushbuf);
- nouveau_bo_ref(NULL, &pushbuf);
- return NULL;
+ NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
+ nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
+ nouveau_gpuobj_ref(NULL, &chan->pushbuf);
+ if (chan->pushbuf_bo) {
+ nouveau_bo_unmap(chan->pushbuf_bo);
+ nouveau_bo_ref(NULL, &chan->pushbuf_bo);
+ }
}
- return pushbuf;
+ return 0;
}
/* allocates and initializes a fifo for user space consumption */
@@ -121,6 +119,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_channel *chan;
unsigned long flags;
int ret;
@@ -160,19 +159,14 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
INIT_LIST_HEAD(&chan->nvsw.flip);
INIT_LIST_HEAD(&chan->fence.pending);
- /* Allocate DMA push buffer */
- chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
- if (!chan->pushbuf_bo) {
- ret = -ENOMEM;
- NV_ERROR(dev, "pushbuf %d\n", ret);
+ /* setup channel's memory and vm */
+ ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
+ if (ret) {
+ NV_ERROR(dev, "gpuobj %d\n", ret);
nouveau_channel_put(&chan);
return ret;
}
- nouveau_dma_pre_init(chan);
- chan->user_put = 0x40;
- chan->user_get = 0x44;
-
/* Allocate space for per-channel fixed notifier memory */
ret = nouveau_notifier_init_channel(chan);
if (ret) {
@@ -181,21 +175,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return ret;
}
- /* Setup channel's default objects */
- ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
+ /* Allocate DMA push buffer */
+ ret = nouveau_channel_pushbuf_init(chan);
if (ret) {
- NV_ERROR(dev, "gpuobj %d\n", ret);
+ NV_ERROR(dev, "pushbuf %d\n", ret);
nouveau_channel_put(&chan);
return ret;
}
- /* Create a dma object for the push buffer */
- ret = nouveau_channel_pushbuf_ctxdma_init(chan);
- if (ret) {
- NV_ERROR(dev, "pbctxdma %d\n", ret);
- nouveau_channel_put(&chan);
- return ret;
- }
+ nouveau_dma_pre_init(chan);
+ chan->user_put = 0x40;
+ chan->user_get = 0x44;
/* disable the fifo caches */
pfifo->reassign(dev, false);
@@ -220,6 +210,11 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
nouveau_debugfs_channel_init(chan);
NV_DEBUG(dev, "channel %d initialised\n", chan->id);
+ if (fpriv) {
+ spin_lock(&fpriv->lock);
+ list_add(&chan->list, &fpriv->channels);
+ spin_unlock(&fpriv->lock);
+ }
*chan_ret = chan;
return 0;
}
@@ -236,29 +231,23 @@ nouveau_channel_get_unlocked(struct nouveau_channel *ref)
}
struct nouveau_channel *
-nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+nouveau_channel_get(struct drm_file *file_priv, int id)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_channel *chan;
- unsigned long flags;
-
- if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
- return ERR_PTR(-EINVAL);
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-
- if (unlikely(!chan))
- return ERR_PTR(-EINVAL);
- if (unlikely(file_priv && chan->file_priv != file_priv)) {
- nouveau_channel_put_unlocked(&chan);
- return ERR_PTR(-EINVAL);
+ spin_lock(&fpriv->lock);
+ list_for_each_entry(chan, &fpriv->channels, list) {
+ if (chan->id == id) {
+ chan = nouveau_channel_get_unlocked(chan);
+ spin_unlock(&fpriv->lock);
+ mutex_lock(&chan->mutex);
+ return chan;
+ }
}
+ spin_unlock(&fpriv->lock);
- mutex_lock(&chan->mutex);
- return chan;
+ return ERR_PTR(-EINVAL);
}
void
@@ -312,12 +301,14 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
/* destroy any resources the channel owned */
nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
+ nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
nouveau_bo_unmap(chan->pushbuf_bo);
nouveau_bo_unpin(chan->pushbuf_bo);
nouveau_bo_ref(NULL, &chan->pushbuf_bo);
}
- nouveau_gpuobj_channel_takedown(chan);
+ nouveau_ramht_ref(NULL, &chan->ramht, chan);
nouveau_notifier_takedown_channel(chan);
+ nouveau_gpuobj_channel_takedown(chan);
nouveau_channel_ref(NULL, pchan);
}
@@ -383,10 +374,11 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
for (i = 0; i < engine->fifo.channels; i++) {
- chan = nouveau_channel_get(dev, file_priv, i);
+ chan = nouveau_channel_get(file_priv, i);
if (IS_ERR(chan))
continue;
+ list_del(&chan->list);
atomic_dec(&chan->users);
nouveau_channel_put(&chan);
}
@@ -459,10 +451,11 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
struct drm_nouveau_channel_free *req = data;
struct nouveau_channel *chan;
- chan = nouveau_channel_get(dev, file_priv, req->channel);
+ chan = nouveau_channel_get(file_priv, req->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
+ list_del(&chan->list);
atomic_dec(&chan->users);
nouveau_channel_put(&chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1595d0b..939d4df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -40,7 +40,7 @@
static void nouveau_connector_hotplug(void *, int);
static struct nouveau_encoder *
-find_encoder_by_type(struct drm_connector *connector, int type)
+find_encoder(struct drm_connector *connector, int type)
{
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder;
@@ -170,8 +170,8 @@ nouveau_connector_of_detect(struct drm_connector *connector)
struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
if (!dn ||
- !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) ||
- (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG))))
+ !((nv_encoder = find_encoder(connector, OUTPUT_TMDS)) ||
+ (nv_encoder = find_encoder(connector, OUTPUT_ANALOG))))
return NULL;
for_each_child_of_node(dn, cn) {
@@ -233,6 +233,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
+ struct nouveau_encoder *nv_partner;
struct nouveau_i2c_chan *i2c;
int type;
@@ -266,19 +267,22 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
* same i2c channel so the value returned from ddc_detect
* isn't necessarily correct.
*/
- if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+ nv_partner = NULL;
+ if (nv_encoder->dcb->type == OUTPUT_TMDS)
+ nv_partner = find_encoder(connector, OUTPUT_ANALOG);
+ if (nv_encoder->dcb->type == OUTPUT_ANALOG)
+ nv_partner = find_encoder(connector, OUTPUT_TMDS);
+
+ if (nv_partner && ((nv_encoder->dcb->type == OUTPUT_ANALOG &&
+ nv_partner->dcb->type == OUTPUT_TMDS) ||
+ (nv_encoder->dcb->type == OUTPUT_TMDS &&
+ nv_partner->dcb->type == OUTPUT_ANALOG))) {
if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
type = OUTPUT_TMDS;
else
type = OUTPUT_ANALOG;
- nv_encoder = find_encoder_by_type(connector, type);
- if (!nv_encoder) {
- NV_ERROR(dev, "Detected %d encoder on %s, "
- "but no object!\n", type,
- drm_get_connector_name(connector));
- return connector_status_disconnected;
- }
+ nv_encoder = find_encoder(connector, type);
}
nouveau_connector_set_encoder(connector, nv_encoder);
@@ -292,9 +296,9 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
}
detect_analog:
- nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
+ nv_encoder = find_encoder(connector, OUTPUT_ANALOG);
if (!nv_encoder && !nouveau_tv_disable)
- nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
+ nv_encoder = find_encoder(connector, OUTPUT_TV);
if (nv_encoder && force) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_encoder_helper_funcs *helper =
@@ -327,7 +331,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+ nv_encoder = find_encoder(connector, OUTPUT_LVDS);
if (!nv_encoder)
return connector_status_disconnected;
@@ -405,7 +409,7 @@ nouveau_connector_force(struct drm_connector *connector)
} else
type = OUTPUT_ANY;
- nv_encoder = find_encoder_by_type(connector, type);
+ nv_encoder = find_encoder(connector, type);
if (!nv_encoder) {
NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
drm_get_connector_name(connector));
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 568caed..00bc6ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -167,8 +167,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
int delta, int length)
{
struct nouveau_bo *pb = chan->pushbuf_bo;
- uint64_t offset = bo->bo.offset + delta;
+ struct nouveau_vma *vma;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
+ u64 offset;
+
+ vma = nouveau_bo_vma_find(bo, chan->vm);
+ BUG_ON(!vma);
+ offset = vma->offset + delta;
BUG_ON(chan->dma.ib_free < 1);
nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 02c6f37..b30ddd8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -73,7 +73,7 @@ int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
MODULE_PARM_DESC(noaccel, "Disable all acceleration");
-int nouveau_noaccel = 0;
+int nouveau_noaccel = -1;
module_param_named(noaccel, nouveau_noaccel, int, 0400);
MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
@@ -119,6 +119,10 @@ MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
int nouveau_msi;
module_param_named(msi, nouveau_msi, int, 0400);
+MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
+int nouveau_ctxfw;
+module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -210,10 +214,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pfifo->unload_context(dev);
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
- if (dev_priv->eng[e]) {
- ret = dev_priv->eng[e]->fini(dev, e);
- if (ret)
- goto out_abort;
+ if (!dev_priv->eng[e])
+ continue;
+
+ ret = dev_priv->eng[e]->fini(dev, e, true);
+ if (ret) {
+ NV_ERROR(dev, "... engine %d failed: %d\n", i, ret);
+ goto out_abort;
}
}
@@ -354,7 +361,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
+ u32 offset = nv_crtc->cursor.nvbo->bo.offset;
nv_crtc->cursor.set_offset(nv_crtc, offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
@@ -389,7 +396,9 @@ static struct drm_driver driver = {
.firstopen = nouveau_firstopen,
.lastclose = nouveau_lastclose,
.unload = nouveau_unload,
+ .open = nouveau_open,
.preclose = nouveau_preclose,
+ .postclose = nouveau_postclose,
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
.debugfs_init = nouveau_debugfs_init,
.debugfs_cleanup = nouveau_debugfs_takedown,
@@ -420,6 +429,8 @@ static struct drm_driver driver = {
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
+ .gem_open_object = nouveau_gem_object_open,
+ .gem_close_object = nouveau_gem_object_close,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9c56331..d7d51de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -46,9 +46,17 @@
#include "ttm/ttm_module.h"
struct nouveau_fpriv {
- struct ttm_object_file *tfile;
+ spinlock_t lock;
+ struct list_head channels;
+ struct nouveau_vm *vm;
};
+static inline struct nouveau_fpriv *
+nouveau_fpriv(struct drm_file *file_priv)
+{
+ return file_priv ? file_priv->driver_priv : NULL;
+}
+
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
#include "nouveau_drm.h"
@@ -69,7 +77,7 @@ struct nouveau_mem {
struct drm_device *dev;
struct nouveau_vma bar_vma;
- struct nouveau_vma tmp_vma;
+ struct nouveau_vma vma[2];
u8 page_shift;
struct drm_mm_node *tag;
@@ -107,7 +115,8 @@ struct nouveau_bo {
struct nouveau_channel *channel;
- struct nouveau_vma vma;
+ struct list_head vma_list;
+ unsigned page_shift;
uint32_t tile_mode;
uint32_t tile_flags;
@@ -176,9 +185,10 @@ struct nouveau_gpuobj {
uint32_t flags;
u32 size;
- u32 pinst;
- u32 cinst;
- u64 vinst;
+ u32 pinst; /* PRAMIN BAR offset */
+ u32 cinst; /* Channel offset */
+ u64 vinst; /* VRAM address */
+ u64 linst; /* VM address */
uint32_t engine;
uint32_t class;
@@ -201,6 +211,7 @@ enum nouveau_channel_mutex_class {
struct nouveau_channel {
struct drm_device *dev;
+ struct list_head list;
int id;
/* references to the channel data structure */
@@ -228,15 +239,18 @@ struct nouveau_channel {
uint32_t sequence;
uint32_t sequence_ack;
atomic_t last_sequence_irq;
+ struct nouveau_vma vma;
} fence;
/* DMA push buffer */
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
+ struct nouveau_vma pushbuf_vma;
uint32_t pushbuf_base;
/* Notifier memory */
struct nouveau_bo *notifier_bo;
+ struct nouveau_vma notifier_vma;
struct drm_mm notifier_heap;
/* PFIFO context */
@@ -278,6 +292,7 @@ struct nouveau_channel {
uint32_t sw_subchannel[8];
+ struct nouveau_vma dispc_vma[2];
struct {
struct nouveau_gpuobj *vblsem;
uint32_t vblsem_head;
@@ -297,7 +312,7 @@ struct nouveau_channel {
struct nouveau_exec_engine {
void (*destroy)(struct drm_device *, int engine);
int (*init)(struct drm_device *, int engine);
- int (*fini)(struct drm_device *, int engine);
+ int (*fini)(struct drm_device *, int engine, bool suspend);
int (*context_new)(struct nouveau_channel *, int engine);
void (*context_del)(struct nouveau_channel *, int engine);
int (*object_new)(struct nouveau_channel *, int engine,
@@ -314,7 +329,8 @@ struct nouveau_instmem_engine {
int (*suspend)(struct drm_device *dev);
void (*resume)(struct drm_device *dev);
- int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+ int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
+ u32 size, u32 align);
void (*put)(struct nouveau_gpuobj *);
int (*map)(struct nouveau_gpuobj *);
void (*unmap)(struct nouveau_gpuobj *);
@@ -445,9 +461,9 @@ struct nouveau_pm_level {
struct nouveau_pm_temp_sensor_constants {
u16 offset_constant;
s16 offset_mult;
- u16 offset_div;
- u16 slope_mult;
- u16 slope_div;
+ s16 offset_div;
+ s16 slope_mult;
+ s16 slope_div;
};
struct nouveau_pm_threshold_temp {
@@ -488,7 +504,10 @@ struct nouveau_pm_engine {
};
struct nouveau_vram_engine {
+ struct nouveau_mm *mm;
+
int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *dev);
int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
u32 type, struct nouveau_mem **);
void (*put)(struct drm_device *, struct nouveau_mem **);
@@ -608,6 +627,7 @@ enum nouveau_card_type {
struct drm_nouveau_private {
struct drm_device *dev;
+ bool noaccel;
/* the card type, takes NV_* as values */
enum nouveau_card_type card_type;
@@ -700,7 +720,6 @@ struct drm_nouveau_private {
/* VRAM/fb configuration */
uint64_t vram_size;
uint64_t vram_sys_base;
- u32 vram_rblock_size;
uint64_t fb_phys;
uint64_t fb_available_size;
@@ -784,12 +803,15 @@ extern int nouveau_override_conntype;
extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
extern int nouveau_msi;
+extern int nouveau_ctxfw;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
/* nouveau_state.c */
+extern int nouveau_open(struct drm_device *, struct drm_file *);
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+extern void nouveau_postclose(struct drm_device *, struct drm_file *);
extern int nouveau_load(struct drm_device *, unsigned long flags);
extern int nouveau_firstopen(struct drm_device *);
extern void nouveau_lastclose(struct drm_device *);
@@ -847,7 +869,7 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
extern struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *);
extern struct nouveau_channel *
-nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+nouveau_channel_get(struct drm_file *, int id);
extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
extern void nouveau_channel_put(struct nouveau_channel **);
extern void nouveau_channel_ref(struct nouveau_channel *chan,
@@ -1120,7 +1142,6 @@ extern int nvc0_fifo_unload_context(struct drm_device *);
/* nv04_graph.c */
extern int nv04_graph_create(struct drm_device *);
-extern void nv04_graph_fifo_access(struct drm_device *, bool);
extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data);
@@ -1169,7 +1190,8 @@ extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
extern int nv04_instmem_suspend(struct drm_device *);
extern void nv04_instmem_resume(struct drm_device *);
-extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
+ u32 size, u32 align);
extern void nv04_instmem_put(struct nouveau_gpuobj *);
extern int nv04_instmem_map(struct nouveau_gpuobj *);
extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
@@ -1180,7 +1202,8 @@ extern int nv50_instmem_init(struct drm_device *);
extern void nv50_instmem_takedown(struct drm_device *);
extern int nv50_instmem_suspend(struct drm_device *);
extern void nv50_instmem_resume(struct drm_device *);
-extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
+ u32 size, u32 align);
extern void nv50_instmem_put(struct nouveau_gpuobj *);
extern int nv50_instmem_map(struct nouveau_gpuobj *);
extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
@@ -1247,10 +1270,9 @@ extern int nv04_crtc_create(struct drm_device *, int index);
/* nouveau_bo.c */
extern struct ttm_bo_driver nouveau_bo_driver;
-extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
- int size, int align, uint32_t flags,
- uint32_t tile_mode, uint32_t tile_flags,
- struct nouveau_bo **);
+extern int nouveau_bo_new(struct drm_device *, int size, int align,
+ uint32_t flags, uint32_t tile_mode,
+ uint32_t tile_flags, struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1265,6 +1287,12 @@ extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_reserve, bool no_wait_gpu);
+extern struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
+extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
+ struct nouveau_vma *);
+extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
+
/* nouveau_fence.c */
struct nouveau_fence;
extern int nouveau_fence_init(struct drm_device *);
@@ -1310,12 +1338,14 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
}
/* nouveau_gem.c */
-extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
- int size, int align, uint32_t domain,
- uint32_t tile_mode, uint32_t tile_flags,
- struct nouveau_bo **);
+extern int nouveau_gem_new(struct drm_device *, int size, int align,
+ uint32_t domain, uint32_t tile_mode,
+ uint32_t tile_flags, struct nouveau_bo **);
extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
+extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
+extern void nouveau_gem_object_close(struct drm_gem_object *,
+ struct drm_file *);
extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index a3a88ad..95c843e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -30,6 +30,7 @@
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
+ struct nouveau_vma vma;
u32 r_dma;
u32 r_format;
u32 r_pitch;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 39aee6d..14a8627 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -279,6 +279,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
struct fb_info *info;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd mode_cmd;
struct pci_dev *pdev = dev->pdev;
@@ -296,8 +297,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
- ret = nouveau_gem_new(dev, dev_priv->channel, size, 0,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
+ ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
+ 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(dev, "failed to allocate framebuffer\n");
goto out;
@@ -318,6 +319,15 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
goto out;
}
+ chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
+ if (chan && dev_priv->card_type >= NV_50) {
+ ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
+ if (ret) {
+ NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
+ chan = NULL;
+ }
+ }
+
mutex_lock(&dev->struct_mutex);
info = framebuffer_alloc(0, device);
@@ -448,6 +458,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
+ nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 7347075..c919cfc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -336,6 +336,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
+ u64 offset = chan->fence.vma.offset + sema->mem->start;
int ret;
if (dev_priv->chipset < 0x84) {
@@ -345,13 +346,10 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
OUT_RING (chan, NvSema);
- OUT_RING (chan, sema->mem->start);
+ OUT_RING (chan, offset);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
- struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
- u64 offset = vma->offset + sema->mem->start;
-
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
@@ -364,9 +362,6 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
OUT_RING (chan, 1);
OUT_RING (chan, 1); /* ACQUIRE_EQ */
} else {
- struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
- u64 offset = vma->offset + sema->mem->start;
-
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
@@ -394,6 +389,7 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
+ u64 offset = chan->fence.vma.offset + sema->mem->start;
int ret;
if (dev_priv->chipset < 0x84) {
@@ -403,14 +399,11 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvSema);
- OUT_RING (chan, sema->mem->start);
+ OUT_RING (chan, offset);
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
- struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
- u64 offset = vma->offset + sema->mem->start;
-
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
@@ -423,9 +416,6 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
OUT_RING (chan, 1);
OUT_RING (chan, 2); /* RELEASE */
} else {
- struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
- u64 offset = vma->offset + sema->mem->start;
-
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
@@ -540,6 +530,13 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
+ } else
+ if (USE_SEMA(dev)) {
+ /* map fence bo into channel's vm */
+ ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
+ &chan->fence.vma);
+ if (ret)
+ return ret;
}
INIT_LIST_HEAD(&chan->fence.pending);
@@ -551,10 +548,10 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *tmp, *fence;
spin_lock(&chan->fence.lock);
-
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);
@@ -564,8 +561,9 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
kref_put(&fence->refcount, nouveau_fence_del);
}
-
spin_unlock(&chan->fence.lock);
+
+ nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}
int
@@ -577,7 +575,7 @@ nouveau_fence_init(struct drm_device *dev)
/* Create a shared VRAM heap for cross-channel sync. */
if (USE_SEMA(dev)) {
- ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
0, 0, &dev_priv->fence.bo);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b52e460..5f0bc57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -60,9 +60,71 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
}
int
-nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
- int size, int align, uint32_t domain, uint32_t tile_mode,
- uint32_t tile_flags, struct nouveau_bo **pnvbo)
+nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
+{
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_vma *vma;
+ int ret;
+
+ if (!fpriv->vm)
+ return 0;
+
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ if (ret)
+ return ret;
+
+ vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+ if (!vma) {
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (!vma) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
+ if (ret) {
+ kfree(vma);
+ goto out;
+ }
+ } else {
+ vma->refcount++;
+ }
+
+out:
+ ttm_bo_unreserve(&nvbo->bo);
+ return ret;
+}
+
+void
+nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
+{
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_vma *vma;
+ int ret;
+
+ if (!fpriv->vm)
+ return;
+
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ if (ret)
+ return;
+
+ vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+ if (vma) {
+ if (--vma->refcount == 0) {
+ nouveau_bo_vma_del(nvbo, vma);
+ kfree(vma);
+ }
+ }
+ ttm_bo_unreserve(&nvbo->bo);
+}
+
+int
+nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
+ uint32_t tile_mode, uint32_t tile_flags,
+ struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
@@ -76,7 +138,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
- ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
+ ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
tile_flags, pnvbo);
if (ret)
return ret;
@@ -103,17 +165,28 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
}
static int
-nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
+nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
+ struct drm_nouveau_gem_info *rep)
{
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_vma *vma;
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
- rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->offset = nvbo->bo.offset;
+ if (fpriv->vm) {
+ vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+ if (!vma)
+ return -EINVAL;
+
+ rep->offset = vma->offset;
+ }
+
+ rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->map_handle = nvbo->bo.addr_space_offset;
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
@@ -127,7 +200,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
- struct nouveau_channel *chan = NULL;
int ret = 0;
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
@@ -138,28 +210,21 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (req->channel_hint) {
- chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
- }
-
- ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
+ ret = nouveau_gem_new(dev, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
- if (chan)
- nouveau_channel_put(&chan);
if (ret)
return ret;
- ret = nouveau_gem_info(nvbo->gem, &req->info);
- if (ret)
- goto out;
-
ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+ if (ret == 0) {
+ ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+ if (ret)
+ drm_gem_handle_delete(file_priv, req->info.handle);
+ }
+
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(nvbo->gem);
-out:
return ret;
}
@@ -318,6 +383,7 @@ static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
(void __force __user *)(uintptr_t)user_pbbo_ptr;
struct drm_device *dev = chan->dev;
@@ -356,24 +422,26 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- if (nvbo->bo.offset == b->presumed.offset &&
- ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
- b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
- (nvbo->bo.mem.mem_type == TTM_PL_TT &&
- b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
- continue;
+ if (dev_priv->card_type < NV_50) {
+ if (nvbo->bo.offset == b->presumed.offset &&
+ ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+ (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
+ continue;
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
- else
- b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
- b->presumed.offset = nvbo->bo.offset;
- b->presumed.valid = 0;
- relocs++;
-
- if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
- &b->presumed, sizeof(b->presumed)))
- return -EFAULT;
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
+ else
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ b->presumed.offset = nvbo->bo.offset;
+ b->presumed.valid = 0;
+ relocs++;
+
+ if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+ &b->presumed, sizeof(b->presumed)))
+ return -EFAULT;
+ }
}
return relocs;
@@ -548,7 +616,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
- chan = nouveau_channel_get(dev, file_priv, req->channel);
+ chan = nouveau_channel_get(file_priv, req->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
@@ -782,7 +850,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
if (!gem)
return -ENOENT;
- ret = nouveau_gem_info(gem, req);
+ ret = nouveau_gem_info(file_priv, gem, req);
drm_gem_object_unreference_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 2ba7265..868c7fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -79,7 +79,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
int i;
stat = nv_rd32(dev, NV03_PMC_INTR_0);
- if (!stat)
+ if (stat == 0 || stat == ~0)
return IRQ_NONE;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5ee14d2..f9ae2fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
dma_bits = 40;
} else
- if (0 && drm_pci_device_is_pcie(dev) &&
+ if (0 && pci_is_pcie(dev->pdev) &&
dev_priv->chipset > 0x40 &&
dev_priv->chipset != 0x45) {
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
@@ -423,38 +423,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- /* reserve space at end of VRAM for PRAMIN */
- if (dev_priv->card_type >= NV_50) {
- dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
- } else
- if (dev_priv->card_type >= NV_40) {
- u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
- u32 rsvd;
-
- /* estimate grctx size, the magics come from nv40_grctx.c */
- if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
- else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
- else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
- else rsvd = 0x4a40 * vs;
- rsvd += 16 * 1024;
- rsvd *= dev_priv->engine.fifo.channels;
-
- /* pciegart table */
- if (drm_pci_device_is_pcie(dev))
- rsvd += 512 * 1024;
-
- /* object storage */
- rsvd += 512 * 1024;
-
- dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
- } else {
- dev_priv->ramin_rsvd_vram = 512 * 1024;
- }
-
- ret = dev_priv->engine.vram.init(dev);
- if (ret)
- return ret;
-
NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
if (dev_priv->vram_sys_base) {
NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
@@ -479,7 +447,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
}
if (dev_priv->card_type < NV_50) {
- ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
0, 0, &dev_priv->vga_ram);
if (ret == 0)
ret = nouveau_bo_pin(dev_priv->vga_ram,
@@ -729,37 +697,31 @@ nouveau_mem_timing_fini(struct drm_device *dev)
}
static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
- struct nouveau_mm *mm;
- u64 size, block, rsvd;
- int ret;
-
- rsvd = (256 * 1024); /* vga memory */
- size = (p_size << PAGE_SHIFT) - rsvd;
- block = dev_priv->vram_rblock_size;
-
- ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
- if (ret)
- return ret;
-
- man->priv = mm;
+ /* nothing to do */
return 0;
}
static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
- struct nouveau_mm *mm = man->priv;
- int ret;
+ /* nothing to do */
+ return 0;
+}

- ret = nouveau_mm_fini(&mm);
- if (ret)
- return ret;
+static inline void
+nouveau_mem_node_cleanup(struct nouveau_mem *node)
+{
+ if (node->vma[0].node) {
+ nouveau_vm_unmap(&node->vma[0]);
+ nouveau_vm_put(&node->vma[0]);
+ }

- man->priv = NULL;
- return 0;
+ if (node->vma[1].node) {
+ nouveau_vm_unmap(&node->vma[1]);
+ nouveau_vm_put(&node->vma[1]);
+ }
}
static void
@@ -768,14 +730,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
- struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = dev_priv->dev;
- if (node->tmp_vma.node) {
- nouveau_vm_unmap(&node->tmp_vma);
- nouveau_vm_put(&node->tmp_vma);
- }
-
+ nouveau_mem_node_cleanup(mem->mm_node);
vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
@@ -794,7 +751,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
int ret;
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
- size_nc = 1 << nvbo->vma.node->type;
+ size_nc = 1 << nvbo->page_shift;
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
@@ -804,9 +761,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
return (ret == -ENOSPC) ? 0 : ret;
}
- node->page_shift = 12;
- if (nvbo->vma.node)
- node->page_shift = nvbo->vma.node->type;
+ node->page_shift = nvbo->page_shift;
mem->mm_node = node;
mem->start = node->offset >> PAGE_SHIFT;
@@ -862,15 +817,9 @@ static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node = mem->mm_node;
-
- if (node->tmp_vma.node) {
- nouveau_vm_unmap(&node->tmp_vma);
- nouveau_vm_put(&node->tmp_vma);
- }
-
+ nouveau_mem_node_cleanup(mem->mm_node);
+ kfree(mem->mm_node);
mem->mm_node = NULL;
- kfree(node);
}
static int
@@ -880,11 +829,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_vma *vma = &nvbo->vma;
- struct nouveau_vm *vm = vma->vm;
struct nouveau_mem *node;
- int ret;
if (unlikely((mem->num_pages << PAGE_SHIFT) >=
dev_priv->gart_info.aper_size))
@@ -893,24 +838,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
+ node->page_shift = 12;
- /* This node must be for evicting large-paged VRAM
- * to system memory. Due to a nv50 limitation of
- * not being able to mix large/small pages within
- * the same PDE, we need to create a temporary
- * small-paged VMA for the eviction.
- */
- if (vma->node->type != vm->spg_shift) {
- ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
- vm->spg_shift, NV_MEM_ACCESS_RW,
- &node->tmp_vma);
- if (ret) {
- kfree(node);
- return ret;
- }
- }
-
- node->page_shift = nvbo->vma.node->type;
mem->mm_node = node;
mem->start = 0;
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 7609756..1640dec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -158,11 +158,18 @@ int
nouveau_mm_fini(struct nouveau_mm **prmm)
{
struct nouveau_mm *rmm = *prmm;
- struct nouveau_mm_node *heap =
+ struct nouveau_mm_node *node, *heap =
list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
- if (!list_is_singular(&rmm->nodes))
+ if (!list_is_singular(&rmm->nodes)) {
+ printk(KERN_ERR "nouveau_mm not empty at destroy time!\n");
+ list_for_each_entry(node, &rmm->nodes, nl_entry) {
+ printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
+ node->type, node->offset, node->length);
+ }
+ WARN_ON(1);
return -EBUSY;
+ }
kfree(heap);
kfree(rmm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 1f7483a..b9c016d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -52,6 +52,7 @@ int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
int nv50_vram_init(struct drm_device *);
+void nv50_vram_fini(struct drm_device *);
int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
u32 memtype, struct nouveau_mem **);
void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 5b39718..6abdbe6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,6 +34,7 @@ int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *ntfy = NULL;
uint32_t flags, ttmpl;
int ret;
@@ -46,7 +47,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
ttmpl = TTM_PL_FLAG_TT;
}
- ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
+ ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
@@ -58,14 +59,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
if (ret)
goto out_err;
+ if (dev_priv->card_type >= NV_50) {
+ ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
+ if (ret)
+ goto out_err;
+ }
+
ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
if (ret)
goto out_err;
chan->notifier_bo = ntfy;
out_err:
- if (ret)
+ if (ret) {
+ nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
drm_gem_object_unreference_unlocked(ntfy->gem);
+ }
return ret;
}
@@ -78,6 +87,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
if (!chan->notifier_bo)
return;
+ nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
nouveau_bo_unmap(chan->notifier_bo);
mutex_lock(&dev->struct_mutex);
nouveau_bo_unpin(chan->notifier_bo);
@@ -122,10 +132,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
target = NV_MEM_TARGET_VRAM;
else
target = NV_MEM_TARGET_GART;
- offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+ offset = chan->notifier_bo->bo.offset;
} else {
target = NV_MEM_TARGET_VM;
- offset = chan->notifier_bo->vma.offset;
+ offset = chan->notifier_vma.offset;
}
offset += mem->start;
@@ -183,7 +193,7 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
if (unlikely(dev_priv->card_type >= NV_C0))
return -EINVAL;
- chan = nouveau_channel_get(dev, file_priv, na->channel);
+ chan = nouveau_channel_get(file_priv, na->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 8f97016..159b7c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -125,7 +125,7 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (chid > 0 && chid < dev_priv->engine.fifo.channels)
+ if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
chan = dev_priv->channels.ptr[chid];
if (chan)
ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -191,7 +191,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
spin_unlock(&dev_priv->ramin_lock);
- if (chan) {
+ if (!(flags & NVOBJ_FLAG_VM) && chan) {
ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
if (ramin)
ramin = drm_mm_get_block(ramin, size, align);
@@ -208,7 +208,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
gpuobj->vinst = ramin->start + chan->ramin->vinst;
gpuobj->node = ramin;
} else {
- ret = instmem->get(gpuobj, size, align);
+ ret = instmem->get(gpuobj, chan, size, align);
if (ret) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
@@ -690,35 +690,64 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
return 0;
}
+static int
+nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *pgd = NULL;
+ struct nouveau_vm_pgd *vpgd;
+ int ret, i;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
+ if (ret)
+ return ret;
+
+ /* create page directory for this vm if none currently exists,
+ * will be destroyed automagically when last reference to the
+ * vm is removed
+ */
+ if (list_empty(&vm->pgd_list)) {
+ ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
+ if (ret)
+ return ret;
+ }
+ nouveau_vm_ref(vm, &chan->vm, pgd);
+ nouveau_gpuobj_ref(NULL, &pgd);
+
+ /* point channel at vm's page directory */
+ vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0208, 0xffffffff);
+ nv_wo32(chan->ramin, 0x020c, 0x000000ff);
+
+ /* map display semaphore buffers into channel's vm */
+ for (i = 0; i < 2; i++) {
+ struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
+
+ ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
+ &chan->dispc_vma[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
uint32_t vram_h, uint32_t tt_h)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
+ struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
int ret, i;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
-
- if (dev_priv->card_type == NV_C0) {
- struct nouveau_vm *vm = dev_priv->chan_vm;
- struct nouveau_vm_pgd *vpgd;
-
- ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
- &chan->ramin);
- if (ret)
- return ret;
-
- nouveau_vm_ref(vm, &chan->vm, NULL);
-
- vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
- nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
- nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
- nv_wo32(chan->ramin, 0x0208, 0xffffffff);
- nv_wo32(chan->ramin, 0x020c, 0x000000ff);
- return 0;
- }
+ if (dev_priv->card_type == NV_C0)
+ return nvc0_gpuobj_channel_init(chan, vm);
/* Allocate a chunk of memory for per-channel object storage */
ret = nouveau_gpuobj_channel_init_pramin(chan);
@@ -731,7 +760,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
* - Allocate per-channel page-directory
* - Link with shared channel VM
*/
- if (dev_priv->chan_vm) {
+ if (vm) {
u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
u64 vm_vinst = chan->ramin->vinst + pgd_offs;
u32 vm_pinst = chan->ramin->pinst;
@@ -744,7 +773,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
if (ret)
return ret;
- nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
+ nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
}
/* RAMHT */
@@ -768,7 +797,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct nouveau_gpuobj *sem = NULL;
struct nv50_display_crtc *dispc =
&nv50_display(dev)->crtc[i];
- u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+ u64 offset = dispc->sem.bo->bo.offset;
ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
NV_MEM_ACCESS_RW,
@@ -841,13 +870,22 @@ void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;

NV_DEBUG(dev, "ch%d\n", chan->id);

- nouveau_ramht_ref(NULL, &chan->ramht, chan);
+ if (dev_priv->card_type >= NV_50) {
+ struct nv50_display *disp = nv50_display(dev);
+
+ for (i = 0; i < 2; i++) {
+ struct nv50_display_crtc *dispc = &disp->crtc[i];
+ nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
+ }

- nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
- nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+ nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
+ nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+ }

if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
@@ -909,7 +947,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
if (init->handle == ~0)
return -EINVAL;
- chan = nouveau_channel_get(dev, file_priv, init->channel);
+ chan = nouveau_channel_get(file_priv, init->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
@@ -936,7 +974,7 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+ chan = nouveau_channel_get(file_priv, objfree->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 82fad91..2706cb3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
return -ENOMEM;
nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
- if (!nvbe->ttm_alloced)
+ if (!nvbe->ttm_alloced) {
+ kfree(nvbe->pages);
+ nvbe->pages = NULL;
return -ENOMEM;
+ }
nvbe->nr_pages = 0;
while (num_pages--) {
@@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
- dma_offset += NV_CTXDMA_PAGE_SIZE;
+ offset_l += NV_CTXDMA_PAGE_SIZE;
}
}
@@ -429,7 +432,7 @@ nouveau_sgdma_init(struct drm_device *dev)
u32 aper_size, align;
int ret;
- if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
+ if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
aper_size = 512 * 1024 * 1024;
else
aper_size = 64 * 1024 * 1024;
@@ -458,7 +461,7 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_HW;
dev_priv->gart_info.func = &nv50_sgdma_backend;
} else
- if (0 && drm_pci_device_is_pcie(dev) &&
+ if (0 && pci_is_pcie(dev->pdev) &&
dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
if (nv44_graph_class(dev)) {
dev_priv->gart_info.func = &nv44_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 731acea..10656e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -91,6 +91,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
engine->vram.init = nouveau_mem_detect;
+ engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x10:
@@ -139,6 +140,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
engine->vram.init = nouveau_mem_detect;
+ engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x20:
@@ -187,6 +189,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
engine->vram.init = nouveau_mem_detect;
+ engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x30:
@@ -237,6 +240,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->vram.init = nouveau_mem_detect;
+ engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x40:
@@ -289,6 +293,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->pm.temp_get = nv40_temp_get;
engine->vram.init = nouveau_mem_detect;
+ engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x50:
@@ -366,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
else
engine->pm.temp_get = nv40_temp_get;
engine->vram.init = nv50_vram_init;
+ engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nv50_vram_new;
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nv50_vram_flags_valid;
@@ -411,9 +417,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
engine->vram.init = nvc0_vram_init;
+ engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nvc0_vram_new;
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nvc0_vram_flags_valid;
+ engine->pm.temp_get = nv84_temp_get;
break;
default:
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -447,8 +455,8 @@ nouveau_card_init_channel(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
- ret = nouveau_channel_alloc(dev, &dev_priv->channel,
- (struct drm_file *)-2, NvDmaFB, NvDmaTT);
+ ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
+ NvDmaFB, NvDmaTT);
if (ret)
return ret;
@@ -527,7 +535,7 @@ nouveau_card_init(struct drm_device *dev)
nouveau_pm_init(dev);
- ret = nouveau_mem_vram_init(dev);
+ ret = engine->vram.init(dev);
if (ret)
goto out_bios;
@@ -539,10 +547,14 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_gpuobj;
- ret = nouveau_mem_gart_init(dev);
+ ret = nouveau_mem_vram_init(dev);
if (ret)
goto out_instmem;
+ ret = nouveau_mem_gart_init(dev);
+ if (ret)
+ goto out_ttmvram;
+
/* PMC */
ret = engine->mc.init(dev);
if (ret)
@@ -563,7 +575,7 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_timer;
- if (!nouveau_noaccel) {
+ if (!dev_priv->noaccel) {
switch (dev_priv->card_type) {
case NV_04:
nv04_graph_create(dev);
@@ -675,14 +687,14 @@ out_vblank:
drm_vblank_cleanup(dev);
engine->display.destroy(dev);
out_fifo:
- if (!nouveau_noaccel)
+ if (!dev_priv->noaccel)
engine->fifo.takedown(dev);
out_engine:
- if (!nouveau_noaccel) {
+ if (!dev_priv->noaccel) {
for (e = e - 1; e >= 0; e--) {
if (!dev_priv->eng[e])
continue;
- dev_priv->eng[e]->fini(dev, e);
+ dev_priv->eng[e]->fini(dev, e, false);
dev_priv->eng[e]->destroy(dev,e );
}
}
@@ -696,12 +708,14 @@ out_mc:
engine->mc.takedown(dev);
out_gart:
nouveau_mem_gart_fini(dev);
+out_ttmvram:
+ nouveau_mem_vram_fini(dev);
out_instmem:
engine->instmem.takedown(dev);
out_gpuobj:
nouveau_gpuobj_takedown(dev);
out_vram:
- nouveau_mem_vram_fini(dev);
+ engine->vram.takedown(dev);
out_bios:
nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
@@ -718,16 +732,21 @@ static void nouveau_card_takedown(struct drm_device *dev)
struct nouveau_engine *engine = &dev_priv->engine;
int e;
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_fini(dev);
+
if (dev_priv->channel) {
- nouveau_fence_fini(dev);
nouveau_channel_put_unlocked(&dev_priv->channel);
+ nouveau_fence_fini(dev);
}
- if (!nouveau_noaccel) {
+ engine->display.destroy(dev);
+
+ if (!dev_priv->noaccel) {
engine->fifo.takedown(dev);
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
if (dev_priv->eng[e]) {
- dev_priv->eng[e]->fini(dev, e);
+ dev_priv->eng[e]->fini(dev, e, false);
dev_priv->eng[e]->destroy(dev,e );
}
}
@@ -748,10 +767,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
mutex_unlock(&dev->struct_mutex);
nouveau_mem_gart_fini(dev);
+ nouveau_mem_vram_fini(dev);
engine->instmem.takedown(dev);
nouveau_gpuobj_takedown(dev);
- nouveau_mem_vram_fini(dev);
+ engine->vram.takedown(dev);
nouveau_irq_fini(dev);
drm_vblank_cleanup(dev);
@@ -762,6 +782,41 @@ static void nouveau_card_takedown(struct drm_device *dev)
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
+int
+nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fpriv *fpriv;
+ int ret;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (unlikely(!fpriv))
+ return -ENOMEM;
+
+ spin_lock_init(&fpriv->lock);
+ INIT_LIST_HEAD(&fpriv->channels);
+
+ if (dev_priv->card_type == NV_50) {
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
+ &fpriv->vm);
+ if (ret) {
+ kfree(fpriv);
+ return ret;
+ }
+ } else
+ if (dev_priv->card_type >= NV_C0) {
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
+ &fpriv->vm);
+ if (ret) {
+ kfree(fpriv);
+ return ret;
+ }
+ }
+
+ file_priv->driver_priv = fpriv;
+ return 0;
+}
+
/* here a client dies, release the stuff that was allocated for its
* file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
@@ -769,6 +824,14 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
nouveau_channel_cleanup(dev, file_priv);
}
+void
+nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ nouveau_vm_ref(NULL, &fpriv->vm, NULL);
+ kfree(fpriv);
+}
+
/* first module load, setup the mmio/fb mapping */
/* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
@@ -933,6 +996,25 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
+ /* Determine whether we'll attempt acceleration or not, some
+ * cards are disabled by default here due to them being known
+ * non-functional, or never been tested due to lack of hw.
+ */
+ dev_priv->noaccel = !!nouveau_noaccel;
+ if (nouveau_noaccel == -1) {
+ switch (dev_priv->chipset) {
+ case 0xc1: /* known broken */
+ case 0xc8: /* never tested */
+ NV_INFO(dev, "acceleration disabled by default, pass "
+ "noaccel=0 to force enable\n");
+ dev_priv->noaccel = true;
+ break;
+ default:
+ dev_priv->noaccel = false;
+ break;
+ }
+ }
+
ret = nouveau_remove_conflicting_drivers(dev);
if (ret)
goto err_mmio;
@@ -997,11 +1079,7 @@ void nouveau_lastclose(struct drm_device *dev)
int nouveau_unload(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- drm_kms_helper_poll_fini(dev);
- nouveau_fbcon_fini(dev);
- engine->display.destroy(dev);
nouveau_card_takedown(dev);
iounmap(dev_priv->mmio);
@@ -1031,7 +1109,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_BUS_TYPE:
if (drm_pci_device_is_agp(dev))
getparam->value = NV_AGP;
- else if (drm_pci_device_is_pcie(dev))
+ else if (pci_is_pcie(dev->pdev))
getparam->value = NV_PCIE;
else
getparam->value = NV_PCI;
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 649b041..081ca7b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -43,7 +43,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
/* Set the default sensor's contants */
sensor->offset_constant = 0;
- sensor->offset_mult = 1;
+ sensor->offset_mult = 0;
sensor->offset_div = 1;
sensor->slope_mult = 1;
sensor->slope_div = 1;
@@ -99,6 +99,13 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
sensor->slope_mult = 431;
sensor->slope_div = 10000;
break;
+
+ case 0x67:
+ sensor->offset_mult = -26149;
+ sensor->offset_div = 100;
+ sensor->slope_mult = 484;
+ sensor->slope_div = 10000;
+ break;
}
}
@@ -109,7 +116,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
/* Read the entries from the table */
for (i = 0; i < entries; i++) {
- u16 value = ROM16(temp[1]);
+ s16 value = ROM16(temp[1]);
switch (temp[0]) {
case 0x01:
@@ -160,8 +167,8 @@ nv40_sensor_setup(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
- u32 offset = sensor->offset_mult / sensor->offset_div;
- u32 sensor_calibration;
+ s32 offset = sensor->offset_mult / sensor->offset_div;
+ s32 sensor_calibration;
/* set up the sensors */
sensor_calibration = 120 - offset - sensor->offset_constant;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 519a6b4..244fd38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -369,23 +369,26 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
}
static void
-nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
struct nouveau_vm_pgd *vpgd, *tmp;
+ struct nouveau_gpuobj *pgd = NULL;

- if (!pgd)
+ if (!mpgd)
return;
mutex_lock(&vm->mm->mutex);
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- if (vpgd->obj != pgd)
- continue;
-
- list_del(&vpgd->head);
- nouveau_gpuobj_ref(NULL, &vpgd->obj);
- kfree(vpgd);
+ if (vpgd->obj == mpgd) {
+ pgd = vpgd->obj;
+ list_del(&vpgd->head);
+ kfree(vpgd);
+ break;
+ }
}
mutex_unlock(&vm->mm->mutex);
+
+ nouveau_gpuobj_ref(NULL, &pgd);
}
static void
@@ -396,8 +399,8 @@ nouveau_vm_del(struct nouveau_vm *vm)
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
nouveau_vm_unlink(vm, vpgd->obj);
}
- WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
+ nouveau_mm_fini(&vm->mm);
kfree(vm->pgt);
kfree(vm);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index c48a9fc..579ca8c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -41,6 +41,8 @@ struct nouveau_vm_pgd {
};
struct nouveau_vma {
+ struct list_head head;
+ int refcount;
struct nouveau_vm *vm;
struct nouveau_mm_node *node;
u64 offset;
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index f1a3ae4..5e45398 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
- struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
- struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct drm_framebuffer *drm_fb;
+ struct nouveau_framebuffer *fb;
int arb_burst, arb_lwm;
int ret;
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+
+ /* no fb bound */
+ if (!atomic && !crtc->fb) {
+ NV_DEBUG_KMS(dev, "No FB bound\n");
+ return 0;
+ }
+
+
/* If atomic, we want to switch to the fb we were passed, so
* now we update pointers to do that. (We don't pin; just
* assume we're already pinned and update the base address.)
@@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
drm_fb = passed_fb;
fb = nouveau_framebuffer(passed_fb);
} else {
+ drm_fb = crtc->fb;
+ fb = nouveau_framebuffer(crtc->fb);
/* If not atomic, we can go ahead and pin, and unpin the
* old fb we were passed.
*/
@@ -1035,7 +1046,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index 3626ee7..dbdea8e 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -450,13 +450,13 @@ nv04_graph_context_del(struct nouveau_channel *chan, int engine)
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv04_graph_fifo_access(dev, false);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
/* Unload the context if it's the currently active one */
if (nv04_graph_channel(dev) == chan)
nv04_graph_unload_context(dev);
- nv04_graph_fifo_access(dev, true);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -538,24 +538,18 @@ nv04_graph_init(struct drm_device *dev, int engine)
}
static int
-nv04_graph_fini(struct drm_device *dev, int engine)
+nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ return -EBUSY;
+ }
nv04_graph_unload_context(dev);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
return 0;
}
-void
-nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
-{
- if (enabled)
- nv_wr32(dev, NV04_PGRAPH_FIFO,
- nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
- else
- nv_wr32(dev, NV04_PGRAPH_FIFO,
- nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
-}
-
static int
nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index b8611b9..c1248e0 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -28,6 +28,31 @@ int nv04_instmem_init(struct drm_device *dev)
/* RAMIN always available */
dev_priv->ramin_available = true;
+ /* Reserve space at end of VRAM for PRAMIN */
+ if (dev_priv->card_type >= NV_40) {
+ u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
+ u32 rsvd;
+
+ /* estimate grctx size, the magics come from nv40_grctx.c */
+ if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
+ else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
+ else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
+ else rsvd = 0x4a40 * vs;
+ rsvd += 16 * 1024;
+ rsvd *= dev_priv->engine.fifo.channels;
+
+ /* pciegart table */
+ if (pci_is_pcie(dev->pdev))
+ rsvd += 512 * 1024;
+
+ /* object storage */
+ rsvd += 512 * 1024;
+
+ dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
+ } else {
+ dev_priv->ramin_rsvd_vram = 512 * 1024;
+ }
+
/* Setup shared RAMHT */
ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
NVOBJ_FLAG_ZERO_ALLOC, &ramht);
@@ -112,7 +137,8 @@ nv04_instmem_resume(struct drm_device *dev)
}
int
-nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
+ u32 size, u32 align)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_mm_node *ramin = NULL;
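
The PRAMIN reservation added to nv04_instmem_init() above is a plain size estimate. Below is a minimal sketch of the same arithmetic with made-up chipset/vertex-shader/channel inputs; pramin_rsvd() and round_up_pow2() are hypothetical helpers, only the constants mirror the hunk.

#include <stdio.h>
#include <stdint.h>

static uint32_t round_up_pow2(uint32_t x, uint32_t a) { return (x + a - 1) & ~(a - 1); }

static uint32_t pramin_rsvd(int chipset, int nv44_class, uint32_t vs,
                            uint32_t channels, int is_pcie)
{
	uint32_t rsvd;

	/* grctx size estimate per vertex-shader unit, magics as in the hunk */
	if (chipset == 0x40)      rsvd = 0x6aa0 * vs;
	else if (chipset < 0x43)  rsvd = 0x4f00 * vs;
	else if (nv44_class)      rsvd = 0x4980 * vs;
	else                      rsvd = 0x4a40 * vs;
	rsvd += 16 * 1024;
	rsvd *= channels;                      /* one estimate per FIFO channel */

	if (is_pcie)
		rsvd += 512 * 1024;            /* PCIE GART table */
	rsvd += 512 * 1024;                    /* object storage */

	return round_up_pow2(rsvd, 4096);
}

int main(void)
{
	/* hypothetical NV43-class board: 1 VS unit, 32 channels, PCIE */
	printf("PRAMIN reserved: %u KiB\n",
	       (unsigned)(pramin_rsvd(0x43, 0, 1, 32, 1) / 1024));
	return 0;
}
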
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 0930c6c..7255e4a 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -708,8 +708,8 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
- nv04_graph_fifo_access(dev, true);
- nv04_graph_fifo_access(dev, false);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
/* Restore the FIFO state */
for (i = 0; i < ARRAY_SIZE(fifo); i++)
@@ -879,13 +879,13 @@ nv10_graph_context_del(struct nouveau_channel *chan, int engine)
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv04_graph_fifo_access(dev, false);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
/* Unload the context if it's the currently active one */
if (nv10_graph_channel(dev) == chan)
nv10_graph_unload_context(dev);
- nv04_graph_fifo_access(dev, true);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -957,8 +957,13 @@ nv10_graph_init(struct drm_device *dev, int engine)
}
static int
-nv10_graph_fini(struct drm_device *dev, int engine)
+nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ return -EBUSY;
+ }
nv10_graph_unload_context(dev);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
return 0;
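
Several engines in this series (nv04/nv10/nv20/nv50 PGRAPH, and the copy/crypt/mpeg units) switch to a fini() hook that takes a suspend flag and may fail. The toy model below captures that control flow, assuming nothing beyond what the hunks show; the toy_engine type and its helpers are invented for illustration.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* toy stand-in for a PGRAPH engine: 'busy' = the engine still has work queued */
struct toy_engine { bool fifo_enabled; bool busy; bool ctx_loaded; };

static bool wait_for_idle(struct toy_engine *e) { return !e->busy; }

static int toy_graph_fini(struct toy_engine *e, bool suspend)
{
	e->fifo_enabled = false;                /* block new PGRAPH work */
	if (!wait_for_idle(e) && suspend) {
		e->fifo_enabled = true;         /* back out: caller aborts suspend */
		return -EBUSY;
	}
	e->ctx_loaded = false;                  /* unload the current context */
	return 0;
}

int main(void)
{
	struct toy_engine e = { true, true, true };
	printf("suspend while busy: %d\n", toy_graph_fini(&e, true));  /* -EBUSY */
	e.busy = false;
	printf("suspend when idle:  %d\n", toy_graph_fini(&e, true));  /* 0 */
	return 0;
}

The point of the -EBUSY path is that a suspend is aborted rather than saving the context of an engine that never went idle; outside of suspend, the hook proceeds as before.
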
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index affc7d7..183e375 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -454,13 +454,13 @@ nv20_graph_context_del(struct nouveau_channel *chan, int engine)
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv04_graph_fifo_access(dev, false);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
/* Unload the context if it's the currently active one */
if (nv10_graph_channel(dev) == chan)
nv20_graph_unload_context(dev);
- nv04_graph_fifo_access(dev, true);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -654,8 +654,13 @@ nv30_graph_init(struct drm_device *dev, int engine)
}
int
-nv20_graph_fini(struct drm_device *dev, int engine)
+nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ return -EBUSY;
+ }
nv20_graph_unload_context(dev);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
return 0;
@@ -753,6 +758,7 @@ nv20_graph_create(struct drm_device *dev)
break;
default:
NV_ERROR(dev, "PGRAPH: unknown chipset\n");
+ kfree(pgraph);
return 0;
}
} else {
@@ -774,6 +780,7 @@ nv20_graph_create(struct drm_device *dev)
break;
default:
NV_ERROR(dev, "PGRAPH: unknown chipset\n");
+ kfree(pgraph);
return 0;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 5beb01b..ba14a93 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -35,89 +35,6 @@ struct nv40_graph_engine {
u32 grctx_size;
};
-static struct nouveau_channel *
-nv40_graph_channel(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *grctx;
- uint32_t inst;
- int i;
-
- inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
- if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
- return NULL;
- inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (!dev_priv->channels.ptr[i])
- continue;
-
- grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
- if (grctx && grctx->pinst == inst)
- return dev_priv->channels.ptr[i];
- }
-
- return NULL;
-}
-
-static int
-nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
-{
- uint32_t old_cp, tv = 1000, tmp;
- int i;
-
- old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-
- tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
- tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
- NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
-
- tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
- tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
-
- nouveau_wait_for_idle(dev);
-
- for (i = 0; i < tv; i++) {
- if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
- break;
- }
-
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
-
- if (i == tv) {
- uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
- NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
- NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
- ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
- ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
- NV_ERROR(dev, "0x40030C = 0x%08x\n",
- nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-nv40_graph_unload_context(struct drm_device *dev)
-{
- uint32_t inst;
- int ret;
-
- inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
- if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
- return 0;
- inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
-
- ret = nv40_graph_transfer_context(dev, inst, 1);
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
- return ret;
-}
-
static int
nv40_graph_context_new(struct nouveau_channel *chan, int engine)
{
@@ -163,16 +80,16 @@ nv40_graph_context_del(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *grctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 inst = 0x01000000 | (grctx->pinst >> 4);
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv04_graph_fifo_access(dev, false);
-
- /* Unload the context if it's the currently active one */
- if (nv40_graph_channel(dev) == chan)
- nv40_graph_unload_context(dev);
-
- nv04_graph_fifo_access(dev, true);
+ nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
+ if (nv_rd32(dev, 0x40032c) == inst)
+ nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
+ if (nv_rd32(dev, 0x400330) == inst)
+ nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
+ nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -429,9 +346,20 @@ nv40_graph_init(struct drm_device *dev, int engine)
}
static int
-nv40_graph_fini(struct drm_device *dev, int engine)
+nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
- nv40_graph_unload_context(dev);
+ u32 inst = nv_rd32(dev, 0x40032c);
+ if (inst & 0x01000000) {
+ nv_wr32(dev, 0x400720, 0x00000000);
+ nv_wr32(dev, 0x400784, inst);
+ nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
+ nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
+ if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
+ u32 insn = nv_rd32(dev, 0x400308);
+ NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
+ }
+ nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv40_mpeg.c
index 6d2af29..ad03a0e 100644
--- a/drivers/gpu/drm/nouveau/nv40_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv40_mpeg.c
@@ -137,7 +137,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine)
}
static int
-nv40_mpeg_fini(struct drm_device *dev, int engine)
+nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
/*XXX: context save? */
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index ebabacf..5d98907 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -104,7 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
OUT_RING(evo, nv_crtc->lut.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF :
NV50_EVO_CRTC_CLUT_MODE_ON);
- OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+ OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
if (dev_priv->chipset != 0x50) {
BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
@@ -372,7 +372,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nouveau_bo_unmap(cursor);
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
nv_crtc->cursor.show(nv_crtc, true);
out:
@@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
- struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct drm_framebuffer *drm_fb;
+ struct nouveau_framebuffer *fb;
int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ /* no fb bound */
+ if (!atomic && !crtc->fb) {
+ NV_DEBUG_KMS(dev, "No FB bound\n");
+ return 0;
+ }
+
/* If atomic, we want to switch to the fb we were passed, so
* now we update pointers to do that. (We don't pin; just
* assume we're already pinned and update the base address.)
@@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
drm_fb = passed_fb;
fb = nouveau_framebuffer(passed_fb);
} else {
+ drm_fb = crtc->fb;
+ fb = nouveau_framebuffer(crtc->fb);
/* If not atomic, we can go ahead and pin, and unpin the
* old fb we were passed.
*/
@@ -546,7 +554,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
}
- nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
+ nv_crtc->fb.offset = fb->nvbo->bo.offset;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -747,7 +755,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
}
nv_crtc->lut.depth = 0;
- ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
@@ -773,7 +781,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 08da478..db1a5f4 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -415,8 +415,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* synchronise with the rendering channel, if necessary */
if (likely(chan)) {
- u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
-
ret = RING_SPACE(chan, 10);
if (ret) {
WIND_RING(evo);
@@ -438,6 +436,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
else
OUT_RING (chan, chan->vram_handle);
} else {
+ u64 offset = chan->dispc_vma[nv_crtc->index].offset;
+ offset += dispc->sem.offset;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
@@ -484,7 +484,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x0800, 5);
- OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+ OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (fb->height << 16) | fb->width);
OUT_RING (evo, nv_fb->r_pitch);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index c8e83c1..c99d975 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -38,6 +38,7 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
return;
*pevo = NULL;
+ nouveau_ramht_ref(NULL, &evo->ramht, evo);
nouveau_gpuobj_channel_takedown(evo);
nouveau_bo_unmap(evo->pushbuf_bo);
nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -116,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
evo->user_get = 4;
evo->user_put = 0;
- ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
&evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -153,7 +154,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
{
struct drm_device *dev = evo->dev;
int id = evo->id, ret, i;
- u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
+ u64 pushbuf = evo->pushbuf_bo->bo.offset;
u32 tmp;
tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
@@ -331,16 +332,15 @@ nv50_evo_create(struct drm_device *dev)
if (ret)
goto err;
- ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, &dispc->sem.bo);
if (!ret) {
- offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
-
ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
if (!ret)
ret = nouveau_bo_map(dispc->sem.bo);
if (ret)
nouveau_bo_ref(NULL, &dispc->sem.bo);
+ offset = dispc->sem.bo->bo.offset;
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 791ded1..dc75a72 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -159,7 +159,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+ struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
int ret, format;
switch (info->var.bits_per_pixel) {
@@ -247,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
- OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, upper_32_bits(fb->vma.offset));
+ OUT_RING(chan, lower_32_bits(fb->vma.offset));
BEGIN_RING(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -256,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
- OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, upper_32_bits(fb->vma.offset));
+ OUT_RING(chan, lower_32_bits(fb->vma.offset));
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 40680f2b..d43c46c 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -124,7 +124,6 @@ static void
nv50_graph_init_reset(struct drm_device *dev)
{
uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
-
NV_DEBUG(dev, "\n");
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
@@ -254,9 +253,13 @@ nv50_graph_init(struct drm_device *dev, int engine)
}
static int
-nv50_graph_fini(struct drm_device *dev, int engine)
+nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
- NV_DEBUG(dev, "\n");
+ nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
+ if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
+ nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
+ return -EBUSY;
+ }
nv50_graph_unload_context(dev);
nv_wr32(dev, 0x40013c, 0x00000000);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 4f95a1e..a7c12c9 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -305,9 +305,9 @@ struct nv50_gpuobj_node {
u32 align;
};
-
int
-nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
+ u32 size, u32 align)
{
struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -336,7 +336,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
flags |= NV_MEM_ACCESS_SYS;
- ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
+ ret = nouveau_vm_get(chan->vm, size, 12, flags,
&node->chan_vma);
if (ret) {
vram->put(dev, &node->vram);
@@ -345,7 +345,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
}
nouveau_vm_map(&node->chan_vma, node->vram);
- gpuobj->vinst = node->chan_vma.offset;
+ gpuobj->linst = node->chan_vma.offset;
}
gpuobj->size = size;
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index 1dc5913..b57a2d1 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -160,7 +160,7 @@ nv50_mpeg_init(struct drm_device *dev, int engine)
}
static int
-nv50_mpeg_fini(struct drm_device *dev, int engine)
+nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
/*XXX: context save for s/r */
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index c25c5938..ffe8b48 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -318,6 +318,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
uint32_t tmp;
tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
+ if (!tmp)
+ tmp = nv_rd32(dev, 0x610798 + (or * 8));
switch ((tmp & 0x00000f00) >> 8) {
case 8:
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 1a0dd49..40b84f2 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -156,7 +156,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
pinstmem->flush(vm->dev);
/* BAR */
- if (vm != dev_priv->chan_vm) {
+ if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
nv50_vm_flush_engine(vm->dev, 6);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index ffbc3d8..af32dae 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -51,9 +51,7 @@ void
nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
- struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm *mm = dev_priv->engine.vram.mm;
struct nouveau_mm_node *this;
struct nouveau_mem *mem;
@@ -84,9 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
u32 memtype, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
- struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm *mm = dev_priv->engine.vram.mm;
struct nouveau_mm_node *r;
struct nouveau_mem *mem;
int comp = (memtype & 0x300) >> 8;
@@ -190,22 +186,35 @@ int
nv50_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 rblock, length;
dev_priv->vram_size = nv_rd32(dev, 0x10020c);
dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
dev_priv->vram_size &= 0xffffffff00ULL;
- switch (dev_priv->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf:
+ /* IGPs, no funky reordering happens here, they don't have VRAM */
+ if (dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac ||
+ dev_priv->chipset == 0xaf) {
dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
- dev_priv->vram_rblock_size = 4096;
- break;
- default:
- dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
- break;
+ rblock = 4096 >> 12;
+ } else {
+ rblock = nv50_vram_rblock(dev) >> 12;
}
- return 0;
+ length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
+
+ return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
+}
+
+void
+nv50_vram_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+
+ nouveau_mm_fini(&vram->mm);
}
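
For the reworked nv50_vram_init() above, the bookkeeping is easiest to follow with everything expressed in 4 KiB pages before the range is handed to the VRAM mm. A small sketch with made-up totals (the 512 MiB board and 64 KiB reorder block are example values, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_size = 512ull << 20;          /* hypothetical 512 MiB board */
	uint32_t rblock    = 65536 >> 12;           /* reorder block size, pages  */
	uint32_t rsvd_head = ( 256 * 1024) >> 12;   /* vga memory                 */
	uint32_t rsvd_tail = (1024 * 1024) >> 12;   /* vbios etc                  */
	uint64_t length    = (vram_size >> 12) - rsvd_head - rsvd_tail;

	printf("mm range: first page %u, %llu pages, %u pages per block\n",
	       (unsigned)rsvd_head, (unsigned long long)length, (unsigned)rblock);
	return 0;
}
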
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index 75b809a..edece9c 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -138,7 +138,7 @@ nv84_crypt_isr(struct drm_device *dev)
}
static int
-nv84_crypt_fini(struct drm_device *dev, int engine)
+nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_wr32(dev, 0x102140, 0x00000000);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index b86820a..8f356d5 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -140,7 +140,7 @@ nva3_copy_init(struct drm_device *dev, int engine)
}
static int
-nva3_copy_fini(struct drm_device *dev, int engine)
+nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c
index 208fa7a..dddf006 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.c
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.c
@@ -48,14 +48,14 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *ctx = NULL;
int ret;
- ret = nouveau_gpuobj_new(dev, NULL, 256, 256,
+ ret = nouveau_gpuobj_new(dev, chan, 256, 256,
NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
NVOBJ_FLAG_ZERO_ALLOC, &ctx);
if (ret)
return ret;
- nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst));
- nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst));
+ nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
+ nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
dev_priv->engine.instmem.flush(dev);
chan->engctx[engine] = ctx;
@@ -127,7 +127,7 @@ nvc0_copy_init(struct drm_device *dev, int engine)
}
static int
-nvc0_copy_fini(struct drm_device *dev, int engine)
+nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 26a9960..08e6b11 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,16 +23,80 @@
*/
#include "drmP.h"
-
+#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+struct nvc0_fb_priv {
+ struct page *r100c10_page;
+ dma_addr_t r100c10;
+};
+
+static void
+nvc0_fb_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nvc0_fb_priv *priv = pfb->priv;
+
+ if (priv->r100c10_page) {
+ pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c10_page);
+ }
+
+ kfree(priv);
+ pfb->priv = NULL;
+}
+
+static int
+nvc0_fb_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nvc0_fb_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pfb->priv = priv;
+
+ priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!priv->r100c10_page) {
+ nvc0_fb_destroy(dev);
+ return -ENOMEM;
+ }
+
+ priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
+ nvc0_fb_destroy(dev);
+ return -EFAULT;
+ }
+
+ return 0;
+}
int
nvc0_fb_init(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fb_priv *priv;
+ int ret;
+
+ if (!dev_priv->engine.fb.priv) {
+ ret = nvc0_fb_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = dev_priv->engine.fb.priv;
+
+ nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
return 0;
}
void
nvc0_fb_takedown(struct drm_device *dev)
{
+ nvc0_fb_destroy(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index fa5d4c2..a495e48 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -159,7 +159,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+ struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
int ret, format;
ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
@@ -203,8 +203,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
OUT_RING (chan, 0x0000902d);
BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
- OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset));
- OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset));
+ OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
+ OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
OUT_RING (chan, 0);
BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
@@ -249,8 +249,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
- OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, upper_32_bits(fb->vma.offset));
+ OUT_RING (chan, lower_32_bits(fb->vma.offset));
BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
@@ -260,8 +260,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
- OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, upper_32_bits(fb->vma.offset));
+ OUT_RING (chan, lower_32_bits(fb->vma.offset));
FIRE_RING (chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index fb4f594..6f9f341 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -210,10 +210,10 @@ nvc0_fifo_unload_context(struct drm_device *dev)
int i;
for (i = 0; i < 128; i++) {
- if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1))
+ if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
continue;
- nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000);
+ nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
nv_wr32(dev, 0x002634, i);
if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index ca6db20..5b2f6f4 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -28,7 +28,34 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+
#include "nvc0_graph.h"
+#include "nvc0_grhub.fuc.h"
+#include "nvc0_grgpc.fuc.h"
+
+static void
+nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
+{
+ NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
+ nv_rd32(dev, base + 0x400));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
+ nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
+ nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
+}
+
+static void
+nvc0_graph_ctxctl_debug(struct drm_device *dev)
+{
+ u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
+ u32 gpc;
+
+ nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
+ for (gpc = 0; gpc < gpcnr; gpc++)
+ nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
+}
static int
nvc0_graph_load_context(struct nouveau_channel *chan)
@@ -72,24 +99,44 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
if (!ctx)
return -ENOMEM;
- nvc0_graph_load_context(chan);
-
- nv_wo32(grch->grctx, 0x1c, 1);
- nv_wo32(grch->grctx, 0x20, 0);
- nv_wo32(grch->grctx, 0x28, 0);
- nv_wo32(grch->grctx, 0x2c, 0);
- dev_priv->engine.instmem.flush(dev);
-
- ret = nvc0_grctx_generate(chan);
- if (ret) {
- kfree(ctx);
- return ret;
+ if (!nouveau_ctxfw) {
+ nv_wr32(dev, 0x409840, 0x80000000);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000001);
+ if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
+ NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
+ nvc0_graph_ctxctl_debug(dev);
+ ret = -EBUSY;
+ goto err;
+ }
+ } else {
+ nvc0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
}
- ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
- if (ret) {
- kfree(ctx);
- return ret;
+ ret = nvc0_grctx_generate(chan);
+ if (ret)
+ goto err;
+
+ if (!nouveau_ctxfw) {
+ nv_wr32(dev, 0x409840, 0x80000000);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
+ NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
+ nvc0_graph_ctxctl_debug(dev);
+ ret = -EBUSY;
+ goto err;
+ }
+ } else {
+ ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret)
+ goto err;
}
for (i = 0; i < priv->grctx_size; i += 4)
@@ -97,6 +144,10 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
priv->grctx_vals = ctx;
return 0;
+
+err:
+ kfree(ctx);
+ return ret;
}
static int
@@ -108,50 +159,50 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
int i = 0, gpc, tp, ret;
u32 magic;
- ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
+ ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
&grch->unk408004);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
+ ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
&grch->unk40800c);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
+ ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
&grch->unk418810);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
+ ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
&grch->mmio);
if (ret)
return ret;
nv_wo32(grch->mmio, i++ * 4, 0x00408004);
- nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00408008);
nv_wo32(grch->mmio, i++ * 4, 0x80000018);
nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
- nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00408010);
nv_wo32(grch->mmio, i++ * 4, 0x80000000);
nv_wo32(grch->mmio, i++ * 4, 0x00418810);
- nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
nv_wo32(grch->mmio, i++ * 4, 0x00419848);
- nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);
+ nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
nv_wo32(grch->mmio, i++ * 4, 0x00419004);
- nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00419008);
nv_wo32(grch->mmio, i++ * 4, 0x00000000);
nv_wo32(grch->mmio, i++ * 4, 0x00418808);
- nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
nv_wo32(grch->mmio, i++ * 4, 0x80000018);
@@ -159,7 +210,7 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
nv_wo32(grch->mmio, i++ * 4, 0x00405830);
nv_wo32(grch->mmio, i++ * 4, magic);
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) {
u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
nv_wo32(grch->mmio, i++ * 4, reg);
nv_wo32(grch->mmio, i++ * 4, magic);
@@ -186,7 +237,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
return -ENOMEM;
chan->engctx[NVOBJ_ENGINE_GR] = grch;
- ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
+ ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
&grch->grctx);
if (ret)
@@ -197,8 +248,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
if (ret)
goto error;
- nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
- nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
pinstmem->flush(dev);
if (!priv->grctx_vals) {
@@ -210,15 +261,20 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
for (i = 0; i < priv->grctx_size; i += 4)
nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
- nv_wo32(grctx, 0xf4, 0);
- nv_wo32(grctx, 0xf8, 0);
- nv_wo32(grctx, 0x10, grch->mmio_nr);
- nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
- nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
- nv_wo32(grctx, 0x1c, 1);
- nv_wo32(grctx, 0x20, 0);
- nv_wo32(grctx, 0x28, 0);
- nv_wo32(grctx, 0x2c, 0);
+ if (!nouveau_ctxfw) {
+ nv_wo32(grctx, 0x00, grch->mmio_nr);
+ nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
+ } else {
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
+ }
pinstmem->flush(dev);
return 0;
@@ -248,7 +304,7 @@ nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
}
static int
-nvc0_graph_fini(struct drm_device *dev, int engine)
+nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
return 0;
}
@@ -296,6 +352,7 @@ static void
nvc0_graph_init_gpc_0(struct drm_device *dev)
{
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
u32 data[TP_MAX / 8];
u8 tpnr[GPC_MAX];
int i, gpc, tpc;
@@ -307,13 +364,6 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
* 465: 3/4/4/0 4 7
* 470: 3/3/4/4 5 5
* 480: 3/4/4/4 6 6
- *
- * magicgpc918
- * 450: 00200000 00000000001000000000000000000000
- * 460: 00124925 00000000000100100100100100100101
- * 465: 000ba2e9 00000000000010111010001011101001
- * 470: 00092493 00000000000010010010010010010011
- * 480: 00088889 00000000000010001000100010001001
*/
memset(data, 0x00, sizeof(data));
@@ -336,10 +386,10 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
priv->tp_nr[gpc]);
nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
- nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
}
@@ -419,8 +469,51 @@ nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
static int
nvc0_graph_init_ctxctl(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
u32 r000260;
+ int i;
+
+ if (!nouveau_ctxfw) {
+ /* load HUB microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x4091c0, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
+ nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
+
+ nv_wr32(dev, 0x409180, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, 0x409188, i >> 6);
+ nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
+ }
+
+ /* load GPC microcode */
+ nv_wr32(dev, 0x41a1c0, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
+ nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
+
+ nv_wr32(dev, 0x41a180, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, 0x41a188, i >> 6);
+ nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
+ }
+ nv_wr32(dev, 0x000260, r000260);
+
+ /* start HUB ucode running, it'll init the GPCs */
+ nv_wr32(dev, 0x409800, dev_priv->chipset);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
+ NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
+ nvc0_graph_ctxctl_debug(dev);
+ return -EBUSY;
+ }
+
+ priv->grctx_size = nv_rd32(dev, 0x409804);
+ return 0;
+ }
/* load fuc microcode */
r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
@@ -528,6 +621,22 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
}
static void
+nvc0_graph_ctxctl_isr(struct drm_device *dev)
+{
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ if (ustat & 0x00000001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
+ if (ustat & 0x00080000)
+ NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
+ if (ustat & ~0x00080001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
+
+ nvc0_graph_ctxctl_debug(dev);
+ nv_wr32(dev, 0x409c20, ustat);
+}
+
+static void
nvc0_graph_isr(struct drm_device *dev)
{
u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
@@ -578,11 +687,7 @@ nvc0_graph_isr(struct drm_device *dev)
}
if (stat & 0x00080000) {
- u32 ustat = nv_rd32(dev, 0x409c18);
-
- NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
-
- nv_wr32(dev, 0x409c20, ustat);
+ nvc0_graph_ctxctl_isr(dev);
nv_wr32(dev, 0x400100, 0x00080000);
stat &= ~0x00080000;
}
@@ -606,7 +711,7 @@ nvc0_runk140_isr(struct drm_device *dev)
u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
- NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
+ NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
units &= ~(1 << unit);
}
}
@@ -651,10 +756,12 @@ nvc0_graph_destroy(struct drm_device *dev, int engine)
{
struct nvc0_graph_priv *priv = nv_engine(dev, engine);
- nvc0_graph_destroy_fw(&priv->fuc409c);
- nvc0_graph_destroy_fw(&priv->fuc409d);
- nvc0_graph_destroy_fw(&priv->fuc41ac);
- nvc0_graph_destroy_fw(&priv->fuc41ad);
+ if (nouveau_ctxfw) {
+ nvc0_graph_destroy_fw(&priv->fuc409c);
+ nvc0_graph_destroy_fw(&priv->fuc409d);
+ nvc0_graph_destroy_fw(&priv->fuc41ac);
+ nvc0_graph_destroy_fw(&priv->fuc41ad);
+ }
nouveau_irq_unregister(dev, 12);
nouveau_irq_unregister(dev, 25);
@@ -675,13 +782,10 @@ nvc0_graph_create(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_graph_priv *priv;
int ret, gpc, i;
+ u32 fermi;
- switch (dev_priv->chipset) {
- case 0xc0:
- case 0xc3:
- case 0xc4:
- break;
- default:
+ fermi = nvc0_graph_class(dev);
+ if (!fermi) {
NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
return 0;
}
@@ -701,15 +805,17 @@ nvc0_graph_create(struct drm_device *dev)
nouveau_irq_register(dev, 12, nvc0_graph_isr);
nouveau_irq_register(dev, 25, nvc0_runk140_isr);
- if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
- nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
- nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
- nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
- ret = 0;
- goto error;
+ if (nouveau_ctxfw) {
+ NV_INFO(dev, "PGRAPH: using external firmware\n");
+ if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
+ nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
+ nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
+ nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
+ ret = 0;
+ goto error;
+ }
}
-
ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
if (ret)
goto error;
@@ -735,25 +841,28 @@ nvc0_graph_create(struct drm_device *dev)
case 0xc0:
if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
priv->magic_not_rop_nr = 0x07;
- /* filled values up to tp_total, the rest 0 */
- priv->magicgpc918 = 0x000ba2e9;
} else
if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
priv->magic_not_rop_nr = 0x05;
- priv->magicgpc918 = 0x00092493;
} else
if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
priv->magic_not_rop_nr = 0x06;
- priv->magicgpc918 = 0x00088889;
}
break;
case 0xc3: /* 450, 4/0/0/0, 2 */
priv->magic_not_rop_nr = 0x03;
- priv->magicgpc918 = 0x00200000;
break;
case 0xc4: /* 460, 3/4/0/0, 4 */
priv->magic_not_rop_nr = 0x01;
- priv->magicgpc918 = 0x00124925;
+ break;
+ case 0xc1: /* 2/0/0/0, 1 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
+ case 0xc8: /* 4/4/3/4, 5 */
+ priv->magic_not_rop_nr = 0x06;
+ break;
+ case 0xce: /* 4/4/0/0, 4 */
+ priv->magic_not_rop_nr = 0x03;
break;
}
@@ -763,13 +872,16 @@ nvc0_graph_create(struct drm_device *dev)
priv->tp_nr[3], priv->rop_nr);
/* use 0xc3's values... */
priv->magic_not_rop_nr = 0x03;
- priv->magicgpc918 = 0x00200000;
}
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
+ if (fermi >= 0x9197)
+ NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
+ if (fermi >= 0x9297)
+ NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
return 0;
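
The magicgpc918 table removed from nvc0_graph.c above is replaced by a single DIV_ROUND_UP(0x00800000, tp_total) at init time. A quick standalone check that the formula reproduces the deleted constants, using the TP counts named in the configuration comments of these hunks:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const struct { const char *cfg; unsigned tp_total, expected; } t[] = {
		{ "450, 4/0/0/0",  4, 0x00200000 },
		{ "460, 3/4/0/0",  7, 0x00124925 },
		{ "465, 3/4/4/0", 11, 0x000ba2e9 },
		{ "470, 3/3/4/4", 14, 0x00092493 },
		{ "480, 3/4/4/4", 15, 0x00088889 },
	};
	for (unsigned i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("%-14s tp=%2u -> 0x%08x (table had 0x%08x)\n",
		       t[i].cfg, t[i].tp_total,
		       (unsigned)DIV_ROUND_UP(0x00800000u, t[i].tp_total),
		       t[i].expected);
	return 0;
}
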
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
new file mode 100644
index 0000000..2a4b6dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
@@ -0,0 +1,400 @@
+/* fuc microcode util functions for nvc0 PGRAPH
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
+define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
+
+ifdef(`include_code', `
+// Error codes
+define(`E_BAD_COMMAND', 0x01)
+define(`E_CMD_OVERFLOW', 0x02)
+
+// Util macros to help with debugging ucode hangs etc
+define(`T_WAIT', 0)
+define(`T_MMCTX', 1)
+define(`T_STRWAIT', 2)
+define(`T_STRINIT', 3)
+define(`T_AUTO', 4)
+define(`T_CHAN', 5)
+define(`T_LOAD', 6)
+define(`T_SAVE', 7)
+define(`T_LCHAN', 8)
+define(`T_LCTXH', 9)
+
+define(`trace_set', `
+ mov $r8 0x83c
+ shl b32 $r8 6
+ clear b32 $r9
+ bset $r9 $1
+ iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
+')
+
+define(`trace_clr', `
+ mov $r8 0x85c
+ shl b32 $r8 6
+ clear b32 $r9
+ bset $r9 $1
+ iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
+')
+
+// queue_put - add request to queue
+//
+// In : $r13 queue pointer
+// $r14 command
+// $r15 data
+//
+queue_put:
+ // make sure we have space..
+ ld b32 $r8 D[$r13 + 0x0] // GET
+ ld b32 $r9 D[$r13 + 0x4] // PUT
+ xor $r8 8
+ cmpu b32 $r8 $r9
+ bra ne queue_put_next
+ mov $r15 E_CMD_OVERFLOW
+ call error
+ ret
+
+ // store cmd/data on queue
+ queue_put_next:
+ and $r8 $r9 7
+ shl b32 $r8 3
+ add b32 $r8 $r13
+ add b32 $r8 8
+ st b32 D[$r8 + 0x0] $r14
+ st b32 D[$r8 + 0x4] $r15
+
+ // update PUT
+ add b32 $r9 1
+ and $r9 0xf
+ st b32 D[$r13 + 0x4] $r9
+ ret
+
+// queue_get - fetch request from queue
+//
+// In : $r13 queue pointer
+//
+// Out: $p1 clear on success (data available)
+// $r14 command
+// $r15 data
+//
+queue_get:
+ bset $flags $p1
+ ld b32 $r8 D[$r13 + 0x0] // GET
+ ld b32 $r9 D[$r13 + 0x4] // PUT
+ cmpu b32 $r8 $r9
+ bra e queue_get_done
+ // fetch first cmd/data pair
+ and $r9 $r8 7
+ shl b32 $r9 3
+ add b32 $r9 $r13
+ add b32 $r9 8
+ ld b32 $r14 D[$r9 + 0x0]
+ ld b32 $r15 D[$r9 + 0x4]
+
+ // update GET
+ add b32 $r8 1
+ and $r8 0xf
+ st b32 D[$r13 + 0x0] $r8
+ bclr $flags $p1
+queue_get_done:
+ ret
+
+// nv_rd32 - read 32-bit value from nv register
+//
+// In : $r14 register
+// Out: $r15 value
+//
+nv_rd32:
+ mov $r11 0x728
+ shl b32 $r11 6
+ mov b32 $r12 $r14
+ bset $r12 31 // MMIO_CTRL_PENDING
+ iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_rd32_wait:
+ iord $r12 I[$r11 + 0x000]
+ xbit $r12 $r12 31
+ bra ne nv_rd32_wait
+ mov $r10 6 // DONE_MMIO_RD
+ call wait_doneo
+ iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
+ ret
+
+// nv_wr32 - write 32-bit value to nv register
+//
+// In : $r14 register
+// $r15 value
+//
+nv_wr32:
+ mov $r11 0x728
+ shl b32 $r11 6
+ iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
+ mov b32 $r12 $r14
+ bset $r12 31 // MMIO_CTRL_PENDING
+ bset $r12 30 // MMIO_CTRL_WRITE
+ iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_wr32_wait:
+ iord $r12 I[$r11 + 0x000]
+ xbit $r12 $r12 31
+ bra ne nv_wr32_wait
+ ret
+
+// (re)set watchdog timer
+//
+// In : $r15 timeout
+//
+watchdog_reset:
+ mov $r8 0x430
+ shl b32 $r8 6
+ bset $r15 31
+ iowr I[$r8 + 0x000] $r15
+ ret
+
+// clear watchdog timer
+watchdog_clear:
+ mov $r8 0x430
+ shl b32 $r8 6
+ iowr I[$r8 + 0x000] $r0
+ ret
+
+// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
+//
+// In : $r10 bit to wait on
+//
+define(`wait_done', `
+$1:
+ trace_set(T_WAIT);
+ mov $r8 0x818
+ shl b32 $r8 6
+ iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
+ wait_done_$1:
+ mov $r8 0x400
+ shl b32 $r8 6
+ iord $r8 I[$r8 + 0x000] // DONE
+ xbit $r8 $r8 $r10
+ bra $2 wait_done_$1
+ trace_clr(T_WAIT)
+ ret
+')
+wait_done(wait_donez, ne)
+wait_done(wait_doneo, e)
+
+// mmctx_size - determine size of a mmio list transfer
+//
+// In : $r14 mmio list head
+// $r15 mmio list tail
+// Out: $r15 transfer size (in bytes)
+//
+mmctx_size:
+ clear b32 $r9
+ nv_mmctx_size_loop:
+ ld b32 $r8 D[$r14]
+ shr b32 $r8 26
+ add b32 $r8 1
+ shl b32 $r8 2
+ add b32 $r9 $r8
+ add b32 $r14 4
+ cmpu b32 $r14 $r15
+ bra ne nv_mmctx_size_loop
+ mov b32 $r15 $r9
+ ret
+
+// mmctx_xfer - execute a list of mmio transfers
+//
+// In : $r10 flags
+// bit 0: direction (0 = save, 1 = load)
+// bit 1: set if first transfer
+// bit 2: set if last transfer
+// $r11 base
+// $r12 mmio list head
+// $r13 mmio list tail
+// $r14 multi_stride
+// $r15 multi_mask
+//
+mmctx_xfer:
+ trace_set(T_MMCTX)
+ mov $r8 0x710
+ shl b32 $r8 6
+ clear b32 $r9
+ or $r11 $r11
+ bra e mmctx_base_disabled
+ iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
+ bset $r9 0 // BASE_EN
+ mmctx_base_disabled:
+ or $r14 $r14
+ bra e mmctx_multi_disabled
+ iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
+ iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
+ bset $r9 1 // MULTI_EN
+ mmctx_multi_disabled:
+ add b32 $r8 0x100
+
+ xbit $r11 $r10 0
+ shl b32 $r11 16 // DIR
+ bset $r11 12 // QLIMIT = 0x10
+ xbit $r14 $r10 1
+ shl b32 $r14 17
+ or $r11 $r14 // START_TRIGGER
+ iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+
+ // loop over the mmio list, and send requests to the hw
+ mmctx_exec_loop:
+ // wait for space in mmctx queue
+ mmctx_wait_free:
+ iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+ and $r14 0x1f
+ bra e mmctx_wait_free
+
+ // queue up an entry
+ ld b32 $r14 D[$r12]
+ or $r14 $r9
+ iowr I[$r8 + 0x300] $r14
+ add b32 $r12 4
+ cmpu b32 $r12 $r13
+ bra ne mmctx_exec_loop
+
+ xbit $r11 $r10 2
+ bra ne mmctx_stop
+ // wait for queue to empty
+ mmctx_fini_wait:
+ iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ and $r11 0x1f
+ cmpu b32 $r11 0x10
+ bra ne mmctx_fini_wait
+ mov $r10 2 // DONE_MMCTX
+ call wait_donez
+ bra mmctx_done
+ mmctx_stop:
+ xbit $r11 $r10 0
+ shl b32 $r11 16 // DIR
+ bset $r11 12 // QLIMIT = 0x10
+ bset $r11 18 // STOP_TRIGGER
+ iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+ mmctx_stop_wait:
+ // wait for STOP_TRIGGER to clear
+ iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ xbit $r11 $r11 18
+ bra ne mmctx_stop_wait
+ mmctx_done:
+ trace_clr(T_MMCTX)
+ ret
+
+// Wait for DONE_STRAND
+//
+strand_wait:
+ push $r10
+ mov $r10 2
+ call wait_donez
+ pop $r10
+ ret
+
+// unknown - call before issuing strand commands
+//
+strand_pre:
+ mov $r8 0x4afc
+ sethi $r8 0x20000
+ mov $r9 0xc
+ iowr I[$r8] $r9
+ call strand_wait
+ ret
+
+// unknown - call after issuing strand commands
+//
+strand_post:
+ mov $r8 0x4afc
+ sethi $r8 0x20000
+ mov $r9 0xd
+ iowr I[$r8] $r9
+ call strand_wait
+ ret
+
+// Selects strand set?!
+//
+// In: $r14 id
+//
+strand_set:
+ mov $r10 0x4ffc
+ sethi $r10 0x20000
+ sub b32 $r11 $r10 0x500
+ mov $r12 0xf
+ iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
+ mov $r12 0xb
+ iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
+ call strand_wait
+ iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
+ mov $r12 0xa
+ iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
+ call strand_wait
+ ret
+
+// Initialise strand context data
+//
+// In : $r15 context base
+// Out: $r15 context size (in bytes)
+//
+// Strandset(?) 3 hardcoded currently
+//
+strand_ctx_init:
+ trace_set(T_STRINIT)
+ call strand_pre
+ mov $r14 3
+ call strand_set
+ mov $r10 0x46fc
+ sethi $r10 0x20000
+ add b32 $r11 $r10 0x400
+ iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
+ mov $r12 1
+ iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
+ call strand_wait
+ sub b32 $r12 $r0 1
+ iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
+ mov $r12 2
+ iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
+ call strand_wait
+ call strand_post
+
+ // read the size of each strand, poke the context offset of
+ // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
+ // about it later then.
+ mov $r8 0x880
+ shl b32 $r8 6
+ iord $r9 I[$r8 + 0x000] // STRANDS
+ add b32 $r8 0x2200
+ shr b32 $r14 $r15 8
+ ctx_init_strand_loop:
+ iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
+ iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
+ iord $r10 I[$r8 + 0x200] // STRAND_SIZE
+ shr b32 $r10 6
+ add b32 $r10 1
+ add b32 $r14 $r10
+ add b32 $r8 4
+ sub b32 $r9 1
+ bra ne ctx_init_strand_loop
+
+ shl b32 $r14 8
+ sub b32 $r15 $r14 $r15
+ trace_clr(T_STRINIT)
+ ret
+')
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index f5d184e0..55689e9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -57,8 +57,7 @@ struct nvc0_graph_priv {
struct nouveau_gpuobj *unk4188b4;
struct nouveau_gpuobj *unk4188b8;
- u8 magic_not_rop_nr;
- u32 magicgpc918;
+ u8 magic_not_rop_nr;
};
struct nvc0_graph_chan {
@@ -72,4 +71,25 @@ struct nvc0_graph_chan {
int nvc0_grctx_generate(struct nouveau_channel *);
+/* nvc0_graph.c uses this also to determine supported chipsets */
+static inline u32
+nvc0_graph_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ case 0xc3:
+ case 0xc4:
+ case 0xce: /* guess, mmio trace shows only 0x9097 state */
+ return 0x9097;
+ case 0xc1:
+ return 0x9197;
+ case 0xc8:
+ return 0x9297;
+ default:
+ return 0;
+ }
+}
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 6df0661..31018ea 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -45,6 +45,9 @@ nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
static void
nvc0_grctx_generate_9097(struct drm_device *dev)
{
+ u32 fermi = nvc0_graph_class(dev);
+ u32 mthd;
+
nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
@@ -824,134 +827,10 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
- nv_mthd(dev, 0x9097, 0x3400, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3404, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3408, 0x00000000);
- nv_mthd(dev, 0x9097, 0x340c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
- nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
- nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
- nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
- nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
- nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
- nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
- nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
- nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
- nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
- nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
- nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
- nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
- nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
- nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
- nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
- nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
- nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
- nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
- nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
+ if (fermi == 0x9097) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(dev, 0x9097, mthd, 0x00000000);
+ }
nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
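The hunk above collapses a long run of individual method writes into one loop gated on the board's object class. A quick standalone check (a reader-side sketch, not part of the patch) confirms the loop covers the same 128 method offsets the removed unrolled nv_mthd() calls did:

#include <assert.h>

int main(void)
{
	unsigned int mthd, count = 0;

	/* same bounds and stride as the loop in nvc0_grctx_generate_9097() */
	for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
		count++;
	assert(count == 128);	/* matches the 128 removed nv_mthd() calls */
	return 0;
}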
@@ -1321,6 +1200,37 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
}
static void
+nvc0_grctx_generate_9197(struct drm_device *dev)
+{
+ u32 fermi = nvc0_graph_class(dev);
+ u32 mthd;
+
+ if (fermi == 0x9197) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(dev, 0x9197, mthd, 0x00000000);
+ }
+ nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
+}
+
+static void
+nvc0_grctx_generate_9297(struct drm_device *dev)
+{
+ u32 fermi = nvc0_graph_class(dev);
+ u32 mthd;
+
+ if (fermi == 0x9297) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(dev, 0x9297, mthd, 0x00000000);
+ }
+ nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
+}
+
+static void
nvc0_grctx_generate_902d(struct drm_device *dev)
{
nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
@@ -1559,8 +1469,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *dev)
static void
nvc0_grctx_generate_shaders(struct drm_device *dev)
{
- nv_wr32(dev, 0x405800, 0x078000bf);
- nv_wr32(dev, 0x405830, 0x02180000);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset != 0xc1) {
+ nv_wr32(dev, 0x405800, 0x078000bf);
+ nv_wr32(dev, 0x405830, 0x02180000);
+ } else {
+ nv_wr32(dev, 0x405800, 0x0f8000bf);
+ nv_wr32(dev, 0x405830, 0x02180218);
+ }
nv_wr32(dev, 0x405834, 0x00000000);
nv_wr32(dev, 0x405838, 0x00000000);
nv_wr32(dev, 0x405854, 0x00000000);
@@ -1586,10 +1503,16 @@ nvc0_grctx_generate_unk60xx(struct drm_device *dev)
static void
nvc0_grctx_generate_unk64xx(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
nv_wr32(dev, 0x4064a8, 0x00000000);
nv_wr32(dev, 0x4064ac, 0x00003fff);
nv_wr32(dev, 0x4064b4, 0x00000000);
nv_wr32(dev, 0x4064b8, 0x00000000);
+ if (dev_priv->chipset == 0xc1) {
+ nv_wr32(dev, 0x4064c0, 0x80140078);
+ nv_wr32(dev, 0x4064c4, 0x0086ffff);
+ }
}
static void
@@ -1622,21 +1545,14 @@ static void
nvc0_grctx_generate_rop(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chipset = dev_priv->chipset;
/* ROPC_BROADCAST */
nv_wr32(dev, 0x408800, 0x02802a3c);
nv_wr32(dev, 0x408804, 0x00000040);
- nv_wr32(dev, 0x408808, 0x0003e00d);
- switch (dev_priv->chipset) {
- case 0xc0:
- nv_wr32(dev, 0x408900, 0x0080b801);
- break;
- case 0xc3:
- case 0xc4:
- nv_wr32(dev, 0x408900, 0x3080b801);
- break;
- }
- nv_wr32(dev, 0x408904, 0x02000001);
+ nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
nv_wr32(dev, 0x408908, 0x00c80929);
nv_wr32(dev, 0x40890c, 0x00000000);
nv_wr32(dev, 0x408980, 0x0000011d);
@@ -1645,6 +1561,8 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
static void
nvc0_grctx_generate_gpc(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chipset = dev_priv->chipset;
int i;
/* GPC_BROADCAST */
@@ -1676,7 +1594,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x41880c, 0x00000000);
nv_wr32(dev, 0x418810, 0x00000000);
nv_wr32(dev, 0x418828, 0x00008442);
- nv_wr32(dev, 0x418830, 0x00000001);
+ nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
nv_wr32(dev, 0x4188d8, 0x00000008);
nv_wr32(dev, 0x4188e0, 0x01000000);
nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1684,7 +1602,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x4188f0, 0x00000000);
nv_wr32(dev, 0x4188f4, 0x00000000);
nv_wr32(dev, 0x4188f8, 0x00000000);
- nv_wr32(dev, 0x4188fc, 0x00100000);
+ nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
nv_wr32(dev, 0x41891c, 0x00ff00ff);
nv_wr32(dev, 0x418924, 0x00000000);
nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1715,6 +1633,8 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418c24, 0x00000000);
nv_wr32(dev, 0x418c28, 0x00000000);
nv_wr32(dev, 0x418c2c, 0x00000000);
+ if (chipset == 0xc1)
+ nv_wr32(dev, 0x418c6c, 0x00000001);
nv_wr32(dev, 0x418c80, 0x20200004);
nv_wr32(dev, 0x418c8c, 0x00000001);
nv_wr32(dev, 0x419000, 0x00000780);
@@ -1727,10 +1647,13 @@ static void
nvc0_grctx_generate_tp(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chipset = dev_priv->chipset;
/* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(dev, 0x419818, 0x00000000);
+ nv_wr32(dev, 0x41983c, 0x00038bc7);
nv_wr32(dev, 0x419848, 0x00000000);
- nv_wr32(dev, 0x419864, 0x0000012a);
+ nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
nv_wr32(dev, 0x419888, 0x00000000);
nv_wr32(dev, 0x419a00, 0x000001f0);
nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1740,8 +1663,8 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419a14, 0x00000200);
nv_wr32(dev, 0x419a1c, 0x00000000);
nv_wr32(dev, 0x419a20, 0x00000800);
- if (dev_priv->chipset != 0xc0)
- nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */
+ if (chipset != 0xc0 && chipset != 0xc8)
+ nv_wr32(dev, 0x00419ac4, 0x0007f440);
nv_wr32(dev, 0x419b00, 0x0a418820);
nv_wr32(dev, 0x419b04, 0x062080e6);
nv_wr32(dev, 0x419b08, 0x020398a4);
@@ -1749,17 +1672,19 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419b10, 0x0a418820);
nv_wr32(dev, 0x419b14, 0x000000e6);
nv_wr32(dev, 0x419bd0, 0x00900103);
- nv_wr32(dev, 0x419be0, 0x00000001);
+ nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
nv_wr32(dev, 0x419be4, 0x00000000);
nv_wr32(dev, 0x419c00, 0x00000002);
nv_wr32(dev, 0x419c04, 0x00000006);
nv_wr32(dev, 0x419c08, 0x00000002);
nv_wr32(dev, 0x419c20, 0x00000000);
- nv_wr32(dev, 0x419cbc, 0x28137606);
+ nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048
nv_wr32(dev, 0x419ce8, 0x00000000);
nv_wr32(dev, 0x419cf4, 0x00000183);
- nv_wr32(dev, 0x419d20, 0x02180000);
+ nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
nv_wr32(dev, 0x419d24, 0x00001fff);
+ if (chipset == 0xc1)
+ nv_wr32(dev, 0x419d44, 0x02180218);
nv_wr32(dev, 0x419e04, 0x00000000);
nv_wr32(dev, 0x419e08, 0x00000000);
nv_wr32(dev, 0x419e0c, 0x00000000);
@@ -1785,11 +1710,11 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419e8c, 0x00000000);
nv_wr32(dev, 0x419e90, 0x00000000);
nv_wr32(dev, 0x419e98, 0x00000000);
- if (dev_priv->chipset != 0xc0)
+ if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(dev, 0x419ee0, 0x00011110);
nv_wr32(dev, 0x419f50, 0x00000000);
nv_wr32(dev, 0x419f54, 0x00000000);
- if (dev_priv->chipset != 0xc0)
+ if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(dev, 0x419f58, 0x00000000);
}
@@ -1801,6 +1726,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
struct drm_device *dev = chan->dev;
int i, gpc, tp, id;
+ u32 fermi = nvc0_graph_class(dev);
u32 r000260, tmp;
r000260 = nv_rd32(dev, 0x000260);
@@ -1857,10 +1783,11 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_wr32(dev, 0x40587c, 0x00000000);
if (1) {
- const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 };
+ const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0,
+ 16, 0, 0, 0, 0, 0, 8, 0 };
u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
u8 tpnr[GPC_MAX];
- u8 data[32];
+ u8 data[TP_MAX];
memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
memset(data, 0x1f, sizeof(data));
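The widened chipset_tp_max[] table above is indexed by the low nibble of the chipset id. A small standalone sketch of that lookup (illustrative only, not driver code; the chipset list is taken from the table shown in the hunk):

#include <stdio.h>

static const unsigned char chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0,
						16, 0, 0, 0, 0, 0, 8, 0 };

int main(void)
{
	static const unsigned int chips[] = { 0xc0, 0xc1, 0xc3, 0xc4, 0xc8, 0xce };
	unsigned int i;

	for (i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
		printf("chipset 0x%02x -> max %u TPs\n", chips[i],
		       (unsigned int)chipset_tp_max[chips[i] & 0x0f]);
	return 0;
}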
@@ -2633,6 +2560,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000053f, 0xffff0000);
nv_icmd(dev, 0x00000585, 0x0000003f);
nv_icmd(dev, 0x00000576, 0x00000003);
+ if (dev_priv->chipset == 0xc1)
+ nv_icmd(dev, 0x0000057b, 0x00000059);
nv_icmd(dev, 0x00000586, 0x00000040);
nv_icmd(dev, 0x00000582, 0x00000080);
nv_icmd(dev, 0x00000583, 0x00000080);
@@ -2865,6 +2794,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_wr32(dev, 0x404154, 0x00000400);
nvc0_grctx_generate_9097(dev);
+ if (fermi >= 0x9197)
+ nvc0_grctx_generate_9197(dev);
+ if (fermi >= 0x9297)
+ nvc0_grctx_generate_9297(dev);
nvc0_grctx_generate_902d(dev);
nvc0_grctx_generate_9039(dev);
nvc0_grctx_generate_90c0(dev);
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
new file mode 100644
index 0000000..0ec2add
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
@@ -0,0 +1,474 @@
+/* fuc microcode for nvc0 PGRAPH/GPC
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
+ */
+
+/* TODO
+ * - bracket certain functions with scratch writes, useful for debugging
+ * - watchdog timer around ctx operations
+ */
+
+.section nvc0_grgpc_data
+include(`nvc0_graph.fuc')
+gpc_id: .b32 0
+gpc_mmio_list_head: .b32 0
+gpc_mmio_list_tail: .b32 0
+
+tpc_count: .b32 0
+tpc_mask: .b32 0
+tpc_mmio_list_head: .b32 0
+tpc_mmio_list_tail: .b32 0
+
+cmd_queue: queue_init
+
+// chipset descriptions
+chipsets:
+.b8 0xc0 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc0_tpc_mmio_tail
+.b8 0xc1 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc1_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc1_tpc_mmio_tail
+.b8 0xc3 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc3_tpc_mmio_tail
+.b8 0xc4 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc3_tpc_mmio_tail
+.b8 0xc8 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc0_tpc_mmio_tail
+.b8 0xce 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc3_tpc_mmio_tail
+.b8 0 0 0 0
+
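Each record in the chipset table above appears to be 12 bytes: a chipset id padded to 4 bytes, followed by four 16-bit mmio-list offsets, which is why init_find_chipset further down steps by 12 and init_context loads halfwords at offsets 4, 6, 8 and 10. A hedged C view of that layout (an assumption inferred from this file, not a structure the driver defines):

#include <stdint.h>

struct gpc_chipset_record {		/* hypothetical host-side view */
	uint8_t  chipset;		/* .b8 0xc0 0 0 0 */
	uint8_t  pad[3];
	uint16_t gpc_mmio_head;		/* .b16 nvc0_gpc_mmio_head */
	uint16_t gpc_mmio_tail;
	uint16_t tpc_mmio_head;
	uint16_t tpc_mmio_tail;
};					/* table ends with chipset == 0 */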
+// GPC mmio lists
+nvc0_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 6)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvc0_gpc_mmio_tail:
+mmctx_data(0x000c6c, 1);
+nvc1_gpc_mmio_tail:
+
+// TPC mmio lists
+nvc0_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 1)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x000750, 2)
+nvc0_tpc_mmio_tail:
+mmctx_data(0x000758, 1)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x0004bc, 1)
+mmctx_data(0x0006e0, 1)
+nvc3_tpc_mmio_tail:
+mmctx_data(0x000544, 1)
+nvc1_tpc_mmio_tail:
+
+
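Comparing the mmctx_data() entries above with the generated nvc0_grgpc_data[] array later in this patch suggests each entry packs to ((count - 1) << 26) | address; for example mmctx_data(0x000400, 6) shows up as 0x14000400 and mmctx_data(0x000450, 9) as 0x20000450. A minimal check of that inferred packing (a sketch, not part of the build):

#include <assert.h>
#include <stdint.h>

static uint32_t mmctx_data(uint32_t addr, uint32_t count)
{
	return ((count - 1) << 26) | addr;	/* inferred packing */
}

int main(void)
{
	assert(mmctx_data(0x000380, 1) == 0x00000380);
	assert(mmctx_data(0x000400, 6) == 0x14000400);
	assert(mmctx_data(0x000450, 9) == 0x20000450);
	return 0;
}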
+.section nvc0_grgpc_code
+bra init
+define(`include_code')
+include(`nvc0_graph.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nvc0_graph.fuc)
+//
+error:
+ push $r14
+ mov $r14 -0x67ec // 0x9814
+ sethi $r14 0x400000
+ call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
+ add b32 $r14 0x41c
+ mov $r15 1
+ call nv_wr32 // HUB_CTXCTL_INTR_UP_SET
+ pop $r14
+ ret
+
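Several routines in this file load a register address as a negative 16-bit immediate and then use sethi to supply the upper bits; the in-line comments give the resulting addresses (0x9814 with sethi 0x400000 above, and 0x409418 for HUB_BAR_SET later on). A tiny check of that combination, under the assumption that the low 16 bits come from the mov and the high bits from sethi (a sketch, not build tooling):

#include <assert.h>
#include <stdint.h>

static uint32_t fuc_addr(int16_t imm, uint32_t sethi)
{
	/* low 16 bits from the mov immediate, high bits from sethi */
	return (uint16_t)imm | sethi;
}

int main(void)
{
	assert(fuc_addr(-0x67ec, 0x400000) == 0x409814);	/* CC_SCRATCH[5], per comment above */
	assert(fuc_addr(-0x6be8, 0x400000) == 0x409418);	/* HUB_BAR_SET, per comment below */
	return 0;
}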
+// GPC fuc initialisation, executed by triggering ucode start, will
+// fall through to main loop after completion.
+//
+// Input:
+// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+// CC_SCRATCH[1]: context base
+//
+// Output:
+// CC_SCRATCH[0]:
+// 31:31: set to signal completion
+// CC_SCRATCH[1]:
+// 31:0: GPC context size
+//
+init:
+ clear b32 $r0
+ mov $sp $r0
+
+ // enable fifo access
+ mov $r1 0x1200
+ mov $r2 2
+ iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+
+ // setup i0 handler, and route all interrupts to it
+ mov $r1 ih
+ mov $iv0 $r1
+ mov $r1 0x400
+ iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
+
+ // enable fifo interrupt
+ mov $r2 4
+ iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+
+ // enable interrupts
+ bset $flags ie0
+
+ // figure out which GPC we are, and how many TPCs we have
+ mov $r1 0x608
+ shl b32 $r1 6
+ iord $r2 I[$r1 + 0x000] // UNITS
+ mov $r3 1
+ and $r2 0x1f
+ shl b32 $r3 $r2
+ sub b32 $r3 1
+ st b32 D[$r0 + tpc_count] $r2
+ st b32 D[$r0 + tpc_mask] $r3
+ add b32 $r1 0x400
+ iord $r2 I[$r1 + 0x000] // MYINDEX
+ st b32 D[$r0 + gpc_id] $r2
+
+ // find context data for this chipset
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
+ mov $r1 chipsets - 12
+ init_find_chipset:
+ add b32 $r1 12
+ ld b32 $r3 D[$r1 + 0x00]
+ cmpu b32 $r3 $r2
+ bra e init_context
+ cmpu b32 $r3 0
+ bra ne init_find_chipset
+ // unknown chipset
+ ret
+
+ // initialise context base, and size tracking
+ init_context:
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
+ clear b32 $r3 // track GPC context size here
+
+ // set mmctx base addresses now so we don't have to do it later,
+ // they don't currently ever change
+ mov $r4 0x700
+ shl b32 $r4 6
+ shr b32 $r5 $r2 8
+ iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
+ iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
+
+ // calculate GPC mmio context size, store the chipset-specific
+ // mmio list pointers somewhere we can get at them later without
+ // re-parsing the chipset list
+ clear b32 $r14
+ clear b32 $r15
+ ld b16 $r14 D[$r1 + 4]
+ ld b16 $r15 D[$r1 + 6]
+ st b16 D[$r0 + gpc_mmio_list_head] $r14
+ st b16 D[$r0 + gpc_mmio_list_tail] $r15
+ call mmctx_size
+ add b32 $r2 $r15
+ add b32 $r3 $r15
+
+ // calculate per-TPC mmio context size, store the list pointers
+ ld b16 $r14 D[$r1 + 8]
+ ld b16 $r15 D[$r1 + 10]
+ st b16 D[$r0 + tpc_mmio_list_head] $r14
+ st b16 D[$r0 + tpc_mmio_list_tail] $r15
+ call mmctx_size
+ ld b32 $r14 D[$r0 + tpc_count]
+ mulu $r14 $r15
+ add b32 $r2 $r14
+ add b32 $r3 $r14
+
+ // round up base/size to 256 byte boundary (for strand SWBASE)
+ add b32 $r4 0x1300
+ shr b32 $r3 2
+ iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
+ shr b32 $r2 8
+ shr b32 $r3 6
+ add b32 $r2 1
+ add b32 $r3 1
+ shl b32 $r2 8
+ shl b32 $r3 8
+
+ // calculate size of strand context data
+ mov b32 $r15 $r2
+ call strand_ctx_init
+ add b32 $r3 $r15
+
+ // save context size, and tell HUB we're done
+ mov $r1 0x800
+ shl b32 $r1 6
+ iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
+ add b32 $r1 0x800
+ clear b32 $r2
+ bset $r2 31
+ iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
+
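The 256-byte alignment done near the end of init (shift right by 8, add 1, shift left by 8) always advances to the next 256-byte boundary, even when the value is already aligned. An equivalent C helper, shown only to make the idiom explicit:

static unsigned int align_next_256(unsigned int v)
{
	/* mirrors the shr 8 / add 1 / shl 8 sequence above */
	return ((v >> 8) + 1) << 8;
}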
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+ bset $flags $p0
+ sleep $p0
+ mov $r13 cmd_queue
+ call queue_get
+ bra $p1 main
+
+ // 0x0000-0x0003 are all context transfers
+ cmpu b32 $r14 0x04
+ bra nc main_not_ctx_xfer
+ // fetch $flags and mask off $p1/$p2
+ mov $r1 $flags
+ mov $r2 0x0006
+ not b32 $r2
+ and $r1 $r2
+ // set $p1/$p2 according to transfer type
+ shl b32 $r14 1
+ or $r1 $r14
+ mov $flags $r1
+ // transfer context data
+ call ctx_xfer
+ bra main
+
+ main_not_ctx_xfer:
+ shl b32 $r15 $r14 16
+ or $r15 E_BAD_COMMAND
+ call error
+ bra main
+
+// interrupt handler
+ih:
+ push $r8
+ mov $r8 $flags
+ push $r8
+ push $r9
+ push $r10
+ push $r11
+ push $r13
+ push $r14
+ push $r15
+
+ // incoming fifo command?
+ iord $r10 I[$r0 + 0x200] // INTR
+ and $r11 $r10 0x00000004
+ bra e ih_no_fifo
+ // queue incoming fifo command for later processing
+ mov $r11 0x1900
+ mov $r13 cmd_queue
+ iord $r14 I[$r11 + 0x100] // FIFO_CMD
+ iord $r15 I[$r11 + 0x000] // FIFO_DATA
+ call queue_put
+ add b32 $r11 0x400
+ mov $r14 1
+ iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+
+ // ack, and wake up main()
+ ih_no_fifo:
+ iowr I[$r0 + 0x100] $r10 // INTR_ACK
+
+ pop $r15
+ pop $r14
+ pop $r13
+ pop $r11
+ pop $r10
+ pop $r9
+ pop $r8
+ mov $flags $r8
+ pop $r8
+ bclr $flags $p0
+ iret
+
+// Set this GPC's bit in HUB_BAR, used to signal completion of various
+// activities to the HUB fuc
+//
+hub_barrier_done:
+ mov $r15 1
+ ld b32 $r14 D[$r0 + gpc_id]
+ shl b32 $r15 $r14
+ mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
+ sethi $r14 0x400000
+ call nv_wr32
+ ret
+
+// Disables various things, waits a bit, and re-enables them..
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off? Anyways, without this,
+// funny things happen.
+//
+ctx_redswitch:
+ mov $r14 0x614
+ shl b32 $r14 6
+ mov $r15 0x020
+ iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
+ mov $r15 8
+ ctx_redswitch_delay:
+ sub b32 $r15 1
+ bra ne ctx_redswitch_delay
+ mov $r15 0xa20
+ iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
+ ret
+
+// Transfer GPC context data between GPU and storage area
+//
+// In: $r15 context base address
+// $p1 clear on save, set on load
+// $p2 set if opposite direction done/will be done, so:
+// on save it means: "a load will follow this save"
+// on load it means: "a save preceded this load"
+//
+ctx_xfer:
+ // set context base address
+ mov $r1 0xa04
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r15// MEM_BASE
+ bra not $p1 ctx_xfer_not_load
+ call ctx_redswitch
+ ctx_xfer_not_load:
+
+ // strands
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xc
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
+ call strand_wait
+ mov $r2 0x47fc
+ sethi $r2 0x20000
+ iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
+ xbit $r2 $flags $p1
+ add b32 $r2 3
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+ // mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 2 // first
+ mov $r11 0x0000
+ sethi $r11 0x500000
+ ld b32 $r12 D[$r0 + gpc_id]
+ shl b32 $r12 15
+ add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
+ ld b32 $r12 D[$r0 + gpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
+ mov $r14 0 // not multi
+ call mmctx_xfer
+
+ // per-TPC mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 4 // last
+ mov $r11 0x4000
+ sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
+ ld b32 $r12 D[$r0 + gpc_id]
+ shl b32 $r12 15
+ add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
+ ld b32 $r12 D[$r0 + tpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
+ ld b32 $r15 D[$r0 + tpc_mask]
+ mov $r14 0x800 // stride = 0x800
+ call mmctx_xfer
+
+ // wait for strands to finish
+ call strand_wait
+
+ // if load, or a save without a load following, do some
+ // unknown stuff that's done after finishing a block of
+ // strand commands
+ bra $p1 ctx_xfer_post
+ bra not $p2 ctx_xfer_done
+ ctx_xfer_post:
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xd
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
+ call strand_wait
+
+ // mark completion in HUB's barrier
+ ctx_xfer_done:
+ call hub_barrier_done
+ ret
+
+.align 256
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
new file mode 100644
index 0000000..1896c89
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
@@ -0,0 +1,483 @@
+uint32_t nvc0_grgpc_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x000000c0,
+ 0x011000b0,
+ 0x01640114,
+ 0x000000c1,
+ 0x011400b0,
+ 0x01780114,
+ 0x000000c3,
+ 0x011000b0,
+ 0x01740114,
+ 0x000000c4,
+ 0x011000b0,
+ 0x01740114,
+ 0x000000c8,
+ 0x011000b0,
+ 0x01640114,
+ 0x000000ce,
+ 0x011000b0,
+ 0x01740114,
+ 0x00000000,
+ 0x00000380,
+ 0x14000400,
+ 0x20000450,
+ 0x00000600,
+ 0x00000684,
+ 0x10000700,
+ 0x00000800,
+ 0x08000808,
+ 0x00000828,
+ 0x00000830,
+ 0x000008d8,
+ 0x000008e0,
+ 0x140008e8,
+ 0x0000091c,
+ 0x08000924,
+ 0x00000b00,
+ 0x14000b08,
+ 0x00000bb8,
+ 0x00000c08,
+ 0x1c000c10,
+ 0x00000c80,
+ 0x00000c8c,
+ 0x08001000,
+ 0x00001014,
+ 0x00000c6c,
+ 0x00000018,
+ 0x0000003c,
+ 0x00000048,
+ 0x00000064,
+ 0x00000088,
+ 0x14000200,
+ 0x0400021c,
+ 0x14000300,
+ 0x000003d0,
+ 0x040003e0,
+ 0x08000400,
+ 0x00000420,
+ 0x000004b0,
+ 0x000004e8,
+ 0x000004f4,
+ 0x04000520,
+ 0x0c000604,
+ 0x4c000644,
+ 0x00000698,
+ 0x04000750,
+ 0x00000758,
+ 0x000002c4,
+ 0x000004bc,
+ 0x000006e0,
+ 0x00000544,
+};
+
+uint32_t nvc0_grgpc_code[] = {
+ 0x03060ef5,
+ 0x9800d898,
+ 0x86f001d9,
+ 0x0489b808,
+ 0xf00c1bf4,
+ 0x21f502f7,
+ 0x00f802ec,
+ 0xb60798c4,
+ 0x8dbb0384,
+ 0x0880b600,
+ 0x80008e80,
+ 0x90b6018f,
+ 0x0f94f001,
+ 0xf801d980,
+ 0x0131f400,
+ 0x9800d898,
+ 0x89b801d9,
+ 0x210bf404,
+ 0xb60789c4,
+ 0x9dbb0394,
+ 0x0890b600,
+ 0x98009e98,
+ 0x80b6019f,
+ 0x0f84f001,
+ 0xf400d880,
+ 0x00f80132,
+ 0x0728b7f1,
+ 0xb906b4b6,
+ 0xc9f002ec,
+ 0x00bcd01f,
+ 0xc800bccf,
+ 0x1bf41fcc,
+ 0x06a7f0fa,
+ 0x010321f5,
+ 0xf840bfcf,
+ 0x28b7f100,
+ 0x06b4b607,
+ 0xb980bfd0,
+ 0xc9f002ec,
+ 0x1ec9f01f,
+ 0xcf00bcd0,
+ 0xccc800bc,
+ 0xfa1bf41f,
+ 0x87f100f8,
+ 0x84b60430,
+ 0x1ff9f006,
+ 0xf8008fd0,
+ 0x3087f100,
+ 0x0684b604,
+ 0xf80080d0,
+ 0x3c87f100,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d000,
+ 0x081887f1,
+ 0xd00684b6,
+ 0x87f1008a,
+ 0x84b60400,
+ 0x0088cf06,
+ 0xf4888aff,
+ 0x87f1f31b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00099,
+ 0xf100f800,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00099f0,
+ 0x87f10089,
+ 0x84b60818,
+ 0x008ad006,
+ 0x040087f1,
+ 0xcf0684b6,
+ 0x8aff0088,
+ 0xf30bf488,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0099f094,
+ 0xf80089d0,
+ 0x9894bd00,
+ 0x85b600e8,
+ 0x0180b61a,
+ 0xbb0284b6,
+ 0xe0b60098,
+ 0x04efb804,
+ 0xb9eb1bf4,
+ 0x00f8029f,
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0199f094,
+ 0xf10089d0,
+ 0xb6071087,
+ 0x94bd0684,
+ 0xf405bbfd,
+ 0x8bd0090b,
+ 0x0099f000,
+ 0xf405eefd,
+ 0x8ed00c0b,
+ 0xc08fd080,
+ 0xb70199f0,
+ 0xc8010080,
+ 0xb4b600ab,
+ 0x0cb9f010,
+ 0xb601aec8,
+ 0xbefd11e4,
+ 0x008bd005,
+ 0xf0008ecf,
+ 0x0bf41fe4,
+ 0x00ce98fa,
+ 0xd005e9fd,
+ 0xc0b6c08e,
+ 0x04cdb804,
+ 0xc8e81bf4,
+ 0x1bf402ab,
+ 0x008bcf18,
+ 0xb01fb4f0,
+ 0x1bf410b4,
+ 0x02a7f0f7,
+ 0xf4c921f4,
+ 0xabc81b0e,
+ 0x10b4b600,
+ 0xf00cb9f0,
+ 0x8bd012b9,
+ 0x008bcf00,
+ 0xf412bbc8,
+ 0x87f1fa1b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00199,
+ 0xf900f800,
+ 0x02a7f0a0,
+ 0xfcc921f4,
+ 0xf100f8a0,
+ 0xf04afc87,
+ 0x97f00283,
+ 0x0089d00c,
+ 0x020721f5,
+ 0x87f100f8,
+ 0x83f04afc,
+ 0x0d97f002,
+ 0xf50089d0,
+ 0xf8020721,
+ 0xfca7f100,
+ 0x02a3f04f,
+ 0x0500aba2,
+ 0xd00fc7f0,
+ 0xc7f000ac,
+ 0x00bcd00b,
+ 0x020721f5,
+ 0xf000aed0,
+ 0xbcd00ac7,
+ 0x0721f500,
+ 0xf100f802,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x21f50089,
+ 0xe7f00213,
+ 0x3921f503,
+ 0xfca7f102,
+ 0x02a3f046,
+ 0x0400aba0,
+ 0xf040a0d0,
+ 0xbcd001c7,
+ 0x0721f500,
+ 0x010c9202,
+ 0xf000acd0,
+ 0xbcd002c7,
+ 0x0721f500,
+ 0x2621f502,
+ 0x8087f102,
+ 0x0684b608,
+ 0xb70089cf,
+ 0x95220080,
+ 0x8ed008fe,
+ 0x408ed000,
+ 0xb6808acf,
+ 0xa0b606a5,
+ 0x00eabb01,
+ 0xb60480b6,
+ 0x1bf40192,
+ 0x08e4b6e8,
+ 0xf1f2efbc,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x00f80089,
+ 0xe7f1e0f9,
+ 0xe3f09814,
+ 0x8d21f440,
+ 0x041ce0b7,
+ 0xf401f7f0,
+ 0xe0fc8d21,
+ 0x04bd00f8,
+ 0xf10004fe,
+ 0xf0120017,
+ 0x12d00227,
+ 0x3e17f100,
+ 0x0010fe04,
+ 0x040017f1,
+ 0xf0c010d0,
+ 0x12d00427,
+ 0x1031f400,
+ 0x060817f1,
+ 0xcf0614b6,
+ 0x37f00012,
+ 0x1f24f001,
+ 0xb60432bb,
+ 0x02800132,
+ 0x04038003,
+ 0x040010b7,
+ 0x800012cf,
+ 0x27f10002,
+ 0x24b60800,
+ 0x0022cf06,
+ 0xb65817f0,
+ 0x13980c10,
+ 0x0432b800,
+ 0xb00b0bf4,
+ 0x1bf40034,
+ 0xf100f8f1,
+ 0xb6080027,
+ 0x22cf0624,
+ 0xf134bd40,
+ 0xb6070047,
+ 0x25950644,
+ 0x0045d008,
+ 0xbd4045d0,
+ 0x58f4bde4,
+ 0x1f58021e,
+ 0x020e4003,
+ 0xf5040f40,
+ 0xbb013d21,
+ 0x3fbb002f,
+ 0x041e5800,
+ 0x40051f58,
+ 0x0f400a0e,
+ 0x3d21f50c,
+ 0x030e9801,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x0040b700,
+ 0x0235b613,
+ 0xb60043d0,
+ 0x35b60825,
+ 0x0120b606,
+ 0xb60130b6,
+ 0x34b60824,
+ 0x022fb908,
+ 0x026321f5,
+ 0xf1003fbb,
+ 0xb6080017,
+ 0x13d00614,
+ 0x0010b740,
+ 0xf024bd08,
+ 0x12d01f29,
+ 0x0031f400,
+ 0xf00028f4,
+ 0x21f41cd7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef404c3,
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf402ec21,
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0x800acff0,
+ 0xf404abc4,
+ 0xb7f11d0b,
+ 0xd7f01900,
+ 0x40becf1c,
+ 0xf400bfcf,
+ 0xb0b70421,
+ 0xe7f00400,
+ 0x00bed001,
+ 0xfc400ad0,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x32f480fc,
+ 0xf001f800,
+ 0x0e9801f7,
+ 0x04febb00,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f88d21,
+ 0x0614e7f1,
+ 0xf006e4b6,
+ 0xefd020f7,
+ 0x08f7f000,
+ 0xf401f2b6,
+ 0xf7f1fd1b,
+ 0xefd00a20,
+ 0xf100f800,
+ 0xb60a0417,
+ 0x1fd00614,
+ 0x0711f400,
+ 0x04a421f5,
+ 0x4afc17f1,
+ 0xf00213f0,
+ 0x12d00c27,
+ 0x0721f500,
+ 0xfc27f102,
+ 0x0223f047,
+ 0xf00020d0,
+ 0x20b6012c,
+ 0x0012d003,
+ 0xf001acf0,
+ 0xb7f002a5,
+ 0x50b3f000,
+ 0xb6000c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0xf0020d98,
+ 0x21f500e7,
+ 0xacf0015c,
+ 0x04a5f001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6000c,
+ 0x00bcbb0f,
+ 0x98050c98,
+ 0x0f98060d,
+ 0x00e7f104,
+ 0x5c21f508,
+ 0x0721f501,
+ 0x0601f402,
+ 0xf11412f4,
+ 0xf04afc17,
+ 0x27f00213,
+ 0x0012d00d,
+ 0x020721f5,
+ 0x048f21f5,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
new file mode 100644
index 0000000..a1a5991
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
@@ -0,0 +1,808 @@
+/* fuc microcode for nvc0 PGRAPH/HUB
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
+ */
+
+.section nvc0_grhub_data
+include(`nvc0_graph.fuc')
+gpc_count: .b32 0
+rop_count: .b32 0
+cmd_queue: queue_init
+hub_mmio_list_head: .b32 0
+hub_mmio_list_tail: .b32 0
+
+ctx_current: .b32 0
+
+chipsets:
+.b8 0xc0 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
+.b8 0xc1 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc1_hub_mmio_tail
+.b8 0xc3 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
+.b8 0xc4 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
+.b8 0xc8 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
+.b8 0xce 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
+.b8 0 0 0 0
+
+nvc0_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 11)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404174, 3)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 2)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvc0_hub_mmio_tail:
+mmctx_data(0x4064c0, 2)
+nvc1_hub_mmio_tail:
+
+.align 256
+chan_data:
+chan_mmio_count: .b32 0
+chan_mmio_address: .b32 0
+
+.align 256
+xfer_data: .b32 0
+
+.section nvc0_grhub_code
+bra init
+define(`include_code')
+include(`nvc0_graph.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nvc0_graph.fuc)
+//
+error:
+ push $r14
+ mov $r14 0x814
+ shl b32 $r14 6
+ iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
+ mov $r14 0xc1c
+ shl b32 $r14 6
+ mov $r15 1
+ iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
+ pop $r14
+ ret
+
+// HUB fuc initialisation, executed by triggering ucode start, will
+// fall through to main loop after completion.
+//
+// Input:
+// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+//
+// Output:
+// CC_SCRATCH[0]:
+// 31:31: set to signal completion
+// CC_SCRATCH[1]:
+// 31:0: total PGRAPH context size
+//
+init:
+ clear b32 $r0
+ mov $sp $r0
+ mov $xdbase $r0
+
+ // enable fifo access
+ mov $r1 0x1200
+ mov $r2 2
+ iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+
+ // setup i0 handler, and route all interrupts to it
+ mov $r1 ih
+ mov $iv0 $r1
+ mov $r1 0x400
+ iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
+
+ // route HUB_CHANNEL_SWITCH to fuc interrupt 8
+ mov $r3 0x404
+ shl b32 $r3 6
+ mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
+ iowr I[$r3 + 0x000] $r2
+
+ // not sure what these are, route them because NVIDIA does, and
+ // the IRQ handler will signal the host if we ever get one.. we
+ // may find out if/why we need to handle these if so..
+ //
+ mov $r2 0x2004
+ iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
+ mov $r2 0x200b
+ iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
+ mov $r2 0x200c
+ iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+
+ // enable all INTR_UP interrupts
+ mov $r2 0xc24
+ shl b32 $r2 6
+ not b32 $r3 $r0
+ iowr I[$r2] $r3
+
+ // enable fifo, ctxsw, 9, 10, 15 interrupts
+ mov $r2 -0x78fc // 0x8704
+ sethi $r2 0
+ iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+
+ // fifo level triggered, rest edge
+ sub b32 $r1 0x100
+ mov $r2 4
+ iowr I[$r1] $r2
+
+ // enable interrupts
+ bset $flags ie0
+
+ // fetch enabled GPC/ROP counts
+ mov $r14 -0x69fc // 0x409604
+ sethi $r14 0x400000
+ call nv_rd32
+ extr $r1 $r15 16:20
+ st b32 D[$r0 + rop_count] $r1
+ and $r15 0x1f
+ st b32 D[$r0 + gpc_count] $r15
+
+ // set BAR_REQMASK to GPC mask
+ mov $r1 1
+ shl b32 $r1 $r15
+ sub b32 $r1 1
+ mov $r2 0x40c
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r1
+ iowr I[$r2 + 0x100] $r1
+
+ // find context data for this chipset
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
+ mov $r15 chipsets - 8
+ init_find_chipset:
+ add b32 $r15 8
+ ld b32 $r3 D[$r15 + 0x00]
+ cmpu b32 $r3 $r2
+ bra e init_context
+ cmpu b32 $r3 0
+ bra ne init_find_chipset
+ // unknown chipset
+ ret
+
+ // context size calculation, reserve first 256 bytes for use by fuc
+ init_context:
+ mov $r1 256
+
+ // calculate size of mmio context data
+ ld b16 $r14 D[$r15 + 4]
+ ld b16 $r15 D[$r15 + 6]
+ sethi $r14 0
+ st b32 D[$r0 + hub_mmio_list_head] $r14
+ st b32 D[$r0 + hub_mmio_list_tail] $r15
+ call mmctx_size
+
+ // set mmctx base addresses now so we don't have to do it later,
+ // they don't (currently) ever change
+ mov $r3 0x700
+ shl b32 $r3 6
+ shr b32 $r4 $r1 8
+ iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
+ iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
+ add b32 $r3 0x1300
+ add b32 $r1 $r15
+ shr b32 $r15 2
+ iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
+
+ // strands, base offset needs to be aligned to 256 bytes
+ shr b32 $r1 8
+ add b32 $r1 1
+ shl b32 $r1 8
+ mov b32 $r15 $r1
+ call strand_ctx_init
+ add b32 $r1 $r15
+
+ // initialise each GPC in sequence by passing in the offset of its
+ // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
+ // has previously been uploaded by the host) running.
+ //
+ // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
+ // when it has completed, and return the size of its context data
+ // in GPCn_CC_SCRATCH[1]
+ //
+ ld b32 $r3 D[$r0 + gpc_count]
+ mov $r4 0x2000
+ sethi $r4 0x500000
+ init_gpc:
+ // setup, and start GPC ucode running
+ add b32 $r14 $r4 0x804
+ mov b32 $r15 $r1
+ call nv_wr32 // CC_SCRATCH[1] = ctx offset
+ add b32 $r14 $r4 0x800
+ mov b32 $r15 $r2
+ call nv_wr32 // CC_SCRATCH[0] = chipset
+ add b32 $r14 $r4 0x10c
+ clear b32 $r15
+ call nv_wr32
+ add b32 $r14 $r4 0x104
+ call nv_wr32 // ENTRY
+ add b32 $r14 $r4 0x100
+ mov $r15 2 // CTRL_START_TRIGGER
+ call nv_wr32 // CTRL
+
+ // wait for it to complete, and adjust context size
+ add b32 $r14 $r4 0x800
+ init_gpc_wait:
+ call nv_rd32
+ xbit $r15 $r15 31
+ bra e init_gpc_wait
+ add b32 $r14 $r4 0x804
+ call nv_rd32
+ add b32 $r1 $r15
+
+ // next!
+ add b32 $r4 0x8000
+ sub b32 $r3 1
+ bra ne init_gpc
+
+ // save context size, and tell host we're ready
+ mov $r2 0x800
+ shl b32 $r2 6
+ iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
+ add b32 $r2 0x800
+ clear b32 $r1
+ bset $r1 31
+ iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+ // sleep until we have something to do
+ bset $flags $p0
+ sleep $p0
+ mov $r13 cmd_queue
+ call queue_get
+ bra $p1 main
+
+ // context switch, requested by GPU?
+ cmpu b32 $r14 0x4001
+ bra ne main_not_ctx_switch
+ trace_set(T_AUTO)
+ mov $r1 0xb00
+ shl b32 $r1 6
+ iord $r2 I[$r1 + 0x100] // CHAN_NEXT
+ iord $r1 I[$r1 + 0x000] // CHAN_CUR
+
+ xbit $r3 $r1 31
+ bra e chsw_no_prev
+ xbit $r3 $r2 31
+ bra e chsw_prev_no_next
+ push $r2
+ mov b32 $r2 $r1
+ trace_set(T_SAVE)
+ bclr $flags $p1
+ bset $flags $p2
+ call ctx_xfer
+ trace_clr(T_SAVE);
+ pop $r2
+ trace_set(T_LOAD);
+ bset $flags $p1
+ call ctx_xfer
+ trace_clr(T_LOAD);
+ bra chsw_done
+ chsw_prev_no_next:
+ push $r2
+ mov b32 $r2 $r1
+ bclr $flags $p1
+ bclr $flags $p2
+ call ctx_xfer
+ pop $r2
+ mov $r1 0xb00
+ shl b32 $r1 6
+ iowr I[$r1] $r2
+ bra chsw_done
+ chsw_no_prev:
+ xbit $r3 $r2 31
+ bra e chsw_done
+ bset $flags $p1
+ bclr $flags $p2
+ call ctx_xfer
+
+ // ack the context switch request
+ chsw_done:
+ mov $r1 0xb0c
+ shl b32 $r1 6
+ mov $r2 1
+ iowr I[$r1 + 0x000] $r2 // 0x409b0c
+ trace_clr(T_AUTO)
+ bra main
+
+ // request to set current channel? (*not* a context switch)
+ main_not_ctx_switch:
+ cmpu b32 $r14 0x0001
+ bra ne main_not_ctx_chan
+ mov b32 $r2 $r15
+ call ctx_chan
+ bra main_done
+
+ // request to store current channel context?
+ main_not_ctx_chan:
+ cmpu b32 $r14 0x0002
+ bra ne main_not_ctx_save
+ trace_set(T_SAVE)
+ bclr $flags $p1
+ bclr $flags $p2
+ call ctx_xfer
+ trace_clr(T_SAVE)
+ bra main_done
+
+ main_not_ctx_save:
+ shl b32 $r15 $r14 16
+ or $r15 E_BAD_COMMAND
+ call error
+ bra main
+
+ main_done:
+ mov $r1 0x820
+ shl b32 $r1 6
+ clear b32 $r2
+ bset $r2 31
+ iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
+ bra main
+
+// interrupt handler
+ih:
+ push $r8
+ mov $r8 $flags
+ push $r8
+ push $r9
+ push $r10
+ push $r11
+ push $r13
+ push $r14
+ push $r15
+
+ // incoming fifo command?
+ iord $r10 I[$r0 + 0x200] // INTR
+ and $r11 $r10 0x00000004
+ bra e ih_no_fifo
+ // queue incoming fifo command for later processing
+ mov $r11 0x1900
+ mov $r13 cmd_queue
+ iord $r14 I[$r11 + 0x100] // FIFO_CMD
+ iord $r15 I[$r11 + 0x000] // FIFO_DATA
+ call queue_put
+ add b32 $r11 0x400
+ mov $r14 1
+ iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+
+ // context switch request?
+ ih_no_fifo:
+ and $r11 $r10 0x00000100
+ bra e ih_no_ctxsw
+ // enqueue a context switch for later processing
+ mov $r13 cmd_queue
+ mov $r14 0x4001
+ call queue_put
+
+ // anything we didn't handle, bring it to the host's attention
+ ih_no_ctxsw:
+ mov $r11 0x104
+ not b32 $r11
+ and $r11 $r10 $r11
+ bra e ih_no_other
+ mov $r10 0xc1c
+ shl b32 $r10 6
+ iowr I[$r10] $r11 // INTR_UP_SET
+
+ // ack, and wake up main()
+ ih_no_other:
+ iowr I[$r0 + 0x100] $r10 // INTR_ACK
+
+ pop $r15
+ pop $r14
+ pop $r13
+ pop $r11
+ pop $r10
+ pop $r9
+ pop $r8
+ mov $flags $r8
+ pop $r8
+ bclr $flags $p0
+ iret
+
+// Not really sure, but MEM_CMD 7 will hang forever if this isn't done
+ctx_4160s:
+ mov $r14 0x4160
+ sethi $r14 0x400000
+ mov $r15 1
+ call nv_wr32
+ ctx_4160s_wait:
+ call nv_rd32
+ xbit $r15 $r15 4
+ bra e ctx_4160s_wait
+ ret
+
+// Without clearing again at end of xfer, some things cause PGRAPH
+// to hang with STATUS=0x00000007 until it's cleared.. fbcon can
+// still function with it set however...
+ctx_4160c:
+ mov $r14 0x4160
+ sethi $r14 0x400000
+ clear b32 $r15
+ call nv_wr32
+ ret
+
+// Again, not really sure
+//
+// In: $r15 value to set 0x404170 to
+//
+ctx_4170s:
+ mov $r14 0x4170
+ sethi $r14 0x400000
+ or $r15 0x10
+ call nv_wr32
+ ret
+
+// Waits for a ctx_4170s() call to complete
+//
+ctx_4170w:
+ mov $r14 0x4170
+ sethi $r14 0x400000
+ call nv_rd32
+ and $r15 0x10
+ bra ne ctx_4170w
+ ret
+
+// Disables various things, waits a bit, and re-enables them..
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off? Anyways, without this,
+// funny things happen.
+//
+ctx_redswitch:
+ mov $r14 0x614
+ shl b32 $r14 6
+ mov $r15 0x270
+ iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+ mov $r15 8
+ ctx_redswitch_delay:
+ sub b32 $r15 1
+ bra ne ctx_redswitch_delay
+ mov $r15 0x770
+ iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+ ret
+
+// Not a clue what this is for, except that unless the value is 0x10, the
+// strand context is saved (and presumably restored) incorrectly..
+//
+// In: $r15 value to set to (0x00/0x10 are used)
+//
+ctx_86c:
+ mov $r14 0x86c
+ shl b32 $r14 6
+ iowr I[$r14] $r15 // HUB(0x86c) = val
+ mov $r14 -0x75ec
+ sethi $r14 0x400000
+ call nv_wr32 // ROP(0xa14) = val
+ mov $r14 -0x5794
+ sethi $r14 0x410000
+ call nv_wr32 // GPC(0x86c) = val
+ ret
+
+// ctx_load - loads a channel's ctxctl data, and selects its vm
+//
+// In: $r2 channel address
+//
+ctx_load:
+ trace_set(T_CHAN)
+
+ // switch to channel, somewhat magic in parts..
+ mov $r10 12 // DONE_UNK12
+ call wait_donez
+ mov $r1 0xa24
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r0 // 0x409a24
+ mov $r3 0xb00
+ shl b32 $r3 6
+ iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
+ mov $r1 0xa0c
+ shl b32 $r1 6
+ mov $r4 7
+ iowr I[$r1 + 0x000] $r2 // MEM_CHAN
+ iowr I[$r1 + 0x100] $r4 // MEM_CMD
+ ctx_chan_wait_0:
+ iord $r4 I[$r1 + 0x100]
+ and $r4 0x1f
+ bra ne ctx_chan_wait_0
+ iowr I[$r3 + 0x000] $r2 // CHAN_CUR
+
+ // load channel header, fetch PGRAPH context pointer
+ mov $xtargets $r0
+ bclr $r2 31
+ shl b32 $r2 4
+ add b32 $r2 2
+
+ trace_set(T_LCHAN)
+ mov $r1 0xa04
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r2 // MEM_BASE
+ mov $r1 0xa20
+ shl b32 $r1 6
+ mov $r2 0x0002
+ sethi $r2 0x80000000
+ iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
+ mov $r1 0x10 // chan + 0x0210
+ mov $r2 xfer_data
+ sethi $r2 0x00020000 // 16 bytes
+ xdld $r1 $r2
+ xdwait
+ trace_clr(T_LCHAN)
+
+ // update current context
+ ld b32 $r1 D[$r0 + xfer_data + 4]
+ shl b32 $r1 24
+ ld b32 $r2 D[$r0 + xfer_data + 0]
+ shr b32 $r2 8
+ or $r1 $r2
+ st b32 D[$r0 + ctx_current] $r1
+
+ // set transfer base to start of context, and fetch context header
+ trace_set(T_LCTXH)
+ mov $r2 0xa04
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r1 // MEM_BASE
+ mov $r2 1
+ mov $r1 0xa20
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
+ mov $r1 chan_data
+ sethi $r1 0x00060000 // 256 bytes
+ xdld $r0 $r1
+ xdwait
+ trace_clr(T_LCTXH)
+
+ trace_clr(T_CHAN)
+ ret
+
+// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
+// the active channel for ctxctl, but not actually transfer
+// any context data. intended for use only during initial
+// context construction.
+//
+// In: $r2 channel address
+//
+ctx_chan:
+ call ctx_4160s
+ call ctx_load
+ mov $r10 12 // DONE_UNK12
+ call wait_donez
+ mov $r1 0xa10
+ shl b32 $r1 6
+ mov $r2 5
+ iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
+ ctx_chan_wait:
+ iord $r2 I[$r1 + 0x000]
+ or $r2 $r2
+ bra ne ctx_chan_wait
+ call ctx_4160c
+ ret
+
+// Execute per-context state overrides list
+//
+// Only executed on the first load of a channel. Might want to look into
+// removing this and having the host directly modify the channel's context
+// to change this state... The nouveau DRM already builds this list as
+// it's definitely needed for NVIDIA's firmware, so we may as well use it for now
+//
+// Input: $r1 mmio list length
+//
+ctx_mmio_exec:
+ // set transfer base to be the mmio list
+ ld b32 $r3 D[$r0 + chan_mmio_address]
+ mov $r2 0xa04
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r3 // MEM_BASE
+
+ clear b32 $r3
+ ctx_mmio_loop:
+ // fetch next 256 bytes of mmio list if necessary
+ and $r4 $r3 0xff
+ bra ne ctx_mmio_pull
+ mov $r5 xfer_data
+ sethi $r5 0x00060000 // 256 bytes
+ xdld $r3 $r5
+ xdwait
+
+ // execute a single list entry
+ ctx_mmio_pull:
+ ld b32 $r14 D[$r4 + xfer_data + 0x00]
+ ld b32 $r15 D[$r4 + xfer_data + 0x04]
+ call nv_wr32
+
+ // next!
+ add b32 $r3 8
+ sub b32 $r1 1
+ bra ne ctx_mmio_loop
+
+ // set transfer base back to the current context
+ ctx_mmio_done:
+ ld b32 $r3 D[$r0 + ctx_current]
+ iowr I[$r2 + 0x000] $r3 // MEM_BASE
+
+ // disable the mmio list now, we don't need/want to execute it again
+ st b32 D[$r0 + chan_mmio_count] $r0
+ mov $r1 chan_data
+ sethi $r1 0x00060000 // 256 bytes
+ xdst $r0 $r1
+ xdwait
+ ret
+
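ctx_mmio_exec above walks the per-channel override list eight bytes at a time, writing the second word of each entry to the register named by the first. A hypothetical host-side view of one list entry (an assumption for illustration, not a structure nouveau defines):

#include <stdint.h>

struct chan_mmio_override {	/* hypothetical: one 8-byte list entry */
	uint32_t reg;		/* register address passed to nv_wr32 */
	uint32_t data;		/* value written on first channel load */
};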
+// Transfer HUB context data between GPU and storage area
+//
+// In: $r2 channel address
+// $p1 clear on save, set on load
+// $p2 set if opposite direction done/will be done, so:
+// on save it means: "a load will follow this save"
+// on load it means: "a save preceded this load"
+//
+ctx_xfer:
+ bra not $p1 ctx_xfer_pre
+ bra $p2 ctx_xfer_pre_load
+ ctx_xfer_pre:
+ mov $r15 0x10
+ call ctx_86c
+ call ctx_4160s
+ bra not $p1 ctx_xfer_exec
+
+ ctx_xfer_pre_load:
+ mov $r15 2
+ call ctx_4170s
+ call ctx_4170w
+ call ctx_redswitch
+ clear b32 $r15
+ call ctx_4170s
+ call ctx_load
+
+ // fetch context pointer, and initiate xfer on all GPCs
+ ctx_xfer_exec:
+ ld b32 $r1 D[$r0 + ctx_current]
+ mov $r2 0x414
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
+ mov $r14 -0x5b00
+ sethi $r14 0x410000
+ mov b32 $r15 $r1
+ call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
+ add b32 $r14 4
+ xbit $r15 $flags $p1
+ xbit $r2 $flags $p2
+ shl b32 $r2 1
+ or $r15 $r2
+ call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+
+ // strands
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xc
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
+ call strand_wait
+ mov $r2 0x47fc
+ sethi $r2 0x20000
+ iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
+ xbit $r2 $flags $p1
+ add b32 $r2 3
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+ // mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 6 // first, last
+ mov $r11 0 // base = 0
+ ld b32 $r12 D[$r0 + hub_mmio_list_head]
+ ld b32 $r13 D[$r0 + hub_mmio_list_tail]
+ mov $r14 0 // not multi
+ call mmctx_xfer
+
+ // wait for GPCs to all complete
+ mov $r10 8 // DONE_BAR
+ call wait_doneo
+
+ // wait for strand xfer to complete
+ call strand_wait
+
+ // post-op
+ bra $p1 ctx_xfer_post
+ mov $r10 12 // DONE_UNK12
+ call wait_donez
+ mov $r1 0xa10
+ shl b32 $r1 6
+ mov $r2 5
+ iowr I[$r1] $r2 // MEM_CMD
+ ctx_xfer_post_save_wait:
+ iord $r2 I[$r1]
+ or $r2 $r2
+ bra ne ctx_xfer_post_save_wait
+
+ bra $p2 ctx_xfer_done
+ ctx_xfer_post:
+ mov $r15 2
+ call ctx_4170s
+ clear b32 $r15
+ call ctx_86c
+ call strand_post
+ call ctx_4170w
+ clear b32 $r15
+ call ctx_4170s
+
+ bra not $p1 ctx_xfer_no_post_mmio
+ ld b32 $r1 D[$r0 + chan_mmio_count]
+ or $r1 $r1
+ bra e ctx_xfer_no_post_mmio
+ call ctx_mmio_exec
+
+ ctx_xfer_no_post_mmio:
+ call ctx_4160c
+
+ ctx_xfer_done:
+ ret
+
+.align 256
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
new file mode 100644
index 0000000..b3b541b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
@@ -0,0 +1,838 @@
+uint32_t nvc0_grhub_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x000000c0,
+ 0x012c0090,
+ 0x000000c1,
+ 0x01300090,
+ 0x000000c3,
+ 0x012c0090,
+ 0x000000c4,
+ 0x012c0090,
+ 0x000000c8,
+ 0x012c0090,
+ 0x000000ce,
+ 0x012c0090,
+ 0x00000000,
+ 0x0417e91c,
+ 0x04400204,
+ 0x28404004,
+ 0x00404044,
+ 0x34404094,
+ 0x184040d0,
+ 0x004040f8,
+ 0x08404130,
+ 0x08404150,
+ 0x04404164,
+ 0x08404174,
+ 0x1c404200,
+ 0x34404404,
+ 0x0c404460,
+ 0x00404480,
+ 0x00404498,
+ 0x0c404604,
+ 0x7c404618,
+ 0x50404698,
+ 0x044046f0,
+ 0x54404700,
+ 0x00405800,
+ 0x08405830,
+ 0x00405854,
+ 0x0c405870,
+ 0x04405a00,
+ 0x00405a18,
+ 0x00406020,
+ 0x0c406028,
+ 0x044064a8,
+ 0x044064b4,
+ 0x00407804,
+ 0x1440780c,
+ 0x004078bc,
+ 0x18408000,
+ 0x00408064,
+ 0x08408800,
+ 0x0c408900,
+ 0x00408980,
+ 0x044064c0,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nvc0_grhub_code[] = {
+ 0x03090ef5,
+ 0x9800d898,
+ 0x86f001d9,
+ 0x0489b808,
+ 0xf00c1bf4,
+ 0x21f502f7,
+ 0x00f802ec,
+ 0xb60798c4,
+ 0x8dbb0384,
+ 0x0880b600,
+ 0x80008e80,
+ 0x90b6018f,
+ 0x0f94f001,
+ 0xf801d980,
+ 0x0131f400,
+ 0x9800d898,
+ 0x89b801d9,
+ 0x210bf404,
+ 0xb60789c4,
+ 0x9dbb0394,
+ 0x0890b600,
+ 0x98009e98,
+ 0x80b6019f,
+ 0x0f84f001,
+ 0xf400d880,
+ 0x00f80132,
+ 0x0728b7f1,
+ 0xb906b4b6,
+ 0xc9f002ec,
+ 0x00bcd01f,
+ 0xc800bccf,
+ 0x1bf41fcc,
+ 0x06a7f0fa,
+ 0x010321f5,
+ 0xf840bfcf,
+ 0x28b7f100,
+ 0x06b4b607,
+ 0xb980bfd0,
+ 0xc9f002ec,
+ 0x1ec9f01f,
+ 0xcf00bcd0,
+ 0xccc800bc,
+ 0xfa1bf41f,
+ 0x87f100f8,
+ 0x84b60430,
+ 0x1ff9f006,
+ 0xf8008fd0,
+ 0x3087f100,
+ 0x0684b604,
+ 0xf80080d0,
+ 0x3c87f100,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d000,
+ 0x081887f1,
+ 0xd00684b6,
+ 0x87f1008a,
+ 0x84b60400,
+ 0x0088cf06,
+ 0xf4888aff,
+ 0x87f1f31b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00099,
+ 0xf100f800,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00099f0,
+ 0x87f10089,
+ 0x84b60818,
+ 0x008ad006,
+ 0x040087f1,
+ 0xcf0684b6,
+ 0x8aff0088,
+ 0xf30bf488,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0099f094,
+ 0xf80089d0,
+ 0x9894bd00,
+ 0x85b600e8,
+ 0x0180b61a,
+ 0xbb0284b6,
+ 0xe0b60098,
+ 0x04efb804,
+ 0xb9eb1bf4,
+ 0x00f8029f,
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0199f094,
+ 0xf10089d0,
+ 0xb6071087,
+ 0x94bd0684,
+ 0xf405bbfd,
+ 0x8bd0090b,
+ 0x0099f000,
+ 0xf405eefd,
+ 0x8ed00c0b,
+ 0xc08fd080,
+ 0xb70199f0,
+ 0xc8010080,
+ 0xb4b600ab,
+ 0x0cb9f010,
+ 0xb601aec8,
+ 0xbefd11e4,
+ 0x008bd005,
+ 0xf0008ecf,
+ 0x0bf41fe4,
+ 0x00ce98fa,
+ 0xd005e9fd,
+ 0xc0b6c08e,
+ 0x04cdb804,
+ 0xc8e81bf4,
+ 0x1bf402ab,
+ 0x008bcf18,
+ 0xb01fb4f0,
+ 0x1bf410b4,
+ 0x02a7f0f7,
+ 0xf4c921f4,
+ 0xabc81b0e,
+ 0x10b4b600,
+ 0xf00cb9f0,
+ 0x8bd012b9,
+ 0x008bcf00,
+ 0xf412bbc8,
+ 0x87f1fa1b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00199,
+ 0xf900f800,
+ 0x02a7f0a0,
+ 0xfcc921f4,
+ 0xf100f8a0,
+ 0xf04afc87,
+ 0x97f00283,
+ 0x0089d00c,
+ 0x020721f5,
+ 0x87f100f8,
+ 0x83f04afc,
+ 0x0d97f002,
+ 0xf50089d0,
+ 0xf8020721,
+ 0xfca7f100,
+ 0x02a3f04f,
+ 0x0500aba2,
+ 0xd00fc7f0,
+ 0xc7f000ac,
+ 0x00bcd00b,
+ 0x020721f5,
+ 0xf000aed0,
+ 0xbcd00ac7,
+ 0x0721f500,
+ 0xf100f802,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x21f50089,
+ 0xe7f00213,
+ 0x3921f503,
+ 0xfca7f102,
+ 0x02a3f046,
+ 0x0400aba0,
+ 0xf040a0d0,
+ 0xbcd001c7,
+ 0x0721f500,
+ 0x010c9202,
+ 0xf000acd0,
+ 0xbcd002c7,
+ 0x0721f500,
+ 0x2621f502,
+ 0x8087f102,
+ 0x0684b608,
+ 0xb70089cf,
+ 0x95220080,
+ 0x8ed008fe,
+ 0x408ed000,
+ 0xb6808acf,
+ 0xa0b606a5,
+ 0x00eabb01,
+ 0xb60480b6,
+ 0x1bf40192,
+ 0x08e4b6e8,
+ 0xf1f2efbc,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x00f80089,
+ 0xe7f1e0f9,
+ 0xe4b60814,
+ 0x00efd006,
+ 0x0c1ce7f1,
+ 0xf006e4b6,
+ 0xefd001f7,
+ 0xf8e0fc00,
+ 0xfe04bd00,
+ 0x07fe0004,
+ 0x0017f100,
+ 0x0227f012,
+ 0xf10012d0,
+ 0xfe05b917,
+ 0x17f10010,
+ 0x10d00400,
+ 0x0437f1c0,
+ 0x0634b604,
+ 0x200327f1,
+ 0xf10032d0,
+ 0xd0200427,
+ 0x27f10132,
+ 0x32d0200b,
+ 0x0c27f102,
+ 0x0732d020,
+ 0x0c2427f1,
+ 0xb90624b6,
+ 0x23d00003,
+ 0x0427f100,
+ 0x0023f087,
+ 0xb70012d0,
+ 0xf0010012,
+ 0x12d00427,
+ 0x1031f400,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xf1c76821,
+ 0x01018090,
+ 0x801ff4f0,
+ 0x17f0000f,
+ 0x041fbb01,
+ 0xf10112b6,
+ 0xb6040c27,
+ 0x21d00624,
+ 0x4021d000,
+ 0x080027f1,
+ 0xcf0624b6,
+ 0xf7f00022,
+ 0x08f0b654,
+ 0xb800f398,
+ 0x0bf40432,
+ 0x0034b00b,
+ 0xf8f11bf4,
+ 0x0017f100,
+ 0x02fe5801,
+ 0xf003ff58,
+ 0x0e8000e3,
+ 0x150f8014,
+ 0x013d21f5,
+ 0x070037f1,
+ 0x950634b6,
+ 0x34d00814,
+ 0x4034d000,
+ 0x130030b7,
+ 0xb6001fbb,
+ 0x3fd002f5,
+ 0x0815b600,
+ 0xb60110b6,
+ 0x1fb90814,
+ 0x6321f502,
+ 0x001fbb02,
+ 0xf1000398,
+ 0xf0200047,
+ 0x4ea05043,
+ 0x1fb90804,
+ 0x8d21f402,
+ 0x08004ea0,
+ 0xf4022fb9,
+ 0x4ea08d21,
+ 0xf4bd010c,
+ 0xa08d21f4,
+ 0xf401044e,
+ 0x4ea08d21,
+ 0xf7f00100,
+ 0x8d21f402,
+ 0x08004ea0,
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x0027f1b4,
+ 0x0624b608,
+ 0xb74021d0,
+ 0xbd080020,
+ 0x1f19f014,
+ 0xf40021d0,
+ 0x28f40031,
+ 0x08d7f000,
+ 0xf43921f4,
+ 0xe4b1f401,
+ 0x1bf54001,
+ 0x87f100d1,
+ 0x84b6083c,
+ 0xf094bd06,
+ 0x89d00499,
+ 0x0017f100,
+ 0x0614b60b,
+ 0xcf4012cf,
+ 0x13c80011,
+ 0x7e0bf41f,
+ 0xf41f23c8,
+ 0x20f95a0b,
+ 0xf10212b9,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00799f0,
+ 0x32f40089,
+ 0x0231f401,
+ 0x082921f5,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0799f094,
+ 0xfc0089d0,
+ 0x3c87f120,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d006,
+ 0xf50131f4,
+ 0xf1082921,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00699f0,
+ 0x0ef40089,
+ 0xb920f931,
+ 0x32f40212,
+ 0x0232f401,
+ 0x082921f5,
+ 0x17f120fc,
+ 0x14b60b00,
+ 0x0012d006,
+ 0xc8130ef4,
+ 0x0bf41f23,
+ 0x0131f40d,
+ 0xf50232f4,
+ 0xf1082921,
+ 0xb60b0c17,
+ 0x27f00614,
+ 0x0012d001,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0499f094,
+ 0xf50089d0,
+ 0xb0ff200e,
+ 0x1bf401e4,
+ 0x02f2b90d,
+ 0x07b521f5,
+ 0xb0420ef4,
+ 0x1bf402e4,
+ 0x3c87f12e,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d007,
+ 0xf40132f4,
+ 0x21f50232,
+ 0x87f10829,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00799,
+ 0x110ef400,
+ 0xf010ef94,
+ 0x21f501f5,
+ 0x0ef502ec,
+ 0x17f1fed1,
+ 0x14b60820,
+ 0xf024bd06,
+ 0x12d01f29,
+ 0xbe0ef500,
+ 0xfe80f9fe,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0xc4800acf,
+ 0x0bf404ab,
+ 0x00b7f11d,
+ 0x08d7f019,
+ 0xcf40becf,
+ 0x21f400bf,
+ 0x00b0b704,
+ 0x01e7f004,
+ 0xe400bed0,
+ 0xf40100ab,
+ 0xd7f00d0b,
+ 0x01e7f108,
+ 0x0421f440,
+ 0x0104b7f1,
+ 0xabffb0bd,
+ 0x0d0bf4b4,
+ 0x0c1ca7f1,
+ 0xd006a4b6,
+ 0x0ad000ab,
+ 0xfcf0fc40,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+ 0x60e7f101,
+ 0x40e3f041,
+ 0xf401f7f0,
+ 0x21f48d21,
+ 0x04ffc868,
+ 0xf8fa0bf4,
+ 0x60e7f100,
+ 0x40e3f041,
+ 0x21f4f4bd,
+ 0xf100f88d,
+ 0xf04170e7,
+ 0xf5f040e3,
+ 0x8d21f410,
+ 0xe7f100f8,
+ 0xe3f04170,
+ 0x6821f440,
+ 0xf410f4f0,
+ 0x00f8f31b,
+ 0x0614e7f1,
+ 0xf106e4b6,
+ 0xd00270f7,
+ 0xf7f000ef,
+ 0x01f2b608,
+ 0xf1fd1bf4,
+ 0xd00770f7,
+ 0x00f800ef,
+ 0x086ce7f1,
+ 0xd006e4b6,
+ 0xe7f100ef,
+ 0xe3f08a14,
+ 0x8d21f440,
+ 0xa86ce7f1,
+ 0xf441e3f0,
+ 0x00f88d21,
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0599f094,
+ 0xf00089d0,
+ 0x21f40ca7,
+ 0x2417f1c9,
+ 0x0614b60a,
+ 0xf10010d0,
+ 0xb60b0037,
+ 0x32d00634,
+ 0x0c17f140,
+ 0x0614b60a,
+ 0xd00747f0,
+ 0x14d00012,
+ 0x4014cf40,
+ 0xf41f44f0,
+ 0x32d0fa1b,
+ 0x000bfe00,
+ 0xb61f2af0,
+ 0x20b60424,
+ 0x3c87f102,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d008,
+ 0x0a0417f1,
+ 0xd00614b6,
+ 0x17f10012,
+ 0x14b60a20,
+ 0x0227f006,
+ 0x800023f1,
+ 0xf00012d0,
+ 0x27f11017,
+ 0x23f00300,
+ 0x0512fa02,
+ 0x87f103f8,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00899,
+ 0xc1019800,
+ 0x981814b6,
+ 0x25b6c002,
+ 0x0512fd08,
+ 0xf1160180,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00999f0,
+ 0x27f10089,
+ 0x24b60a04,
+ 0x0021d006,
+ 0xf10127f0,
+ 0xb60a2017,
+ 0x12d00614,
+ 0x0017f100,
+ 0x0613f002,
+ 0xf80501fa,
+ 0x5c87f103,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d009,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0599f094,
+ 0xf80089d0,
+ 0x3121f500,
+ 0xb821f506,
+ 0x0ca7f006,
+ 0xf1c921f4,
+ 0xb60a1017,
+ 0x27f00614,
+ 0x0012d005,
+ 0xfd0012cf,
+ 0x1bf40522,
+ 0x4921f5fa,
+ 0x9800f806,
+ 0x27f18103,
+ 0x24b60a04,
+ 0x0023d006,
+ 0x34c434bd,
+ 0x0f1bf4ff,
+ 0x030057f1,
+ 0xfa0653f0,
+ 0x03f80535,
+ 0x98c04e98,
+ 0x21f4c14f,
+ 0x0830b68d,
+ 0xf40112b6,
+ 0x0398df1b,
+ 0x0023d016,
+ 0xf1800080,
+ 0xf0020017,
+ 0x01fa0613,
+ 0xf803f806,
+ 0x0611f400,
+ 0xf01102f4,
+ 0x21f510f7,
+ 0x21f50698,
+ 0x11f40631,
+ 0x02f7f01c,
+ 0x065721f5,
+ 0x066621f5,
+ 0x067821f5,
+ 0x21f5f4bd,
+ 0x21f50657,
+ 0x019806b8,
+ 0x1427f116,
+ 0x0624b604,
+ 0xf10020d0,
+ 0xf0a500e7,
+ 0x1fb941e3,
+ 0x8d21f402,
+ 0xf004e0b6,
+ 0x2cf001fc,
+ 0x0124b602,
+ 0xf405f2fd,
+ 0x17f18d21,
+ 0x13f04afc,
+ 0x0c27f002,
+ 0xf50012d0,
+ 0xf1020721,
+ 0xf047fc27,
+ 0x20d00223,
+ 0x012cf000,
+ 0xd00320b6,
+ 0xacf00012,
+ 0x06a5f001,
+ 0x9800b7f0,
+ 0x0d98140c,
+ 0x00e7f015,
+ 0x015c21f5,
+ 0xf508a7f0,
+ 0xf5010321,
+ 0xf4020721,
+ 0xa7f02201,
+ 0xc921f40c,
+ 0x0a1017f1,
+ 0xf00614b6,
+ 0x12d00527,
+ 0x0012cf00,
+ 0xf40522fd,
+ 0x02f4fa1b,
+ 0x02f7f032,
+ 0x065721f5,
+ 0x21f5f4bd,
+ 0x21f50698,
+ 0x21f50226,
+ 0xf4bd0666,
+ 0x065721f5,
+ 0x981011f4,
+ 0x11fd8001,
+ 0x070bf405,
+ 0x07df21f5,
+ 0x064921f5,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 82357d2..b701c43 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -32,7 +32,6 @@ struct nvc0_instmem_priv {
struct nouveau_channel *bar1;
struct nouveau_gpuobj *bar3_pgd;
struct nouveau_channel *bar3;
- struct nouveau_gpuobj *chan_pgd;
};
int
@@ -181,17 +180,11 @@ nvc0_instmem_init(struct drm_device *dev)
goto error;
/* channel vm */
- ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
+ &dev_priv->chan_vm);
if (ret)
goto error;
- ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
- if (ret)
- goto error;
-
- nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
- nouveau_vm_ref(NULL, &vm, NULL);
-
nvc0_instmem_resume(dev);
return 0;
error:
@@ -211,8 +204,7 @@ nvc0_instmem_takedown(struct drm_device *dev)
nv_wr32(dev, 0x1704, 0x00000000);
nv_wr32(dev, 0x1714, 0x00000000);
- nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
- nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
nvc0_channel_del(&priv->bar1);
nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index a179e6c..9e35294 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -105,7 +105,11 @@ nvc0_vm_flush(struct nouveau_vm *vm)
struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd;
unsigned long flags;
- u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
+ u32 engine;
+
+ engine = 1;
+ if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
+ engine |= 4;
pinstmem->flush(vm->dev);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index 67c6ec6..e45a24d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -61,9 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
u32 type, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
- struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm *mm = dev_priv->engine.vram.mm;
struct nouveau_mm_node *r;
struct nouveau_mem *mem;
int ret;
@@ -105,9 +103,15 @@ int
nvc0_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 length;
dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
- dev_priv->vram_rblock_size = 4096;
- return 0;
+
+ length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
+
+ return nouveau_mm_init(&vram->mm, rsvd_head, length, 1);
}
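
The reworked nvc0_vram_init() above carves the usable VRAM range out in 4 KiB pages: 256 KiB at the head (VGA memory) and 1 MiB at the tail (VBIOS etc.) are held back before nouveau_mm_init() is handed the remainder. A minimal standalone sketch of the same arithmetic, with the 1 GiB VRAM size assumed purely for illustration:

/* Illustrative only: mirrors the page arithmetic in nvc0_vram_init(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_size = 1024ULL << 20;          /* assume 1 GiB of VRAM */
	uint32_t rsvd_head = ( 256 * 1024) >> 12;    /* 64 pages for VGA memory */
	uint32_t rsvd_tail = (1024 * 1024) >> 12;    /* 256 pages for the VBIOS */
	uint64_t length = (vram_size >> 12) - rsvd_head - rsvd_tail;

	/* nouveau_mm_init(&vram->mm, rsvd_head, length, 1) would then manage
	 * 'length' pages starting at page 'rsvd_head'. */
	printf("%llu usable pages starting at page %u\n",
	       (unsigned long long)length, rsvd_head);
	return 0;
}
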
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 3896ef8..9f363e0 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -Iinclude/drm
hostprogs-y := mkregtable
+clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
quiet_cmd_mkregtable = MKREGTABLE $@
cmd_mkregtable = $(obj)/mkregtable $< > $@
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index ebdb0fd..e88c644 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1245,6 +1245,9 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
char name[512];
int i;
+ if (!ctx)
+ return NULL;
+
ctx->card = card;
ctx->bios = bios;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9541995..c742944 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -764,7 +764,7 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
}
static void atombios_crtc_program_pll(struct drm_crtc *crtc,
- int crtc_id,
+ u32 crtc_id,
int pll_id,
u32 encoder_mode,
u32 encoder_id,
@@ -851,8 +851,7 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v5.ucPpll = pll_id;
break;
case 6:
- args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id;
- args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
+ args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
args.v6.ucRefDiv = ref_div;
args.v6.usFbDiv = cpu_to_le16(fb_div);
args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8c0f9e3..7ad43c6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -613,6 +613,18 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
return true;
}
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if (!radeon_dp_get_link_status(radeon_connector, link_status))
+ return false;
+ if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
+ return false;
+ return true;
+}
+
struct radeon_dp_link_train_info {
struct radeon_device *rdev;
struct drm_encoder *encoder;
@@ -627,6 +639,7 @@ struct radeon_dp_link_train_info {
u8 train_set[4];
u8 link_status[DP_LINK_STATUS_SIZE];
u8 tries;
+ bool use_dpencoder;
};
static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
@@ -646,7 +659,7 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
int rtp = 0;
/* set training pattern on the source */
- if (ASIC_IS_DCE4(dp_info->rdev)) {
+ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
switch (tp) {
case DP_TRAINING_PATTERN_1:
rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
@@ -706,7 +719,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
/* start training on the source */
- if (ASIC_IS_DCE4(dp_info->rdev))
+ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
atombios_dig_encoder_setup(dp_info->encoder,
ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
else
@@ -731,7 +744,7 @@ static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info
DP_TRAINING_PATTERN_DISABLE);
/* disable the training pattern on the source */
- if (ASIC_IS_DCE4(dp_info->rdev))
+ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
atombios_dig_encoder_setup(dp_info->encoder,
ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
else
@@ -869,7 +882,8 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
struct radeon_dp_link_train_info dp_info;
- u8 tmp;
+ int index;
+ u8 tmp, frev, crev;
if (!radeon_encoder->enc_priv)
return;
@@ -884,6 +898,18 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
(dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
return;
+ /* DPEncoderService newer than 1.1 can't properly program the
+ * training pattern. When facing such a version, use
+ * DIGXEncoderControl (X == 1 | 2)
+ */
+ dp_info.use_dpencoder = true;
+ index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+ if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
+ if (crev > 1) {
+ dp_info.use_dpencoder = false;
+ }
+ }
+
dp_info.enc_id = 0;
if (dig->dig_encoder)
dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 15bd047..dc0a5b5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+{
+ u16 ctl, v;
+ int cap, err;
+
+ cap = pci_pcie_cap(rdev->pdev);
+ if (!cap)
+ return;
+
+ err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (err)
+ return;
+
+ v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
+
+ /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
+ * to avoid hangs or performance issues
+ */
+ if ((v == 0) || (v == 6) || (v == 7)) {
+ ctl &= ~PCI_EXP_DEVCTL_READRQ;
+ ctl |= (2 << 12);
+ pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
+ }
+}
+
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
@@ -743,7 +768,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
!evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
!evergreen_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
- DRM_INFO("force priority to high\n");
+ DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
@@ -1357,6 +1382,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
SOFT_RESET_PA |
SOFT_RESET_SH |
SOFT_RESET_VGT |
+ SOFT_RESET_SPI |
SOFT_RESET_SX));
RREG32(GRBM_SOFT_RESET);
mdelay(15);
@@ -1382,9 +1408,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
- RB_RPTR_SWAP(2) |
-#endif
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -1865,6 +1888,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+ evergreen_fix_pci_max_read_req_size(rdev);
+
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
cc_gc_shader_pipe_config |=
@@ -2047,6 +2072,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
+ rdev->config.evergreen.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -2761,6 +2787,9 @@ int evergreen_irq_process(struct radeon_device *rdev)
return IRQ_NONE;
}
restart_ih:
+ /* Order reading of wptr vs. reading of IH ring data */
+ rmb();
+
/* display interrupts */
evergreen_irq_ack(rdev);
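
The new evergreen_fix_pci_max_read_req_size() above works on the READRQ field (bits 14:12) of the PCIe Device Control register, which encodes the maximum read request size as 128 << v bytes; encodings 6 and 7 are reserved, and the fix also rejects 0, forcing the value back to 2 (512 bytes). A small standalone decode sketch, not part of the patch, with the register value assumed for illustration:

/* Illustrative sketch of the READRQ decode used above; not part of the patch.
 * PCI_EXP_DEVCTL_READRQ occupies bits 14:12 of the Device Control register
 * and encodes the max read request size as 128 << v bytes. */
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ 0x7000	/* same mask as <linux/pci_regs.h> */

static unsigned int readrq_bytes(uint16_t devctl)
{
	unsigned int v = (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	return 128u << v;	/* v = 2 -> 512 bytes, the value the fix forces */
}

int main(void)
{
	uint16_t devctl = 2 << 12;	/* assume a register value for illustration */

	printf("max read request: %u bytes\n", readrq_bytes(devctl));
	return 0;
}
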
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 23d3641..a134790 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -428,7 +428,7 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
i = (reg >> 7);
- if (i > last_reg) {
+ if (i >= last_reg) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
@@ -856,7 +856,6 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
case SQ_PGM_START_PS:
case SQ_PGM_START_HS:
case SQ_PGM_START_LS:
- case GDS_ADDR_BASE:
case SQ_CONST_MEM_BASE:
case SQ_ALU_CONST_CACHE_GS_0:
case SQ_ALU_CONST_CACHE_GS_1:
@@ -946,6 +945,34 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
}
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
+ case SX_MEMORY_EXPORT_BASE:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ case CAYMAN_SX_SCATTER_EXPORT_BASE:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
default:
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
@@ -1153,6 +1180,34 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return r;
}
break;
+ case PACKET3_DISPATCH_DIRECT:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DISPATCH_DIRECT\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DISPATCH_INDIRECT:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ return -EINVAL;
+ }
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
DRM_ERROR("bad WAIT_REG_MEM\n");
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b7b2714..7363d9d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -351,6 +351,7 @@
#define COLOR_BUFFER_SIZE(x) ((x) << 0)
#define POSITION_BUFFER_SIZE(x) ((x) << 8)
#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_MEMORY_EXPORT_BASE 0x9010
#define SX_MISC 0x28350
#define CB_PERF_CTR0_SEL_0 0x9A20
@@ -1122,6 +1123,7 @@
#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0
#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7
+#define CAYMAN_SX_SCATTER_EXPORT_BASE 0x28358
/* cayman packet3 addition */
#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 559dbd4..cbf57d7 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -669,6 +670,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+ evergreen_fix_pci_max_read_req_size(rdev);
+
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
@@ -833,6 +836,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
+ rdev->config.cayman.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -1158,6 +1162,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
SOFT_RESET_PA |
SOFT_RESET_SH |
SOFT_RESET_VGT |
+ SOFT_RESET_SPI |
SOFT_RESET_SX));
RREG32(GRBM_SOFT_RESET);
mdelay(15);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index bc54b26..aa5571b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1662,6 +1662,7 @@ void r600_gpu_init(struct radeon_device *rdev)
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
rdev->config.r600.tile_config = tiling_config;
+ rdev->config.r600.backend_map = backend_map;
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
@@ -2212,9 +2213,6 @@ int r600_cp_resume(struct radeon_device *rdev)
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
- RB_RPTR_SWAP(2) |
-#endif
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -2994,10 +2992,6 @@ int r600_irq_init(struct radeon_device *rdev)
/* RPTR_REARM only works if msi's are enabled */
if (rdev->msi_enabled)
ih_cntl |= RPTR_REARM;
-
-#ifdef __BIG_ENDIAN
- ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
-#endif
WREG32(IH_CNTL, ih_cntl);
/* force the active interrupt state to all disabled */
@@ -3308,6 +3302,10 @@ int r600_irq_process(struct radeon_device *rdev)
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
+ /* No MSIs, need a dummy read to flush PCI DMAs */
+ if (!rdev->msi_enabled)
+ RREG32(IH_RB_WPTR);
+
wptr = r600_get_ih_wptr(rdev);
rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
@@ -3320,6 +3318,9 @@ int r600_irq_process(struct radeon_device *rdev)
}
restart_ih:
+ /* Order reading of wptr vs. reading of IH ring data */
+ rmb();
+
/* display interrupts */
r600_irq_ack(rdev);
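
Both IH handlers above add an rmb() after reading wptr so that reads of the IH ring entries cannot be reordered ahead of the write-pointer read. A rough consumer-side sketch of the same ordering concern, expressed with C11 acquire/release atomics rather than kernel barriers; the ring layout and names are invented for illustration:

/* Illustrative only; the ring layout and names below are invented. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256u

struct ring {
	uint32_t entries[RING_SIZE];
	_Atomic uint32_t wptr;		/* producer bumps this after writing an entry */
};

/* Consumer side: the acquire load pairs with the producer's release store of
 * wptr, so the entry contents are visible before they are read -- the same
 * ordering the kernel code gets from reading wptr and then issuing rmb(). */
static int ring_pop(struct ring *r, uint32_t *rptr, uint32_t *out)
{
	uint32_t wptr = atomic_load_explicit(&r->wptr, memory_order_acquire);

	if (*rptr == wptr)
		return 0;		/* ring empty */
	*out = r->entries[*rptr];
	*rptr = (*rptr + 1) % RING_SIZE;
	return 1;
}

int main(void)
{
	static struct ring r;
	uint32_t rptr = 0, v;

	/* Producer side (normally another context): write entry, then publish. */
	r.entries[0] = 0xdeadbeef;
	atomic_store_explicit(&r.wptr, 1, memory_order_release);

	if (ring_pop(&r, &rptr, &v))
		printf("popped 0x%08x\n", v);
	return 0;
}
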
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index c3ab959..45fd592 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1802,8 +1802,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
- RADEON_BUF_SWAP_32BIT |
- RADEON_RB_NO_UPDATE |
+ R600_BUF_SWAP_32BIT |
+ R600_RB_NO_UPDATE |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
@@ -1820,15 +1820,15 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
- RADEON_BUF_SWAP_32BIT |
- RADEON_RB_NO_UPDATE |
- RADEON_RB_RPTR_WR_ENA |
+ R600_BUF_SWAP_32BIT |
+ R600_RB_NO_UPDATE |
+ R600_RB_RPTR_WR_ENA |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
RADEON_WRITE(R600_CP_RB_CNTL,
- RADEON_RB_NO_UPDATE |
- RADEON_RB_RPTR_WR_ENA |
+ R600_RB_NO_UPDATE |
+ R600_RB_RPTR_WR_ENA |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#endif
@@ -1851,13 +1851,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
- ((unsigned long) dev->sg->virtual)
+ dev_priv->gart_vm_start;
}
- RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (rptr_addr & 0xfffffffc));
- RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
- upper_32_bits(rptr_addr));
+ RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc));
+ RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 909bda8..cf83aa0 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -915,12 +915,11 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
{
struct r600_cs_track *track = (struct r600_cs_track *)p->track;
struct radeon_cs_reloc *reloc;
- u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
u32 m, i, tmp, *ib;
int r;
i = (reg >> 7);
- if (i > last_reg) {
+ if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
@@ -1200,6 +1199,15 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
}
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
+ case SX_MEMORY_EXPORT_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
default:
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ef0e0e0..32807ba 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -60,7 +60,7 @@
* are considered as fatal)
*/
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
@@ -1003,6 +1003,7 @@ struct r600_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
+ unsigned backend_map;
struct r100_gpu_lockup lockup;
};
@@ -1028,6 +1029,7 @@ struct rv770_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
+ unsigned backend_map;
struct r100_gpu_lockup lockup;
};
@@ -1054,6 +1056,7 @@ struct evergreen_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
+ unsigned backend_map;
struct r100_gpu_lockup lockup;
};
@@ -1174,7 +1177,7 @@ struct radeon_device {
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
- void *rmmio;
+ void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
radeon_rreg_t pll_rreg;
@@ -1251,20 +1254,20 @@ int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
- return readl(((void __iomem *)rdev->rmmio) + reg);
+ return readl((rdev->rmmio) + reg);
else {
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
+ return readl((rdev->rmmio) + RADEON_MM_DATA);
}
}
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
if (reg < rdev->rmmio_size)
- writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ writel(v, (rdev->rmmio) + reg);
else {
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, (rdev->rmmio) + RADEON_MM_DATA);
}
}
@@ -1296,10 +1299,10 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
/*
* Registers read & write functions.
*/
-#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
-#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
-#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
-#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG8(reg) readb((rdev->rmmio) + (reg))
+#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
+#define RREG16(reg) readw((rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index b244962..df8218b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -625,7 +625,7 @@ static struct radeon_asic r600_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
- .copy_dma = &r600_copy_blit,
+ .copy_dma = NULL,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -672,7 +672,7 @@ static struct radeon_asic rs780_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
- .copy_dma = &r600_copy_blit,
+ .copy_dma = NULL,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -719,7 +719,7 @@ static struct radeon_asic rv770_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
- .copy_dma = &r600_copy_blit,
+ .copy_dma = NULL,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -766,7 +766,7 @@ static struct radeon_asic evergreen_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
- .copy_dma = &evergreen_copy_blit,
+ .copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -813,7 +813,7 @@ static struct radeon_asic sumo_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
- .copy_dma = &evergreen_copy_blit,
+ .copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -860,7 +860,7 @@ static struct radeon_asic btc_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
- .copy_dma = &evergreen_copy_blit,
+ .copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -907,7 +907,7 @@ static struct radeon_asic cayman_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
- .copy_dma = &evergreen_copy_blit,
+ .copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 2d48e7a..b6e18c8 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,7 +96,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
* Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
* tree. Hopefully, ATI OF driver is kind enough to fill these
*/
-static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
+static bool radeon_read_clocks_OF(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct device_node *dp = rdev->pdev->dev.of_node;
@@ -166,7 +166,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
return true;
}
#else
-static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
+static bool radeon_read_clocks_OF(struct drm_device *dev)
{
return false;
}
@@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev)
} else {
DRM_INFO("Using generic clock info\n");
+ /* may need to be per card */
+ rdev->clock.max_pixel_clock = 35000;
+
if (rdev->flags & RADEON_IS_IGP) {
p1pll->reference_freq = 1432;
p2pll->reference_freq = 1432;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e459467..6367524 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -779,7 +779,8 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
}
}
}
- } else if (rdev->family >= CHIP_R200) {
+ } else if ((rdev->family == CHIP_R200) ||
+ (rdev->family >= CHIP_R300)) {
/* 0x68 */
i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
@@ -2556,6 +2557,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
u16 offset, misc, misc2 = 0;
u8 rev, blocks, tmp;
int state_index = 0;
+ struct radeon_i2c_bus_rec i2c_bus;
rdev->pm.default_power_state_index = -1;
@@ -2574,7 +2576,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
if (offset) {
u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
- struct radeon_i2c_bus_rec i2c_bus;
rev = RBIOS8(offset);
@@ -2616,6 +2617,25 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
+ } else {
+ /* boards with a thermal chip, but no overdrive table */
+
+ /* Asus 9600xt has an f75375 on the monid bus */
+ if ((dev->pdev->device == 0x4152) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0xc002)) {
+ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = "f75375";
+ info.addr = 0x28;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ name, info.addr);
+ }
+ }
}
if (rdev->flags & RADEON_IS_MOBILITY) {
@@ -3278,6 +3298,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_device == 0x30a4)
return;
+ /* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS480 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x30ae)
+ return;
+
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 9792d4f..c4b8741 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -60,18 +60,20 @@ void radeon_connector_hotplug(struct drm_connector *connector)
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
- /* powering up/down the eDP panel generates hpd events which
- * can interfere with modesetting.
- */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ /* if the connector is already off, don't turn it back on */
+ if (connector->dpms != DRM_MODE_DPMS_ON)
return;
- /* pre-r600 did not always have the hpd pins mapped accurately to connectors */
- if (rdev->family >= CHIP_R600) {
- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ /* just deal with DP (not eDP) here. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ int saved_dpms = connector->dpms;
+
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+ radeon_dp_needs_link_train(radeon_connector))
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ connector->dpms = saved_dpms;
}
}
@@ -430,16 +432,73 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
return 0;
}
+/*
+ * Some integrated ATI Radeon chipset implementations (e.g.
+ * Asus M2A-VM HDMI) may indicate the availability of a DDC,
+ * even when there's no monitor connected. For these connectors
+ * the following DDC probe extension is applied: also check for the
+ * availability of an EDID with at least a correct EDID header. Only
+ * then is DDC assumed to be available. This prevents drm_get_edid() and
+ * drm_edid_block_valid() from periodically dumping data and kernel
+ * errors into the logs and onto the terminal.
+ */
+static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
+ uint32_t supported_device,
+ int connector_type)
+{
+ /* Asus M2A-VM HDMI board sends data to the i2c bus even
+ * if the HDMI add-on card is not plugged in or HDMI is disabled in
+ * the BIOS. Valid DDC can only be assumed if a valid EDID header
+ * can also be retrieved via the i2c bus during the DDC probe */
+ if ((dev->pdev->device == 0x791e) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x826d)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+ /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus
+ * for a DVI connector that is not implemented */
+ if ((dev->pdev->device == 0x796e) &&
+ (dev->pdev->subsystem_vendor == 0x1019) &&
+ (dev->pdev->subsystem_device == 0x2615)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_DVID) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+ /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100
+ * (RS690M) sends data to i2c bus for a HDMI connector that
+ * is not implemented */
+ if ((dev->pdev->device == 0x791f) &&
+ (dev->pdev->subsystem_vendor == 0x1179) &&
+ (dev->pdev->subsystem_device == 0xff68)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+
+ /* Default: no EDID header probe required for DDC probing */
+ return false;
+}
+
static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ struct drm_display_mode *t, *mode;
+
+ /* If the EDID preferred mode doesn't match the native mode, use it */
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ if (mode->hdisplay != native_mode->hdisplay ||
+ mode->vdisplay != native_mode->vdisplay)
+ memcpy(native_mode, mode, sizeof(*mode));
+ }
+ }
/* Try to get native mode details from EDID if necessary */
if (!native_mode->clock) {
- struct drm_display_mode *t, *mode;
-
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay) {
@@ -450,6 +509,7 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
}
}
}
+
if (!native_mode->clock) {
DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
radeon_encoder->rmx_type = RMX_OFF;
@@ -661,7 +721,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -833,7 +894,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
bool dret = false;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -1235,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
- } else {
- /* need to setup ddc on the bridge */
- if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ } else if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ /* DP bridges are always DP */
+ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+ /* get the DPCD from the bridge */
+ radeon_dp_getdpcd(radeon_connector);
+
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ ret = connector_status_connected;
+ else {
+ /* need to setup ddc on the bridge */
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
+ if (radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe))
+ ret = connector_status_connected;
+ }
+
+ if ((ret == connector_status_disconnected) &&
+ radeon_connector->dac_load_detect) {
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ if (encoder) {
+ encoder_funcs = encoder->helper_private;
+ ret = encoder_funcs->detect(encoder, connector);
+ }
}
+ } else {
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
@@ -1251,20 +1334,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- if (radeon_ddc_probe(radeon_connector))
+ if (radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe))
ret = connector_status_connected;
}
}
-
- if ((ret == connector_status_disconnected) &&
- radeon_connector->dac_load_detect) {
- struct drm_encoder *encoder = radeon_best_single_encoder(connector);
- struct drm_encoder_helper_funcs *encoder_funcs;
- if (encoder) {
- encoder_funcs = encoder->helper_private;
- ret = encoder_funcs->detect(encoder, connector);
- }
- }
}
radeon_connector_update_scratch_regs(connector, ret);
@@ -1406,6 +1480,9 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
+ radeon_connector->requires_extended_probe =
+ radeon_connector_needs_extended_probe(rdev, supported_device,
+ connector_type);
radeon_connector->router = *router;
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
@@ -1752,6 +1829,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
+ radeon_connector->requires_extended_probe =
+ radeon_connector_needs_extended_probe(rdev, supported_device,
+ connector_type);
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 7586779..045ec59 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2115,7 +2115,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
if (drm_pci_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
- else if (drm_pci_device_is_pcie(dev))
+ else if (pci_is_pcie(dev->pdev))
dev_priv->flags |= RADEON_IS_PCIE;
else
dev_priv->flags |= RADEON_IS_PCI;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7cfaa7e..b51e157 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -32,6 +32,7 @@
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
+#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
@@ -300,6 +301,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
+ mc->real_vram_size = radeon_vram_limit;
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
@@ -348,6 +351,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
+ if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ return false;
+
/* first check CRTCs */
if (ASIC_IS_DCE41(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
@@ -704,8 +710,9 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->gpu_lockup = false;
rdev->accel_working = false;
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
- radeon_family_name[rdev->family], pdev->vendor, pdev->device);
+ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
+ radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
/* mutex initialization are all done here so we
* can recall function without having locking issues */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 292f73f..6cc17fb 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -282,7 +282,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
work = radeon_crtc->unpin_work;
if (work == NULL ||
- !radeon_fence_signaled(work->fence)) {
+ (work->fence && !radeon_fence_signaled(work->fence))) {
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
@@ -348,7 +348,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
struct radeon_framebuffer *new_radeon_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
- struct radeon_fence *fence;
struct radeon_unpin_work *work;
unsigned long flags;
u32 tiling_flags, pitch_pixels;
@@ -359,16 +358,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
if (work == NULL)
return -ENOMEM;
- r = radeon_fence_create(rdev, &fence);
- if (unlikely(r != 0)) {
- kfree(work);
- DRM_ERROR("flip queue: failed to create fence.\n");
- return -ENOMEM;
- }
work->event = event;
work->rdev = rdev;
work->crtc_id = radeon_crtc->crtc_id;
- work->fence = radeon_fence_ref(fence);
old_radeon_fb = to_radeon_framebuffer(crtc->fb);
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
@@ -377,6 +369,10 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
drm_gem_object_reference(obj);
rbo = gem_to_radeon_bo(obj);
work->old_rbo = rbo;
+ obj = new_radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
+ if (rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
INIT_WORK(&work->work, radeon_unpin_work_func);
/* We borrow the event spin lock for protecting unpin_work */
@@ -391,9 +387,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
/* pin the new buffer */
- obj = new_radeon_fb->obj;
- rbo = gem_to_radeon_bo(obj);
-
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
work->old_rbo, rbo);
@@ -461,37 +454,18 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
goto pflip_cleanup1;
}
- /* 32 ought to cover us */
- r = radeon_ring_lock(rdev, 32);
- if (r) {
- DRM_ERROR("failed to lock the ring before flip\n");
- goto pflip_cleanup2;
- }
-
- /* emit the fence */
- radeon_fence_emit(rdev, fence);
/* set the proper interrupt */
radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
- /* fire the ring */
- radeon_ring_unlock_commit(rdev);
return 0;
-pflip_cleanup2:
- drm_vblank_put(dev, radeon_crtc->crtc_id);
-
pflip_cleanup1:
- r = radeon_bo_reserve(rbo, false);
- if (unlikely(r != 0)) {
+ if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto pflip_cleanup;
}
- r = radeon_bo_unpin(rbo);
- if (unlikely(r != 0)) {
- radeon_bo_unreserve(rbo);
- r = -EINVAL;
+ if (unlikely(radeon_bo_unpin(rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
- goto pflip_cleanup;
}
radeon_bo_unreserve(rbo);
@@ -501,7 +475,7 @@ pflip_cleanup:
unlock_free:
drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
spin_unlock_irqrestore(&dev->event_lock, flags);
- radeon_fence_unref(&fence);
+ radeon_fence_unref(&work->fence);
kfree(work);
return r;
@@ -733,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
radeon_router_select_ddc_port(radeon_connector);
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
- }
- if (!radeon_connector->ddc_bus)
- return -1;
- if (!radeon_connector->edid) {
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &dig->dp_i2c_bus->adapter);
+ else if (radeon_connector->ddc_bus && !radeon_connector->edid)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ } else {
+ if (radeon_connector->ddc_bus && !radeon_connector->edid)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
}
if (!radeon_connector->edid) {
@@ -777,8 +756,17 @@ static int radeon_ddc_dump(struct drm_connector *connector)
if (!radeon_connector->ddc_bus)
return -1;
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
+ /* Log EDID retrieval status here. In particular, for connectors
+ * with the requires_extended_probe flag set, radeon_dvi_detect()
+ * will not fetch the EDID on this connector as long as no valid
+ * EDID header is found */
if (edid) {
+ DRM_INFO("Radeon display connector %s: Found valid EDID",
+ drm_get_connector_name(connector));
kfree(edid);
+ } else {
+ DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID",
+ drm_get_connector_name(connector));
}
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 73dfbe8..e71d2ed 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -51,9 +51,10 @@
* 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
* 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
* 2.10.0 - fusion 2D tiling
+ * 2.11.0 - backend map, initial compute support for the CS checker
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 10
+#define KMS_DRIVER_MINOR 11
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b293487..319d85d 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -2323,6 +2323,9 @@ radeon_add_atom_encoder(struct drm_device *dev,
default:
encoder->possible_crtcs = 0x3;
break;
+ case 4:
+ encoder->possible_crtcs = 0xf;
+ break;
case 6:
encoder->possible_crtcs = 0x3f;
break;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 9a9f9fc..3475a09 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -29,7 +29,7 @@
* Dave Airlie
*/
#include <linux/seq_file.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 781196d..6c111c1 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -32,17 +32,17 @@
* radeon_ddc_probe
*
*/
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe)
{
- u8 out_buf[] = { 0x0, 0x0};
- u8 buf[2];
+ u8 out = 0x0;
+ u8 buf[8];
int ret;
struct i2c_msg msgs[] = {
{
.addr = 0x50,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &out,
},
{
.addr = 0x50,
@@ -52,15 +52,31 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
};
+ /* Read 8 bytes from i2c for extended probe of EDID header */
+ if (requires_extended_probe)
+ msgs[1].len = 8;
+
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
- if (ret == 2)
- return true;
-
- return false;
+ if (ret != 2)
+ /* Couldn't find an accessible DDC on this connector */
+ return false;
+ if (requires_extended_probe) {
+ /* Probe also for valid EDID header
+ * EDID header starts with:
+ * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+ * Only the first 6 bytes must be valid as
+ * drm_edid_block_valid() can fix the last 2 bytes */
+ if (drm_edid_header_is_valid(buf) < 6) {
+ /* Couldn't find an accessible EDID on this
+ * connector */
+ return false;
+ }
+ }
+ return true;
}
/* bit banging i2c */
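
The extended probe added above reads 8 bytes over DDC and only treats the DDC as usable when at least the first 6 bytes of the fixed EDID header (00 FF FF FF FF FF FF 00) match, which is what drm_edid_header_is_valid() scores. A standalone sketch of that scoring, not the DRM implementation itself, with the buffer contents assumed for illustration:

/* Illustrative sketch of the header scoring performed by
 * drm_edid_header_is_valid(); not part of the patch.
 * The probe above requires a score of at least 6. */
#include <stdio.h>

static int edid_header_score(const unsigned char *raw)
{
	static const unsigned char header[8] = {
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
	};
	int i, score = 0;

	for (i = 0; i < 8; i++)
		if (raw[i] == header[i])
			score++;
	return score;
}

int main(void)
{
	/* assume the first 8 bytes read back over DDC for illustration */
	unsigned char buf[8] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	printf("header score: %d (>= 6 counts as a valid EDID header)\n",
	       edid_header_score(buf));
	return 0;
}
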
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index bd58af6..be2c122 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -60,7 +60,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
/* update BUS flag */
if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
- } else if (drm_pci_device_is_pcie(dev)) {
+ } else if (pci_is_pcie(dev->pdev)) {
flags |= RADEON_IS_PCIE;
} else {
flags |= RADEON_IS_PCI;
@@ -237,6 +237,19 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_FUSION_GART_WORKING:
value = 1;
break;
+ case RADEON_INFO_BACKEND_MAP:
+ if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.backend_map;
+ else if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.backend_map;
+ else if (rdev->family >= CHIP_RV770)
+ value = rdev->config.rv770.backend_map;
+ else if (rdev->family >= CHIP_R600)
+ value = rdev->config.r600.backend_map;
+ else {
+ return -EINVAL;
+ }
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6df4e3c..68820f5 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -438,6 +438,9 @@ struct radeon_connector {
struct radeon_i2c_chan *ddc_bus;
/* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
+ /* for some Radeon chip families we apply an additional EDID header
+ check as part of the DDC probe */
+ bool requires_extended_probe;
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
@@ -476,6 +479,7 @@ extern void radeon_dp_set_link_config(struct drm_connector *connector,
struct drm_display_mode *mode);
extern void radeon_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
@@ -514,7 +518,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector,
+ bool requires_extended_probe);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index aaa19dc..6fabe89 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -594,6 +594,9 @@ int radeon_pm_init(struct radeon_device *rdev)
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index bc44a3d..b4ce864 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3295,7 +3295,7 @@
# define RADEON_RB_BUFSZ_MASK (0x3f << 0)
# define RADEON_RB_BLKSZ_SHIFT 8
# define RADEON_RB_BLKSZ_MASK (0x3f << 8)
-# define RADEON_BUF_SWAP_32BIT (1 << 17)
+# define RADEON_BUF_SWAP_32BIT (2 << 16)
# define RADEON_MAX_FETCH_SHIFT 18
# define RADEON_MAX_FETCH_MASK (0x3 << 18)
# define RADEON_RB_NO_UPDATE (1 << 27)
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dee4a0c..602fa35 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -40,10 +40,14 @@ void radeon_test_moves(struct radeon_device *rdev)
size = 1024 * 1024;
/* Number of tests =
- * (Total GTT - IB pool - writeback page - ring buffer) / test size
+ * (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
- rdev->cp.ring_size)) / size;
+ n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
+ if (rdev->wb.wb_obj)
+ n -= RADEON_GPU_PAGE_SIZE;
+ if (rdev->ih.ring_obj)
+ n -= rdev->ih.ring_size;
+ n /= size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
if (!gtt_obj) {
@@ -132,9 +136,15 @@ void radeon_test_moves(struct radeon_device *rdev)
gtt_start++, vram_start++) {
if (*vram_start != gtt_start) {
DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
- "expected 0x%p (GTT map 0x%p-0x%p)\n",
- i, *vram_start, gtt_start, gtt_map,
- gtt_end);
+ "expected 0x%p (GTT/VRAM offset "
+ "0x%16llx/0x%16llx)\n",
+ i, *vram_start, gtt_start,
+ (unsigned long long)
+ (gtt_addr - rdev->mc.gtt_start +
+ (void*)gtt_start - gtt_map),
+ (unsigned long long)
+ (vram_addr - rdev->mc.vram_start +
+ (void*)gtt_start - gtt_map));
radeon_bo_kunmap(vram_obj);
goto out_cleanup;
}
@@ -175,9 +185,15 @@ void radeon_test_moves(struct radeon_device *rdev)
gtt_start++, vram_start++) {
if (*gtt_start != vram_start) {
DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
- "expected 0x%p (VRAM map 0x%p-0x%p)\n",
- i, *gtt_start, vram_start, vram_map,
- vram_end);
+ "expected 0x%p (VRAM/GTT offset "
+ "0x%16llx/0x%16llx)\n",
+ i, *gtt_start, vram_start,
+ (unsigned long long)
+ (vram_addr - rdev->mc.vram_start +
+ (void*)vram_start - vram_map),
+ (unsigned long long)
+ (gtt_addr - rdev->mc.gtt_start +
+ (void*)vram_start - vram_map));
radeon_bo_kunmap(gtt_obj[i]);
goto out_cleanup;
}
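
A note on the sizing rework above: space for the writeback page and the IH ring is now subtracted only when those buffer objects actually exist. A quick arithmetic sketch of the same accounting, with made-up sizes (the real values come from the radeon_device at runtime):

#include <stdio.h>

int main(void)
{
        /* All sizes in bytes; the numbers below are purely illustrative. */
        unsigned long gtt_size     = 512UL * 1024 * 1024;
        unsigned long ib_pool_size = 16UL * 64 * 1024;  /* stand-in for RADEON_IB_POOL_SIZE*64K */
        unsigned long cp_ring_size = 1UL * 1024 * 1024;
        unsigned long gpu_page     = 4096;              /* writeback page, if allocated */
        unsigned long ih_ring_size = 64UL * 1024;       /* IH ring, if allocated */
        unsigned long test_size    = 1024 * 1024;
        int have_wb = 1, have_ih = 1;

        unsigned long n = gtt_size - ib_pool_size - cp_ring_size;
        if (have_wb)
                n -= gpu_page;
        if (have_ih)
                n -= ih_ring_size;
        n /= test_size;

        printf("number of 1 MiB move tests: %lu\n", n);
        return 0;
}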
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 60125dd..9b86fb0 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -450,6 +450,29 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
return -EINVAL;
mem->bus.base = rdev->mc.aper_base;
mem->bus.is_iomem = true;
+#ifdef __alpha__
+ /*
+ * Alpha: use bus.addr to hold the ioremap() return,
+ * so we can modify bus.base below.
+ */
+ if (mem->placement & TTM_PL_FLAG_WC)
+ mem->bus.addr =
+ ioremap_wc(mem->bus.base + mem->bus.offset,
+ mem->bus.size);
+ else
+ mem->bus.addr =
+ ioremap_nocache(mem->bus.base + mem->bus.offset,
+ mem->bus.size);
+
+ /*
+ * Alpha: Use just the bus offset plus
+ * the hose/domain memory base for bus.base.
+ * It then can be used to build PTEs for VRAM
+ * access, as done in ttm_bo_vm_fault().
+ */
+ mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
+ rdev->ddev->hose->dense_mem_base;
+#endif
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0aa8e85..2316977 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -208,6 +208,7 @@ cayman 0x9400
0x0002834C PA_SC_VPORT_ZMAX_15
0x00028350 SX_MISC
0x00028354 SX_SURFACE_SYNC
+0x0002835C SX_SCATTER_EXPORT_SIZE
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
@@ -432,6 +433,7 @@ cayman 0x9400
0x00028700 SPI_STACK_MGMT
0x00028704 SPI_WAVE_MGMT_1
0x00028708 SPI_WAVE_MGMT_2
+0x00028720 GDS_ADDR_BASE
0x00028724 GDS_ADDR_SIZE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 0e28cae..161737a 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -44,6 +44,7 @@ evergreen 0x9400
0x00008E28 SQ_STATIC_THREAD_MGMT_3
0x00008E2C SQ_LDS_RESOURCE_MGMT
0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009014 SX_MEMORY_EXPORT_SIZE
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
0x00009508 TA_CNTL_AUX
@@ -442,7 +443,9 @@ evergreen 0x9400
0x000286EC SPI_COMPUTE_NUM_THREAD_X
0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x00028720 GDS_ADDR_BASE
0x00028724 GDS_ADDR_SIZE
+0x00028728 GDS_ORDERED_WAVE_PER_SE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
0x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index ea49752..0380c5c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -429,6 +429,7 @@ r600 0x9400
0x00028438 SX_ALPHA_REF
0x00028410 SX_ALPHA_TEST_CONTROL
0x00028350 SX_MISC
+0x00009014 SX_MEMORY_EXPORT_SIZE
0x00009604 TC_INVALIDATE
0x00009400 TD_FILTER4
0x00009404 TD_FILTER4_1
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 1f5850e..4b5d0e6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -530,7 +530,7 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
- writeq(addr, ((void __iomem *)ptr) + (i * 8));
+ writeq(addr, ptr + (i * 8));
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4de5189..4720d00 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -778,6 +778,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
(cc_rb_backend_disable >> 16));
rdev->config.rv770.tile_config = gb_tiling_config;
+ rdev->config.rv770.backend_map = backend_map;
gb_tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, gb_tiling_config);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2e618b5..a4d38d8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -37,7 +37,7 @@
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
@@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
ret = ttm_tt_set_user(bo->ttm, current,
bo->buffer_start, bo->num_pages);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
break;
default:
printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
@@ -390,10 +392,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
* Create and bind a ttm if required.
*/
- if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
- ret = ttm_bo_add_ttm(bo, false);
- if (ret)
- goto out_err;
+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+ if (bo->ttm == NULL) {
+ ret = ttm_bo_add_ttm(bo, false);
+ if (ret)
+ goto out_err;
+ }
ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 77dbf40..ae3c6f5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- ttm_bo_free_old_node(bo);
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
(bo->ttm != NULL)) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
+ ttm_bo_free_old_node(bo);
} else {
/**
* This should help pipeline ordinary buffer moves.
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index de41e55..075daf4 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -30,7 +30,7 @@
#include "ttm/ttm_lock.h"
#include "ttm/ttm_module.h"
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index ebddd44..93577f2 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -55,7 +55,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct ttm_object_file {
struct ttm_object_device *tdev;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index d948575..727e93d 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -40,7 +40,7 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"
@@ -355,7 +355,7 @@ restart:
if (nr_free)
goto restart;
- /* Not allowed to fall tough or break because
+ /* Not allowed to fall through or break because
* following context is inside spinlock while we are
* outside here.
*/
@@ -556,7 +556,7 @@ out:
}
/**
- * Fill the given pool if there isn't enough pages and requested number of
+ * Fill the given pool if there aren't enough pages and the requested number of
* pages is small.
*/
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
@@ -576,8 +576,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
pool->fill_lock = true;
- /* If allocation request is small and there is not enough
- * pages in pool we fill the pool first */
+ /* If allocation request is small and there are not enough
+ * pages in a pool we fill the pool up first. */
if (count < _manager->options.small
&& count > pool->npages) {
struct list_head new_pages;
@@ -614,9 +614,9 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
}
/**
- * Cut count nubmer of pages from the pool and put them to return list
+ * Cut 'count' number of pages from the pool and put them on the return list.
*
- * @return count of pages still to allocate to fill the request.
+ * @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
struct list_head *pages, int ttm_flags,
@@ -637,7 +637,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
goto out;
}
/* find the last pages to include for requested number of pages. Split
- * pool to begin and halves to reduce search space. */
+ * pool to begin and halve it to reduce search space. */
if (count <= pool->npages/2) {
i = 0;
list_for_each(p, &pool->list) {
@@ -651,7 +651,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
break;
}
}
- /* Cut count number of pages from pool */
+ /* Cut 'count' number of pages from the pool */
list_cut_position(pages, &pool->list, p);
pool->npages -= count;
count = 0;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 306b15f..1130a89 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -589,6 +589,7 @@ config HID_WACOM_POWER_SUPPLY
config HID_WIIMOTE
tristate "Nintendo Wii Remote support"
depends on BT_HIDP
+ depends on LEDS_CLASS
---help---
Support for the Nintendo Wii Remote bluetooth device.
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index b85744f..18b3bc6 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -444,6 +444,12 @@ static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS),
+ .driver_data = APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 1a5cf0c..242353d 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1340,6 +1340,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index db63ccf..7484e1b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -109,6 +109,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
+#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f
+#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250
+#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -274,6 +277,7 @@
#define USB_DEVICE_ID_PENPOWER 0x00f4
#define USB_VENDOR_ID_GREENASIA 0x0e8f
+#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005
@@ -576,6 +580,9 @@
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
+#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
+#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
+
#define USB_VENDOR_ID_SKYCABLE 0x1223
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0ec91c1..f0fbd7b 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define NO_TOUCHES -1
#define SINGLE_TOUCH_UP -2
+/* Touch surface information. Dimension is in hundredths of a mm, min and max
+ * are in units. */
+#define MOUSE_DIMENSION_X (float)9056
+#define MOUSE_MIN_X -1100
+#define MOUSE_MAX_X 1258
+#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
+#define MOUSE_DIMENSION_Y (float)5152
+#define MOUSE_MIN_Y -1589
+#define MOUSE_MAX_Y 2047
+#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))
+
+#define TRACKPAD_DIMENSION_X (float)13000
+#define TRACKPAD_MIN_X -2909
+#define TRACKPAD_MAX_X 3167
+#define TRACKPAD_RES_X \
+ ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
+#define TRACKPAD_DIMENSION_Y (float)11000
+#define TRACKPAD_MIN_Y -2456
+#define TRACKPAD_MAX_Y 2565
+#define TRACKPAD_RES_Y \
+ ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
+
/**
* struct magicmouse_sc - Tracks Magic Mouse-specific data.
* @input: Input device through which we report events.
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
* inverse of the reported Y.
*/
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
- input_set_abs_params(input, ABS_MT_POSITION_X, -1100,
- 1358, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, -1589,
- 2047, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ MOUSE_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ MOUSE_RES_Y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
- input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0);
- input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_X, -2909,
- 3167, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, -2456,
- 2565, 4, 0);
+ input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
+ TRACKPAD_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
+ TRACKPAD_MAX_Y, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
+
+ input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
+ input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ TRACKPAD_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ TRACKPAD_RES_Y);
}
input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
+ /*
+	 * Some devices respond with 'invalid report id' when the feature
+	 * report switching them into multitouch mode is sent to them.
+ *
+ * This results in -EIO from the _raw low-level transport callback,
+ * but there seems to be no other way of switching the mode.
+ * Thus the super-ugly hacky success check below.
+ */
ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
HID_FEATURE_REPORT);
- if (ret != sizeof(feature)) {
+ if (ret != -EIO && ret != sizeof(feature)) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
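
A minimal userspace sketch of the resolution math the new macros above encode, assuming the usual input-layer convention (resolution in units per millimetre); the constants are copied from the hunk, everything else is illustrative only:

#include <stdio.h>

/* Values taken from the hunk above: dimension is in hundredths of a mm,
 * min/max are in raw device units. */
#define MOUSE_DIMENSION_X (float)9056
#define MOUSE_MIN_X -1100
#define MOUSE_MAX_X 1258
#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))

int main(void)
{
        /* (1258 - (-1100)) units over 90.56 mm is roughly 26 units/mm, the
         * value handed to input_abs_set_res() for ABS_MT_POSITION_X. */
        printf("X resolution: %.2f units/mm\n", MOUSE_RES_X);
        return 0;
}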
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 0688832..a597039 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
if (ret) {
hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
ret);
- /*
- * battery attribute is not critical for the tablet, but if it
- * failed then there is no need to create ac attribute
- */
- goto move_on;
+ goto err_battery;
}
wdata->ac.properties = wacom_ac_props;
@@ -371,14 +367,8 @@ static int wacom_probe(struct hid_device *hdev,
if (ret) {
hid_warn(hdev,
"can't create ac battery attribute, err: %d\n", ret);
- /*
- * ac attribute is not critical for the tablet, but if it
- * failed then we don't want to battery attribute to exist
- */
- power_supply_unregister(&wdata->battery);
+ goto err_ac;
}
-
-move_on:
#endif
hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
input = hidinput->input;
@@ -416,6 +406,13 @@ move_on:
return 0;
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+err_ac:
+ power_supply_unregister(&wdata->battery);
+err_battery:
+ device_remove_file(&hdev->dev, &dev_attr_speed);
+ hid_hw_stop(hdev);
+#endif
err_free:
kfree(wdata);
return ret;
@@ -426,6 +423,7 @@ static void wacom_remove(struct hid_device *hdev)
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
struct wacom_data *wdata = hid_get_drvdata(hdev);
#endif
+ device_remove_file(&hdev->dev, &dev_attr_speed);
hid_hw_stop(hdev);
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
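
The reworked probe path above replaces the 'move_on' shortcut with the usual goto-based unwind ladder: each failure label undoes exactly the steps that succeeded before the failure, in reverse order. A minimal sketch of that pattern with made-up resource names (not the driver's actual functions):

#include <stdio.h>

/* Hypothetical setup/teardown pairs standing in for battery/AC registration. */
static int setup_battery(void) { return 0; }
static int setup_ac(void)      { return -1; }   /* simulate a failure */
static void undo_battery(void) { puts("unregister battery"); }

static int probe(void)
{
        int ret;

        ret = setup_battery();
        if (ret)
                goto err;
        ret = setup_ac();
        if (ret)
                goto err_battery;       /* only the battery needs unwinding */

        return 0;

err_battery:
        undo_battery();
err:
        return ret;
}

int main(void)
{
        return probe() ? 1 : 0;
}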
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
index a594383..85a02e5 100644
--- a/drivers/hid/hid-wiimote.c
+++ b/drivers/hid/hid-wiimote.c
@@ -10,10 +10,10 @@
* any later version.
*/
-#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/input.h>
+#include <linux/leds.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include "hid-ids.h"
@@ -33,9 +33,9 @@ struct wiimote_state {
};
struct wiimote_data {
- atomic_t ready;
struct hid_device *hdev;
struct input_dev *input;
+ struct led_classdev *leds[4];
spinlock_t qlock;
__u8 head;
@@ -53,8 +53,15 @@ struct wiimote_data {
#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+/* return flag for led \num */
+#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
+
enum wiiproto_reqs {
+ WIIPROTO_REQ_NULL = 0x0,
WIIPROTO_REQ_LED = 0x11,
+ WIIPROTO_REQ_DRM = 0x12,
+ WIIPROTO_REQ_STATUS = 0x20,
+ WIIPROTO_REQ_RETURN = 0x22,
WIIPROTO_REQ_DRM_K = 0x30,
};
@@ -87,9 +94,6 @@ static __u16 wiiproto_keymap[] = {
BTN_MODE, /* WIIPROTO_KEY_HOME */
};
-#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
- dev))
-
static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
size_t count)
{
@@ -192,66 +196,96 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
wiimote_queue(wdata, cmd, sizeof(cmd));
}
-#define wiifs_led_show_set(num) \
-static ssize_t wiifs_led_show_##num(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct wiimote_data *wdata = dev_to_wii(dev); \
- unsigned long flags; \
- int state; \
- \
- if (!atomic_read(&wdata->ready)) \
- return -EBUSY; \
- \
- spin_lock_irqsave(&wdata->state.lock, flags); \
- state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num); \
- spin_unlock_irqrestore(&wdata->state.lock, flags); \
- \
- return sprintf(buf, "%d\n", state); \
-} \
-static ssize_t wiifs_led_set_##num(struct device *dev, \
- struct device_attribute *attr, const char *buf, size_t count) \
-{ \
- struct wiimote_data *wdata = dev_to_wii(dev); \
- int tmp = simple_strtoul(buf, NULL, 10); \
- unsigned long flags; \
- __u8 state; \
- \
- if (!atomic_read(&wdata->ready)) \
- return -EBUSY; \
- \
- spin_lock_irqsave(&wdata->state.lock, flags); \
- \
- state = wdata->state.flags; \
- \
- if (tmp) \
- wiiproto_req_leds(wdata, state | WIIPROTO_FLAG_LED##num);\
- else \
- wiiproto_req_leds(wdata, state & ~WIIPROTO_FLAG_LED##num);\
- \
- spin_unlock_irqrestore(&wdata->state.lock, flags); \
- \
- return count; \
-} \
-static DEVICE_ATTR(led##num, S_IRUGO | S_IWUSR, wiifs_led_show_##num, \
- wiifs_led_set_##num)
-
-wiifs_led_show_set(1);
-wiifs_led_show_set(2);
-wiifs_led_show_set(3);
-wiifs_led_show_set(4);
+/*
+ * Check what peripherals of the wiimote are currently
+ * active and select a proper DRM that supports all of
+ * the requested data inputs.
+ */
+static __u8 select_drm(struct wiimote_data *wdata)
+{
+ return WIIPROTO_REQ_DRM_K;
+}
+
+static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
+{
+ __u8 cmd[3];
+
+ if (drm == WIIPROTO_REQ_NULL)
+ drm = select_drm(wdata);
+
+ cmd[0] = WIIPROTO_REQ_DRM;
+ cmd[1] = 0;
+ cmd[2] = drm;
+
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
+{
+ struct wiimote_data *wdata;
+ struct device *dev = led_dev->dev->parent;
+ int i;
+ unsigned long flags;
+ bool value = false;
+
+ wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
+
+ for (i = 0; i < 4; ++i) {
+ if (wdata->leds[i] == led_dev) {
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+ break;
+ }
+ }
+
+ return value ? LED_FULL : LED_OFF;
+}
+
+static void wiimote_leds_set(struct led_classdev *led_dev,
+ enum led_brightness value)
+{
+ struct wiimote_data *wdata;
+ struct device *dev = led_dev->dev->parent;
+ int i;
+ unsigned long flags;
+ __u8 state, flag;
+
+ wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
+
+ for (i = 0; i < 4; ++i) {
+ if (wdata->leds[i] == led_dev) {
+ flag = WIIPROTO_FLAG_LED(i + 1);
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ state = wdata->state.flags;
+ if (value == LED_OFF)
+ wiiproto_req_leds(wdata, state & ~flag);
+ else
+ wiiproto_req_leds(wdata, state | flag);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+ break;
+ }
+ }
+}
static int wiimote_input_event(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
+ return 0;
+}
+
+static int wiimote_input_open(struct input_dev *dev)
+{
struct wiimote_data *wdata = input_get_drvdata(dev);
- if (!atomic_read(&wdata->ready))
- return -EBUSY;
- /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */
- smp_rmb();
+ return hid_hw_open(wdata->hdev);
+}
- return 0;
+static void wiimote_input_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+
+ hid_hw_close(wdata->hdev);
}
static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
@@ -281,6 +315,26 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
input_sync(wdata->input);
}
+static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+
+ /* on status reports the drm is reset so we need to resend the drm */
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+}
+
+static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
+{
+ __u8 err = payload[3];
+ __u8 cmd = payload[2];
+
+ handler_keys(wdata, payload);
+
+ if (err)
+ hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
+ cmd);
+}
+
struct wiiproto_handler {
__u8 id;
size_t size;
@@ -288,6 +342,8 @@ struct wiiproto_handler {
};
static struct wiiproto_handler handlers[] = {
+ { .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
+ { .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
{ .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
{ .id = 0 }
};
@@ -300,11 +356,6 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
int i;
unsigned long flags;
- if (!atomic_read(&wdata->ready))
- return -EBUSY;
- /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */
- smp_rmb();
-
if (size < 1)
return -EINVAL;
@@ -321,6 +372,58 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
return 0;
}
+static void wiimote_leds_destroy(struct wiimote_data *wdata)
+{
+ int i;
+ struct led_classdev *led;
+
+ for (i = 0; i < 4; ++i) {
+ if (wdata->leds[i]) {
+ led = wdata->leds[i];
+ wdata->leds[i] = NULL;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+ }
+}
+
+static int wiimote_leds_create(struct wiimote_data *wdata)
+{
+ int i, ret;
+ struct device *dev = &wdata->hdev->dev;
+ size_t namesz = strlen(dev_name(dev)) + 9;
+ struct led_classdev *led;
+ char *name;
+
+ for (i = 0; i < 4; ++i) {
+ led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL);
+ if (!led) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ name = (void*)&led[1];
+ snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = wiimote_leds_get;
+ led->brightness_set = wiimote_leds_set;
+
+ ret = led_classdev_register(dev, led);
+ if (ret) {
+ kfree(led);
+ goto err;
+ }
+ wdata->leds[i] = led;
+ }
+
+ return 0;
+
+err:
+ wiimote_leds_destroy(wdata);
+ return ret;
+}
+
static struct wiimote_data *wiimote_create(struct hid_device *hdev)
{
struct wiimote_data *wdata;
@@ -341,6 +444,8 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
input_set_drvdata(wdata->input, wdata);
wdata->input->event = wiimote_input_event;
+ wdata->input->open = wiimote_input_open;
+ wdata->input->close = wiimote_input_close;
wdata->input->dev.parent = &wdata->hdev->dev;
wdata->input->id.bustype = wdata->hdev->bus;
wdata->input->id.vendor = wdata->hdev->vendor;
@@ -362,6 +467,12 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
static void wiimote_destroy(struct wiimote_data *wdata)
{
+ wiimote_leds_destroy(wdata);
+
+ input_unregister_device(wdata->input);
+ cancel_work_sync(&wdata->worker);
+ hid_hw_stop(wdata->hdev);
+
kfree(wdata);
}
@@ -377,19 +488,6 @@ static int wiimote_hid_probe(struct hid_device *hdev,
return -ENOMEM;
}
- ret = device_create_file(&hdev->dev, &dev_attr_led1);
- if (ret)
- goto err;
- ret = device_create_file(&hdev->dev, &dev_attr_led2);
- if (ret)
- goto err;
- ret = device_create_file(&hdev->dev, &dev_attr_led3);
- if (ret)
- goto err;
- ret = device_create_file(&hdev->dev, &dev_attr_led4);
- if (ret)
- goto err;
-
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "HID parse failed\n");
@@ -408,9 +506,10 @@ static int wiimote_hid_probe(struct hid_device *hdev,
goto err_stop;
}
- /* smp_wmb: Write wdata->xy first before wdata->ready is set to 1 */
- smp_wmb();
- atomic_set(&wdata->ready, 1);
+ ret = wiimote_leds_create(wdata);
+ if (ret)
+ goto err_free;
+
hid_info(hdev, "New device registered\n");
/* by default set led1 after device initialization */
@@ -420,15 +519,15 @@ static int wiimote_hid_probe(struct hid_device *hdev,
return 0;
+err_free:
+ wiimote_destroy(wdata);
+ return ret;
+
err_stop:
hid_hw_stop(hdev);
err:
input_free_device(wdata->input);
- device_remove_file(&hdev->dev, &dev_attr_led1);
- device_remove_file(&hdev->dev, &dev_attr_led2);
- device_remove_file(&hdev->dev, &dev_attr_led3);
- device_remove_file(&hdev->dev, &dev_attr_led4);
- wiimote_destroy(wdata);
+ kfree(wdata);
return ret;
}
@@ -437,16 +536,6 @@ static void wiimote_hid_remove(struct hid_device *hdev)
struct wiimote_data *wdata = hid_get_drvdata(hdev);
hid_info(hdev, "Device removed\n");
-
- device_remove_file(&hdev->dev, &dev_attr_led1);
- device_remove_file(&hdev->dev, &dev_attr_led2);
- device_remove_file(&hdev->dev, &dev_attr_led3);
- device_remove_file(&hdev->dev, &dev_attr_led4);
-
- hid_hw_stop(hdev);
- input_unregister_device(wdata->input);
-
- cancel_work_sync(&wdata->worker);
wiimote_destroy(wdata);
}
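
One detail worth noting in wiimote_leds_create() above: the LED name string is carved out of the same allocation as the led_classdev, so the single kfree() in wiimote_leds_destroy() releases both. A small userspace sketch of that layout (the struct and device name here are stand-ins, not the driver's types):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_led {
        const char *name;
        int max_brightness;
};

int main(void)
{
        const char *devname = "0005:057E:0306.0001";    /* hypothetical hid device name */
        size_t namesz = strlen(devname) + 9;            /* ":blue:pN" plus NUL */
        struct demo_led *led;
        char *name;

        led = calloc(1, sizeof(*led) + namesz);
        if (!led)
                return 1;

        name = (char *)&led[1];         /* string storage sits right after the struct */
        snprintf(name, namesz, "%s:blue:p%d", devname, 0);
        led->name = name;
        led->max_brightness = 1;

        printf("registered LED '%s'\n", led->name);
        free(led);                      /* one free releases struct and name together */
        return 0;
}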
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 621959d..3146fdc 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
@@ -89,6 +90,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
{ 0, 0 }
};
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0598cd2..0b62c3c 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -623,7 +623,7 @@ config SENSORS_LM90
LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
- and Winbond/Nuvoton W83L771W/G/AWG/ASG sensor chips.
+ Winbond/Nuvoton W83L771W/G/AWG/ASG and Philips SA56004 sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.
@@ -694,14 +694,24 @@ config SENSORS_LTC4261
be called ltc4261.
config SENSORS_LM95241
- tristate "National Semiconductor LM95241 sensor chip"
+ tristate "National Semiconductor LM95241 and compatibles"
depends on I2C
help
- If you say yes here you get support for LM95241 sensor chip.
+ If you say yes here you get support for LM95231 and LM95241 sensor
+ chips.
This driver can also be built as a module. If so, the module
will be called lm95241.
+config SENSORS_LM95245
+ tristate "National Semiconductor LM95245 sensor chip"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for LM95245 sensor chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called lm95245.
+
config SENSORS_MAX1111
tristate "Maxim MAX1111 Multichannel, Serial 8-bit ADC chip"
depends on SPI_MASTER
@@ -736,6 +746,16 @@ config SENSORS_MAX1619
This driver can also be built as a module. If so, the module
will be called max1619.
+config SENSORS_MAX1668
+ tristate "Maxim MAX1668 and compatibles"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for MAX1668, MAX1989 and
+ MAX1805 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max1668.
+
config SENSORS_MAX6639
tristate "Maxim MAX6639 sensor chip"
depends on I2C && EXPERIMENTAL
@@ -767,6 +787,20 @@ config SENSORS_MAX6650
This driver can also be built as a module. If so, the module
will be called max6650.
+config SENSORS_NTC_THERMISTOR
+ tristate "NTC thermistor support"
+ depends on EXPERIMENTAL
+ help
+	  This driver supports reading NTC thermistor sensors and
+	  interpreting the results. The driver can also monitor the
+	  temperature and send notifications about it.
+
+ Currently, this driver supports
+ NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333.
+
+ This driver can also be built as a module. If so, the module
+ will be called ntc-thermistor.
+
config SENSORS_PC87360
tristate "National Semiconductor PC87360 family"
select HWMON_VID
@@ -807,92 +841,7 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
-config PMBUS
- tristate "PMBus support"
- depends on I2C && EXPERIMENTAL
- default n
- help
- Say yes here if you want to enable PMBus support.
-
- This driver can also be built as a module. If so, the module will
- be called pmbus_core.
-
-if PMBUS
-
-config SENSORS_PMBUS
- tristate "Generic PMBus devices"
- default n
- help
- If you say yes here you get hardware monitoring support for generic
- PMBus devices, including but not limited to BMR450, BMR451, BMR453,
- BMR454, and LTC2978.
-
- This driver can also be built as a module. If so, the module will
- be called pmbus.
-
-config SENSORS_ADM1275
- tristate "Analog Devices ADM1275"
- default n
- help
- If you say yes here you get hardware monitoring support for Analog
- Devices ADM1275 Hot-Swap Controller and Digital Power Monitor.
-
- This driver can also be built as a module. If so, the module will
- be called adm1275.
-
-config SENSORS_MAX16064
- tristate "Maxim MAX16064"
- default n
- help
- If you say yes here you get hardware monitoring support for Maxim
- MAX16064.
-
- This driver can also be built as a module. If so, the module will
- be called max16064.
-
-config SENSORS_MAX34440
- tristate "Maxim MAX34440/MAX34441"
- default n
- help
- If you say yes here you get hardware monitoring support for Maxim
- MAX34440 and MAX34441.
-
- This driver can also be built as a module. If so, the module will
- be called max34440.
-
-config SENSORS_MAX8688
- tristate "Maxim MAX8688"
- default n
- help
- If you say yes here you get hardware monitoring support for Maxim
- MAX8688.
-
- This driver can also be built as a module. If so, the module will
- be called max8688.
-
-config SENSORS_UCD9000
- tristate "TI UCD90120, UCD90124, UCD9090, UCD90910"
- default n
- help
- If you say yes here you get hardware monitoring support for TI
- UCD90120, UCD90124, UCD9090, UCD90910 Sequencer and System Health
- Controllers.
-
- This driver can also be built as a module. If so, the module will
- be called ucd9000.
-
-config SENSORS_UCD9200
- tristate "TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, UCD9248"
- default n
- help
- If you say yes here you get hardware monitoring support for TI
- UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
- Digital PWM System Controllers.
-
- This driver can also be built as a module. If so, the module will
- be called ucd9200.
-
-endif # PMBUS
+source drivers/hwmon/pmbus/Kconfig
config SENSORS_SHT15
tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index d7995a1..3c9ccef 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_SENSORS_LM90) += lm90.o
obj-$(CONFIG_SENSORS_LM92) += lm92.o
obj-$(CONFIG_SENSORS_LM93) += lm93.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
+obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
@@ -87,10 +88,12 @@ obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX16065) += max16065.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
+obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
+obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
@@ -121,15 +124,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
-# PMBus drivers
-obj-$(CONFIG_PMBUS) += pmbus_core.o
-obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
-obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
-obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
-obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
-obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
-obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
-obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
+obj-$(CONFIG_PMBUS) += pmbus/
ccflags-$(CONFIG_HWMON_DEBUG_CHIP) := -DDEBUG
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 0070d54..59d83e8 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -44,7 +44,9 @@
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
-#define MAX_ATTRS 5 /* Maximum no of per-core attrs */
+#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
+#define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */
+#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
#ifdef CONFIG_SMP
@@ -67,6 +69,9 @@
* This value is passed as "id" field to rdmsr/wrmsr functions.
* @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
* from where the temperature values should be read.
+ * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
+ * from where the thresholds are read.
+ * @attr_size: Total number of per-core attrs displayed in the sysfs.
* @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
* Otherwise, temp_data holds coretemp data.
* @valid: If this is 1, the current temperature is valid.
@@ -74,15 +79,18 @@
struct temp_data {
int temp;
int ttarget;
+ int tmin;
int tjmax;
unsigned long last_updated;
unsigned int cpu;
u32 cpu_core_id;
u32 status_reg;
+ u32 intrpt_reg;
+ int attr_size;
bool is_pkg_data;
bool valid;
- struct sensor_device_attribute sd_attrs[MAX_ATTRS];
- char attr_name[MAX_ATTRS][CORETEMP_NAME_LENGTH];
+ struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
+ char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
struct mutex update_lock;
};
@@ -135,6 +143,19 @@ static ssize_t show_crit_alarm(struct device *dev,
return sprintf(buf, "%d\n", (eax >> 5) & 1);
}
+static ssize_t show_max_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ u32 eax, edx;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct platform_data *pdata = dev_get_drvdata(dev);
+ struct temp_data *tdata = pdata->core_data[attr->index];
+
+ rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+
+ return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
+}
+
static ssize_t show_tjmax(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -153,6 +174,83 @@ static ssize_t show_ttarget(struct device *dev,
return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}
+static ssize_t store_ttarget(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct platform_data *pdata = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct temp_data *tdata = pdata->core_data[attr->index];
+ u32 eax, edx;
+ unsigned long val;
+ int diff;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ /*
+ * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
+ * of milli degree celsius. Hence don't accept val > (127 * 1000)
+ */
+ if (val > tdata->tjmax || val > 127000)
+ return -EINVAL;
+
+ diff = (tdata->tjmax - val) / 1000;
+
+ mutex_lock(&tdata->update_lock);
+ rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
+ eax = (eax & ~THERM_MASK_THRESHOLD1) |
+ (diff << THERM_SHIFT_THRESHOLD1);
+ wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
+ tdata->ttarget = val;
+ mutex_unlock(&tdata->update_lock);
+
+ return count;
+}
+
+static ssize_t show_tmin(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct platform_data *pdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
+}
+
+static ssize_t store_tmin(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct platform_data *pdata = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct temp_data *tdata = pdata->core_data[attr->index];
+ u32 eax, edx;
+ unsigned long val;
+ int diff;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ /*
+ * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
+ * of milli degree celsius. Hence don't accept val > (127 * 1000)
+ */
+ if (val > tdata->tjmax || val > 127000)
+ return -EINVAL;
+
+ diff = (tdata->tjmax - val) / 1000;
+
+ mutex_lock(&tdata->update_lock);
+ rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
+ eax = (eax & ~THERM_MASK_THRESHOLD0) |
+ (diff << THERM_SHIFT_THRESHOLD0);
+ wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
+ tdata->tmin = val;
+ mutex_unlock(&tdata->update_lock);
+
+ return count;
+}
+
static ssize_t show_temp(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -344,23 +442,31 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
int attr_no)
{
int err, i;
- static ssize_t (*rd_ptr[MAX_ATTRS]) (struct device *dev,
+ static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev,
struct device_attribute *devattr, char *buf) = {
- show_label, show_crit_alarm, show_ttarget,
- show_temp, show_tjmax };
- static const char *names[MAX_ATTRS] = {
+ show_label, show_crit_alarm, show_temp, show_tjmax,
+ show_max_alarm, show_ttarget, show_tmin };
+ static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count) = { NULL, NULL, NULL, NULL, NULL,
+ store_ttarget, store_tmin };
+ static const char *names[TOTAL_ATTRS] = {
"temp%d_label", "temp%d_crit_alarm",
- "temp%d_max", "temp%d_input",
- "temp%d_crit" };
+ "temp%d_input", "temp%d_crit",
+ "temp%d_max_alarm", "temp%d_max",
+ "temp%d_max_hyst" };
- for (i = 0; i < MAX_ATTRS; i++) {
+ for (i = 0; i < tdata->attr_size; i++) {
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
attr_no);
sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
+ if (rw_ptr[i]) {
+ tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
+ tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
+ }
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
- tdata->sd_attrs[i].dev_attr.store = NULL;
tdata->sd_attrs[i].index = attr_no;
err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
if (err)
@@ -374,38 +480,6 @@ exit_free:
return err;
}
-static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
- struct device *dev)
-{
- int err;
- u32 eax, edx;
-
- /*
- * Initialize ttarget value. Eventually this will be
- * initialized with the value from MSR_IA32_THERM_INTERRUPT
- * register. If IA32_TEMPERATURE_TARGET is supported, this
- * value will be over written below.
- * To Do: Patch to initialize ttarget from MSR_IA32_THERM_INTERRUPT
- */
- tdata->ttarget = tdata->tjmax - 20000;
-
- /*
- * Read the still undocumented IA32_TEMPERATURE_TARGET. It exists
- * on older CPUs but not in this register,
- * Atoms don't have it either.
- */
- if (cpu_model > 0xe && cpu_model != 0x1c) {
- err = rdmsr_safe_on_cpu(tdata->cpu,
- MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
- if (err) {
- dev_warn(dev,
- "Unable to read IA32_TEMPERATURE_TARGET MSR\n");
- } else {
- tdata->ttarget = tdata->tjmax -
- ((eax >> 8) & 0xff) * 1000;
- }
- }
-}
static int __devinit chk_ucode_version(struct platform_device *pdev)
{
@@ -464,9 +538,12 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
MSR_IA32_THERM_STATUS;
+ tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
+ MSR_IA32_THERM_INTERRUPT;
tdata->is_pkg_data = pkg_flag;
tdata->cpu = cpu;
tdata->cpu_core_id = TO_CORE_ID(cpu);
+ tdata->attr_size = MAX_CORE_ATTRS;
mutex_init(&tdata->update_lock);
return tdata;
}
@@ -516,7 +593,17 @@ static int create_core_data(struct platform_data *pdata,
else
tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
- update_ttarget(c->x86_model, tdata, &pdev->dev);
+ /*
+ * Test if we can access the intrpt register. If so, increase the
+ * 'size' enough to have ttarget/tmin/max_alarm interfaces.
+ * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT
+ */
+ err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
+ if (!err) {
+ tdata->attr_size += MAX_THRESH_ATTRS;
+ tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+ }
+
pdata->core_data[attr_no] = tdata;
/* Create sysfs interfaces */
@@ -553,7 +640,7 @@ static void coretemp_remove_core(struct platform_data *pdata,
struct temp_data *tdata = pdata->core_data[indx];
/* Remove the sysfs attributes */
- for (i = 0; i < MAX_ATTRS; i++)
+ for (i = 0; i < tdata->attr_size; i++)
device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
kfree(pdata->core_data[indx]);
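
The new store_ttarget()/store_tmin() handlers above translate a millidegree sysfs value into the 7-bit "degrees below TjMax" field of the thermal interrupt MSR (bits 16:22 for threshold 1, per the comment in create_core_data()). A standalone sketch of that encoding — the mask/shift constants are re-declared here only for illustration; the driver gets the real definitions from the kernel's x86 MSR headers:

#include <stdio.h>

#define THERM_SHIFT_THRESHOLD1  16
#define THERM_MASK_THRESHOLD1   (0x7f << THERM_SHIFT_THRESHOLD1)

int main(void)
{
        int tjmax = 100000;             /* millidegrees C */
        unsigned long val = 85000;      /* requested temp%d_max, millidegrees C */
        unsigned int eax = 0;           /* pretend low word of the interrupt MSR */
        int diff;

        /* The MSR wants the threshold as whole degrees below TjMax. */
        diff = (tjmax - val) / 1000;
        eax = (eax & ~THERM_MASK_THRESHOLD1) | (diff << THERM_SHIFT_THRESHOLD1);

        printf("threshold = TjMax - %d C, field value 0x%08x\n", diff, eax);
        return 0;
}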
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index c4c40be..d22f241 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -114,7 +114,6 @@ struct i5k_amb_data {
void __iomem *amb_mmio;
struct i5k_device_attribute *attrs;
unsigned int num_attrs;
- unsigned long chipset_id;
};
static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
@@ -444,8 +443,6 @@ static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data,
goto out;
}
- data->chipset_id = devid;
-
res = 0;
out:
pci_dev_put(pcidev);
@@ -478,23 +475,13 @@ out:
return res;
}
-static unsigned long i5k_channel_pci_id(struct i5k_amb_data *data,
- unsigned long channel)
-{
- switch (data->chipset_id) {
- case PCI_DEVICE_ID_INTEL_5000_ERR:
- return PCI_DEVICE_ID_INTEL_5000_FBD0 + channel;
- case PCI_DEVICE_ID_INTEL_5400_ERR:
- return PCI_DEVICE_ID_INTEL_5400_FBD0 + channel;
- default:
- BUG();
- }
-}
-
-static unsigned long chipset_ids[] = {
- PCI_DEVICE_ID_INTEL_5000_ERR,
- PCI_DEVICE_ID_INTEL_5400_ERR,
- 0
+static struct {
+ unsigned long err;
+ unsigned long fbd0;
+} chipset_ids[] __devinitdata = {
+ { PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 },
+ { PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 },
+ { 0, 0 }
};
#ifdef MODULE
@@ -510,8 +497,7 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev)
{
struct i5k_amb_data *data;
struct resource *reso;
- int i;
- int res = -ENODEV;
+ int i, res;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
@@ -520,22 +506,22 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev)
/* Figure out where the AMB registers live */
i = 0;
do {
- res = i5k_find_amb_registers(data, chipset_ids[i]);
+ res = i5k_find_amb_registers(data, chipset_ids[i].err);
+ if (res == 0)
+ break;
i++;
- } while (res && chipset_ids[i]);
+ } while (chipset_ids[i].err);
if (res)
goto err;
/* Copy the DIMM presence map for the first two channels */
- res = i5k_channel_probe(&data->amb_present[0],
- i5k_channel_pci_id(data, 0));
+ res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0);
if (res)
goto err;
/* Copy the DIMM presence map for the optional second two channels */
- i5k_channel_probe(&data->amb_present[2],
- i5k_channel_pci_id(data, 1));
+ i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1);
/* Set up resource regions */
reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME);
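
The probe rework above drops the cached chipset_id and the switch-based i5k_channel_pci_id() in favour of a table that pairs each error-register device ID with its FBD0 ID, so the entry matched during register discovery can be reused directly. A compact sketch of that table-driven lookup (the IDs here are placeholders, not the real Intel PCI IDs):

#include <stdio.h>

static struct {
        unsigned long err;      /* ID probed for the AMB register window */
        unsigned long fbd0;     /* matching first FBD channel device */
} chipset_ids[] = {
        { 0x1000, 0x1005 },     /* placeholder for the 5000-series pair */
        { 0x2000, 0x2005 },     /* placeholder for the 5400-series pair */
        { 0, 0 }
};

/* Pretend only the second chipset answers. */
static int find_amb_registers(unsigned long err_id)
{
        return err_id == 0x2000 ? 0 : -1;
}

int main(void)
{
        int i = 0, res;

        do {
                res = find_amb_registers(chipset_ids[i].err);
                if (res == 0)
                        break;
                i++;
        } while (chipset_ids[i].err);

        if (res)
                return 1;       /* no supported chipset found */

        printf("probe channels with FBD0 id 0x%lx and 0x%lx\n",
               chipset_ids[i].fbd0, chipset_ids[i].fbd0 + 1);
        return 0;
}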
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 1a409c5..c316294 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -432,13 +432,15 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
aem_send_message(ipmi);
res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT);
- if (!res)
- return -ETIMEDOUT;
+ if (!res) {
+ res = -ETIMEDOUT;
+ goto out;
+ }
if (ipmi->rx_result || ipmi->rx_msg_len != rs_size ||
memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) {
- kfree(rs_resp);
- return -ENOENT;
+ res = -ENOENT;
+ goto out;
}
switch (size) {
@@ -463,8 +465,11 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
break;
}
}
+ res = 0;
- return 0;
+out:
+ kfree(rs_resp);
+ return res;
}
/* Update AEM energy registers */
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 2f94f95..90ddb87 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -54,6 +54,9 @@
* and extended mode. They are mostly compatible with LM90 except for a data
* format difference for the temperature value registers.
*
+ * This driver also supports the SA56004 from Philips. This device is
+ * pin-compatible with the LM86; the ED/EDP parts are also address-compatible.
+ *
* Since the LM90 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
* concern all supported chipsets, unless mentioned otherwise.
@@ -96,13 +99,15 @@
* MAX6659 can have address 0x4c, 0x4d or 0x4e.
* MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
* 0x4c, 0x4d or 0x4e.
+ * SA56004 can have address 0x48 through 0x4F.
*/
static const unsigned short normal_i2c[] = {
- 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
+ 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x48, 0x49, 0x4a, 0x4b, 0x4c,
+ 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
- max6646, w83l771, max6696 };
+ max6646, w83l771, max6696, sa56004 };
/*
* The LM90 registers
@@ -152,6 +157,10 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define MAX6659_REG_R_LOCAL_EMERG 0x17
#define MAX6659_REG_W_LOCAL_EMERG 0x17
+/* SA56004 registers */
+
+#define SA56004_REG_R_LOCAL_TEMPL 0x22
+
#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */
#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
@@ -161,7 +170,6 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_FLAG_ADT7461_EXT (1 << 0) /* ADT7461 extended mode */
/* Device features */
#define LM90_HAVE_OFFSET (1 << 1) /* temperature offset register */
-#define LM90_HAVE_LOCAL_EXT (1 << 2) /* extended local temperature */
#define LM90_HAVE_REM_LIMIT_EXT (1 << 3) /* extended remote limit */
#define LM90_HAVE_EMERGENCY (1 << 4) /* 3rd upper (emergency) limit */
#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */
@@ -192,6 +200,7 @@ static const struct i2c_device_id lm90_id[] = {
{ "max6696", max6696 },
{ "nct1008", adt7461 },
{ "w83l771", w83l771 },
+ { "sa56004", sa56004 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
@@ -204,6 +213,7 @@ struct lm90_params {
u16 alert_alarms; /* Which alarm bits trigger ALERT# */
/* Upper 8 bits for max6695/96 */
u8 max_convrate; /* Maximum conversion rate register value */
+ u8 reg_local_ext; /* Extended local temp register (optional) */
};
static const struct lm90_params lm90_params[] = {
@@ -235,19 +245,20 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 9,
},
[max6646] = {
- .flags = LM90_HAVE_LOCAL_EXT,
.alert_alarms = 0x7c,
.max_convrate = 6,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6657] = {
- .flags = LM90_HAVE_LOCAL_EXT,
.alert_alarms = 0x7c,
.max_convrate = 8,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6659] = {
- .flags = LM90_HAVE_LOCAL_EXT | LM90_HAVE_EMERGENCY,
+ .flags = LM90_HAVE_EMERGENCY,
.alert_alarms = 0x7c,
.max_convrate = 8,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6680] = {
.flags = LM90_HAVE_OFFSET,
@@ -255,16 +266,23 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 7,
},
[max6696] = {
- .flags = LM90_HAVE_LOCAL_EXT | LM90_HAVE_EMERGENCY
+ .flags = LM90_HAVE_EMERGENCY
| LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
.alert_alarms = 0x187c,
.max_convrate = 6,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[w83l771] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
.alert_alarms = 0x7c,
.max_convrate = 8,
},
+ [sa56004] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .alert_alarms = 0x7b,
+ .max_convrate = 9,
+ .reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
+ },
};
/*
@@ -286,6 +304,7 @@ struct lm90_data {
u16 alert_alarms; /* Which alarm bits trigger ALERT# */
/* Upper 8 bits for max6695/96 */
u8 max_convrate; /* Maximum conversion rate */
+ u8 reg_local_ext; /* local extension register offset */
/* registers values */
s8 temp8[8]; /* 0: local low limit
@@ -452,9 +471,9 @@ static struct lm90_data *lm90_update_device(struct device *dev)
lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]);
lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
- if (data->flags & LM90_HAVE_LOCAL_EXT) {
+ if (data->reg_local_ext) {
lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
- MAX6657_REG_R_LOCAL_TEMPL,
+ data->reg_local_ext,
&data->temp11[4]);
} else {
if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
@@ -1092,7 +1111,7 @@ static int lm90_detect(struct i2c_client *new_client,
struct i2c_adapter *adapter = new_client->adapter;
int address = new_client->addr;
const char *name = NULL;
- int man_id, chip_id, reg_config1, reg_convrate;
+ int man_id, chip_id, reg_config1, reg_config2, reg_convrate;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -1108,15 +1127,16 @@ static int lm90_detect(struct i2c_client *new_client,
LM90_REG_R_CONVRATE)) < 0)
return -ENODEV;
- if ((address == 0x4C || address == 0x4D)
- && man_id == 0x01) { /* National Semiconductor */
- int reg_config2;
-
+ if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
reg_config2 = i2c_smbus_read_byte_data(new_client,
LM90_REG_R_CONFIG2);
if (reg_config2 < 0)
return -ENODEV;
+ } else
+ reg_config2 = 0; /* Make compiler happy */
+ if ((address == 0x4C || address == 0x4D)
+ && man_id == 0x01) { /* National Semiconductor */
if ((reg_config1 & 0x2A) == 0x00
&& (reg_config2 & 0xF8) == 0x00
&& reg_convrate <= 0x09) {
@@ -1245,13 +1265,6 @@ static int lm90_detect(struct i2c_client *new_client,
} else
if (address == 0x4C
&& man_id == 0x5C) { /* Winbond/Nuvoton */
- int reg_config2;
-
- reg_config2 = i2c_smbus_read_byte_data(new_client,
- LM90_REG_R_CONFIG2);
- if (reg_config2 < 0)
- return -ENODEV;
-
if ((reg_config1 & 0x2A) == 0x00
&& (reg_config2 & 0xF8) == 0x00) {
if (chip_id == 0x01 /* W83L771W/G */
@@ -1263,6 +1276,15 @@ static int lm90_detect(struct i2c_client *new_client,
name = "w83l771";
}
}
+ } else
+ if (address >= 0x48 && address <= 0x4F
+ && man_id == 0xA1) { /* NXP Semiconductor/Philips */
+ if (chip_id == 0x00
+ && (reg_config1 & 0x2A) == 0x00
+ && (reg_config2 & 0xFE) == 0x00
+ && reg_convrate <= 0x09) {
+ name = "sa56004";
+ }
}
if (!name) { /* identification failed */
@@ -1368,6 +1390,7 @@ static int lm90_probe(struct i2c_client *new_client,
/* Set chip capabilities */
data->flags = lm90_params[data->kind].flags;
+ data->reg_local_ext = lm90_params[data->kind].reg_local_ext;
/* Set maximum conversion rate */
data->max_convrate = lm90_params[data->kind].max_convrate;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index d3b464b..513901d 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -74,8 +74,9 @@ static const unsigned short normal_i2c[] = {
#define TT_OFF 0
#define TT_ON 1
#define TT_MASK 7
-#define MANUFACTURER_ID 0x01
-#define DEFAULT_REVISION 0xA4
+#define NATSEMI_MAN_ID 0x01
+#define LM95231_CHIP_ID 0xA1
+#define LM95241_CHIP_ID 0xA4
static const u8 lm95241_reg_address[] = {
LM95241_REG_R_LOCAL_TEMPH,
@@ -338,20 +339,25 @@ static int lm95241_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
- int address = new_client->addr;
const char *name;
+ int mfg_id, chip_id;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
- if ((i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID)
- == MANUFACTURER_ID)
- && (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID)
- == DEFAULT_REVISION)) {
- name = DEVNAME;
- } else {
- dev_dbg(&adapter->dev, "LM95241 detection failed at 0x%02x\n",
- address);
+ mfg_id = i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID);
+ if (mfg_id != NATSEMI_MAN_ID)
+ return -ENODEV;
+
+ chip_id = i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID);
+ switch (chip_id) {
+ case LM95231_CHIP_ID:
+ name = "lm95231";
+ break;
+ case LM95241_CHIP_ID:
+ name = "lm95241";
+ break;
+ default:
return -ENODEV;
}
@@ -431,7 +437,8 @@ static int lm95241_remove(struct i2c_client *client)
/* Driver data (common to all clients) */
static const struct i2c_device_id lm95241_id[] = {
- { DEVNAME, 0 },
+ { "lm95231", 0 },
+ { "lm95241", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm95241_id);
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
new file mode 100644
index 0000000..dce9e68
--- /dev/null
+++ b/drivers/hwmon/lm95245.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright (C) 2011 Alexander Stein <alexander.stein@systec-electronic.com>
+ *
+ * The LM95245 is a sensor chip made by National Semiconductor.
+ * It reports up to two temperatures (its own plus an external one).
+ * Complete datasheet can be obtained from National's website at:
+ * http://www.national.com/ds.cgi/LM/LM95245.pdf
+ *
+ * This driver is based on lm95241.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+
+#define DEVNAME "lm95245"
+
+static const unsigned short normal_i2c[] = {
+ 0x18, 0x19, 0x29, 0x4c, 0x4d, I2C_CLIENT_END };
+
+/* LM95245 registers */
+/* general registers */
+#define LM95245_REG_RW_CONFIG1 0x03
+#define LM95245_REG_RW_CONVERS_RATE 0x04
+#define LM95245_REG_W_ONE_SHOT 0x0F
+
+/* diode configuration */
+#define LM95245_REG_RW_CONFIG2 0xBF
+#define LM95245_REG_RW_REMOTE_OFFH 0x11
+#define LM95245_REG_RW_REMOTE_OFFL 0x12
+
+/* status registers */
+#define LM95245_REG_R_STATUS1 0x02
+#define LM95245_REG_R_STATUS2 0x33
+
+/* limit registers */
+#define LM95245_REG_RW_REMOTE_OS_LIMIT 0x07
+#define LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT 0x20
+#define LM95245_REG_RW_REMOTE_TCRIT_LIMIT 0x19
+#define LM95245_REG_RW_COMMON_HYSTERESIS 0x21
+
+/* temperature signed */
+#define LM95245_REG_R_LOCAL_TEMPH_S 0x00
+#define LM95245_REG_R_LOCAL_TEMPL_S 0x30
+#define LM95245_REG_R_REMOTE_TEMPH_S 0x01
+#define LM95245_REG_R_REMOTE_TEMPL_S 0x10
+/* temperature unsigned */
+#define LM95245_REG_R_REMOTE_TEMPH_U 0x31
+#define LM95245_REG_R_REMOTE_TEMPL_U 0x32
+
+/* id registers */
+#define LM95245_REG_R_MAN_ID 0xFE
+#define LM95245_REG_R_CHIP_ID 0xFF
+
+/* LM95245 specific bitfields */
+#define CFG_STOP 0x40
+#define CFG_REMOTE_TCRIT_MASK 0x10
+#define CFG_REMOTE_OS_MASK 0x08
+#define CFG_LOCAL_TCRIT_MASK 0x04
+#define CFG_LOCAL_OS_MASK 0x02
+
+#define CFG2_OS_A0 0x40
+#define CFG2_DIODE_FAULT_OS 0x20
+#define CFG2_DIODE_FAULT_TCRIT 0x10
+#define CFG2_REMOTE_TT 0x08
+#define CFG2_REMOTE_FILTER_DIS 0x00
+#define CFG2_REMOTE_FILTER_EN 0x06
+
+/* conversion rate in ms */
+#define RATE_CR0063 0x00
+#define RATE_CR0364 0x01
+#define RATE_CR1000 0x02
+#define RATE_CR2500 0x03
+
+#define STATUS1_DIODE_FAULT 0x04
+#define STATUS1_RTCRIT 0x02
+#define STATUS1_LOC 0x01
+
+#define MANUFACTURER_ID 0x01
+#define DEFAULT_REVISION 0xB3
+
+static const u8 lm95245_reg_address[] = {
+ LM95245_REG_R_LOCAL_TEMPH_S,
+ LM95245_REG_R_LOCAL_TEMPL_S,
+ LM95245_REG_R_REMOTE_TEMPH_S,
+ LM95245_REG_R_REMOTE_TEMPL_S,
+ LM95245_REG_R_REMOTE_TEMPH_U,
+ LM95245_REG_R_REMOTE_TEMPL_U,
+ LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT,
+ LM95245_REG_RW_REMOTE_TCRIT_LIMIT,
+ LM95245_REG_RW_COMMON_HYSTERESIS,
+ LM95245_REG_R_STATUS1,
+};
+
+/* Client data (each client gets its own) */
+struct lm95245_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ unsigned long last_updated; /* in jiffies */
+ unsigned long interval; /* in msecs */
+ bool valid; /* zero until following fields are valid */
+ /* registers values */
+ u8 regs[ARRAY_SIZE(lm95245_reg_address)];
+ u8 config1, config2;
+};
+
+/* Conversions */
+static int temp_from_reg_unsigned(u8 val_h, u8 val_l)
+{
+ return val_h * 1000 + val_l * 1000 / 256;
+}
+
+static int temp_from_reg_signed(u8 val_h, u8 val_l)
+{
+ if (val_h & 0x80)
+ return (val_h - 0x100) * 1000;
+ return temp_from_reg_unsigned(val_h, val_l);
+}
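+
+/*
+ * Conversion examples (values chosen purely for illustration, not taken from
+ * the datasheet): val_h = 0x19, val_l = 0x80 decodes to
+ * 25 * 1000 + 128 * 1000 / 256 = 25500 millidegrees C (25.5 degrees C);
+ * a signed high byte of 0xFF decodes to (0xFF - 0x100) * 1000 = -1000.
+ */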
+
+static struct lm95245_data *lm95245_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated
+ + msecs_to_jiffies(data->interval)) || !data->valid) {
+ int i;
+
+ dev_dbg(&client->dev, "Updating lm95245 data.\n");
+ for (i = 0; i < ARRAY_SIZE(lm95245_reg_address); i++)
+ data->regs[i]
+ = i2c_smbus_read_byte_data(client,
+ lm95245_reg_address[i]);
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+static unsigned long lm95245_read_conversion_rate(struct i2c_client *client)
+{
+ int rate;
+ unsigned long interval;
+
+ rate = i2c_smbus_read_byte_data(client, LM95245_REG_RW_CONVERS_RATE);
+
+ switch (rate) {
+ case RATE_CR0063:
+ interval = 63;
+ break;
+ case RATE_CR0364:
+ interval = 364;
+ break;
+ case RATE_CR1000:
+ interval = 1000;
+ break;
+ case RATE_CR2500:
+ default:
+ interval = 2500;
+ break;
+ }
+
+ return interval;
+}
+
+static unsigned long lm95245_set_conversion_rate(struct i2c_client *client,
+ unsigned long interval)
+{
+ int rate;
+
+ if (interval <= 63) {
+ interval = 63;
+ rate = RATE_CR0063;
+ } else if (interval <= 364) {
+ interval = 364;
+ rate = RATE_CR0364;
+ } else if (interval <= 1000) {
+ interval = 1000;
+ rate = RATE_CR1000;
+ } else {
+ interval = 2500;
+ rate = RATE_CR2500;
+ }
+
+ i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONVERS_RATE, rate);
+
+ return interval;
+}
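+
+/*
+ * Example: writing 500 to the update_interval sysfs attribute rounds up to
+ * the next supported conversion rate, so the stored interval becomes 1000 ms
+ * (RATE_CR1000).
+ */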
+
+/* Sysfs stuff */
+static ssize_t show_input(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95245_data *data = lm95245_update_device(dev);
+ int temp;
+ int index = to_sensor_dev_attr(attr)->index;
+
+ /*
+ * Index 0 (Local temp) is always signed
+ * Index 2 (Remote temp) has both signed and unsigned data
+ * use signed calculation for remote if signed bit is set
+ */
+ if (index == 0 || data->regs[index] & 0x80)
+ temp = temp_from_reg_signed(data->regs[index],
+ data->regs[index + 1]);
+ else
+ temp = temp_from_reg_unsigned(data->regs[index + 2],
+ data->regs[index + 3]);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", temp);
+}
+
+static ssize_t show_limit(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95245_data *data = lm95245_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+ data->regs[index] * 1000);
+}
+
+static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+ int index = to_sensor_dev_attr(attr)->index;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ val /= 1000;
+
+ val = SENSORS_LIMIT(val, 0, (index == 6 ? 127 : 255));
+
+ mutex_lock(&data->update_lock);
+
+ data->valid = 0;
+
+ i2c_smbus_write_byte_data(client, lm95245_reg_address[index], val);
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ val /= 1000;
+
+ val = SENSORS_LIMIT(val, 0, 31);
+
+ mutex_lock(&data->update_lock);
+
+ data->valid = 0;
+
+ /* shared crit hysteresis */
+ i2c_smbus_write_byte_data(client, LM95245_REG_RW_COMMON_HYSTERESIS,
+ val);
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_type(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+
+ return snprintf(buf, PAGE_SIZE - 1,
+ data->config2 & CFG2_REMOTE_TT ? "1\n" : "2\n");
+}
+
+static ssize_t set_type(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+ if (val != 1 && val != 2)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (val == 1)
+ data->config2 |= CFG2_REMOTE_TT;
+ else
+ data->config2 &= ~CFG2_REMOTE_TT;
+
+ data->valid = 0;
+
+ i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONFIG2,
+ data->config2);
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95245_data *data = lm95245_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+ !!(data->regs[9] & index));
+}
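+
+/*
+ * Note: for the alarm and fault attributes defined below, the sensor
+ * attribute "index" is not an array index but a STATUS1 bit mask
+ * (STATUS1_LOC, STATUS1_RTCRIT, STATUS1_DIODE_FAULT) tested against the
+ * cached status register in regs[9].
+ */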
+
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95245_data *data = lm95245_update_device(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%lu\n", data->interval);
+}
+
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ data->interval = lm95245_set_conversion_rate(client, val);
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_limit,
+ set_limit, 6);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
+ set_crit_hyst, 8);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL,
+ STATUS1_LOC);
+
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_limit,
+ set_limit, 7);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
+ set_crit_hyst, 8);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL,
+ STATUS1_RTCRIT);
+static SENSOR_DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type,
+ set_type, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL,
+ STATUS1_DIODE_FAULT);
+
+static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
+ set_interval);
+
+static struct attribute *lm95245_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_type.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &dev_attr_update_interval.attr,
+ NULL
+};
+
+static const struct attribute_group lm95245_group = {
+ .attrs = lm95245_attributes,
+};
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int lm95245_detect(struct i2c_client *new_client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = new_client->adapter;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ if (i2c_smbus_read_byte_data(new_client, LM95245_REG_R_MAN_ID)
+ != MANUFACTURER_ID
+ || i2c_smbus_read_byte_data(new_client, LM95245_REG_R_CHIP_ID)
+ != DEFAULT_REVISION)
+ return -ENODEV;
+
+ strlcpy(info->type, DEVNAME, I2C_NAME_SIZE);
+ return 0;
+}
+
+static void lm95245_init_client(struct i2c_client *client)
+{
+ struct lm95245_data *data = i2c_get_clientdata(client);
+
+ data->valid = 0;
+ data->interval = lm95245_read_conversion_rate(client);
+
+ data->config1 = i2c_smbus_read_byte_data(client,
+ LM95245_REG_RW_CONFIG1);
+ data->config2 = i2c_smbus_read_byte_data(client,
+ LM95245_REG_RW_CONFIG2);
+
+ if (data->config1 & CFG_STOP) {
+ /* Clear the standby bit */
+ data->config1 &= ~CFG_STOP;
+ i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONFIG1,
+ data->config1);
+ }
+}
+
+static int lm95245_probe(struct i2c_client *new_client,
+ const struct i2c_device_id *id)
+{
+ struct lm95245_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct lm95245_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(new_client, data);
+ mutex_init(&data->update_lock);
+
+ /* Initialize the LM95245 chip */
+ lm95245_init_client(new_client);
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&new_client->dev.kobj, &lm95245_group);
+ if (err)
+ goto exit_free;
+
+ data->hwmon_dev = hwmon_device_register(&new_client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove_files;
+ }
+
+ return 0;
+
+exit_remove_files:
+ sysfs_remove_group(&new_client->dev.kobj, &lm95245_group);
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int lm95245_remove(struct i2c_client *client)
+{
+ struct lm95245_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &lm95245_group);
+
+ kfree(data);
+ return 0;
+}
+
+/* Driver data (common to all clients) */
+static const struct i2c_device_id lm95245_id[] = {
+ { DEVNAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm95245_id);
+
+static struct i2c_driver lm95245_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DEVNAME,
+ },
+ .probe = lm95245_probe,
+ .remove = lm95245_remove,
+ .id_table = lm95245_id,
+ .detect = lm95245_detect,
+ .address_list = normal_i2c,
+};
+
+static int __init sensors_lm95245_init(void)
+{
+ return i2c_add_driver(&lm95245_driver);
+}
+
+static void __exit sensors_lm95245_exit(void)
+{
+ i2c_del_driver(&lm95245_driver);
+}
+
+MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>");
+MODULE_DESCRIPTION("LM95245 sensor driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_lm95245_init);
+module_exit(sensors_lm95245_exit);
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index d94a24f..dd2d7b9 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
static inline int ADC_TO_CURR(int adc, int gain)
{
- return adc * 1400000 / gain * 255;
+ return adc * 1400000 / (gain * 255);
}
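+/*
+ * The parentheses matter here: C groups a * b / c * d as ((a * b) / c) * d,
+ * which for this formula would inflate the result by a factor of 255 * 255.
+ */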
/*
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
new file mode 100644
index 0000000..20d1b2d
--- /dev/null
+++ b/drivers/hwmon/max1668.c
@@ -0,0 +1,502 @@
+/*
+ Copyright (c) 2011 David George <david.george@ska.ac.za>
+
+ based on adm1021.c
+ some credit to Christoph Scheurer, but largely a rewrite
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Addresses to scan */
+static unsigned short max1668_addr_list[] = {
+ 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
+
+/* max1668 registers */
+
+#define MAX1668_REG_TEMP(nr) (nr)
+#define MAX1668_REG_STAT1 0x05
+#define MAX1668_REG_STAT2 0x06
+#define MAX1668_REG_MAN_ID 0xfe
+#define MAX1668_REG_DEV_ID 0xff
+
+/* limits */
+
+/* write high limits */
+#define MAX1668_REG_LIMH_WR(nr) (0x13 + 2 * (nr))
+/* write low limits */
+#define MAX1668_REG_LIML_WR(nr) (0x14 + 2 * (nr))
+/* read high limits */
+#define MAX1668_REG_LIMH_RD(nr) (0x08 + 2 * (nr))
+/* read low limits */
+#define MAX1668_REG_LIML_RD(nr) (0x09 + 2 * (nr))
+
+/* manufacturer and device ID Constants */
+#define MAN_ID_MAXIM 0x4d
+#define DEV_ID_MAX1668 0x3
+#define DEV_ID_MAX1805 0x5
+#define DEV_ID_MAX1989 0xb
+
+/* read only mode module parameter */
+static bool read_only;
+module_param(read_only, bool, 0);
+MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
+
+enum chips { max1668, max1805, max1989 };
+
+struct max1668_data {
+ struct device *hwmon_dev;
+ enum chips type;
+
+ struct mutex update_lock;
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ /* 1x local and 4x remote */
+ s8 temp_max[5];
+ s8 temp_min[5];
+ s8 temp[5];
+ u16 alarms;
+};
+
+static struct max1668_data *max1668_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max1668_data *data = i2c_get_clientdata(client);
+ struct max1668_data *ret = data;
+ s32 val;
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (data->valid && !time_after(jiffies,
+ data->last_updated + HZ + HZ / 2))
+ goto abort;
+
+ for (i = 0; i < 5; i++) {
+ val = i2c_smbus_read_byte_data(client, MAX1668_REG_TEMP(i));
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i] = (s8) val;
+
+ val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIMH_RD(i));
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp_max[i] = (s8) val;
+
+ val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIML_RD(i));
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp_min[i] = (s8) val;
+ }
+
+ val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT1);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->alarms = val << 8;
+
+ val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT2);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->alarms |= val;
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+abort:
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct max1668_data *data = max1668_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temp[index] * 1000);
+}
+
+static ssize_t show_temp_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct max1668_data *data = max1668_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temp_max[index] * 1000);
+}
+
+static ssize_t show_temp_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct max1668_data *data = max1668_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temp_min[index] * 1000);
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int index = to_sensor_dev_attr(attr)->index;
+ struct max1668_data *data = max1668_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
+}
+
+static ssize_t show_fault(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct max1668_data *data = max1668_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%u\n",
+ (data->alarms & (1 << 12)) && data->temp[index] == 127);
+}
+
+static ssize_t set_temp_max(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max1668_data *data = i2c_get_clientdata(client);
+ long temp;
+ int ret;
+
+ ret = kstrtol(buf, 10, &temp);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ data->temp_max[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+ if (i2c_smbus_write_byte_data(client,
+ MAX1668_REG_LIMH_WR(index),
+ data->temp_max[index]))
+ count = -EIO;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t set_temp_min(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max1668_data *data = i2c_get_clientdata(client);
+ long temp;
+ int ret;
+
+ ret = kstrtol(buf, 10, &temp);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+ if (i2c_smbus_write_byte_data(client,
+ MAX1668_REG_LIML_WR(index),
+ data->temp_min[index]))
+ count = -EIO;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max,
+ set_temp_max, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min,
+ set_temp_min, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max,
+ set_temp_max, 1);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min,
+ set_temp_min, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max,
+ set_temp_max, 2);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min,
+ set_temp_min, 2);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max,
+ set_temp_max, 3);
+static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min,
+ set_temp_min, 3);
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max,
+ set_temp_max, 4);
+static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min,
+ set_temp_min, 4);
+
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 0);
+
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_fault, NULL, 4);
+
+/* Attributes common to MAX1668, MAX1989 and MAX1805 */
+static struct attribute *max1668_attribute_common[] = {
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ NULL
+};
+
+/* Attributes not present on MAX1805 */
+static struct attribute *max1668_attribute_unique[] = {
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_min.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_max.dev_attr.attr,
+ &sensor_dev_attr_temp5_min.dev_attr.attr,
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_min_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
+ &sensor_dev_attr_temp5_fault.dev_attr.attr,
+ NULL
+};
+
+static mode_t max1668_attribute_mode(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ int ret = S_IRUGO;
+ if (read_only)
+ return ret;
+ if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp4_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp5_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp4_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp5_min.dev_attr.attr)
+ ret |= S_IWUSR;
+ return ret;
+}
+
+static const struct attribute_group max1668_group_common = {
+ .attrs = max1668_attribute_common,
+ .is_visible = max1668_attribute_mode
+};
+
+static const struct attribute_group max1668_group_unique = {
+ .attrs = max1668_attribute_unique,
+ .is_visible = max1668_attribute_mode
+};
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max1668_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ const char *type_name;
+ int man_id, dev_id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ /* Check for unsupported part */
+ man_id = i2c_smbus_read_byte_data(client, MAX1668_REG_MAN_ID);
+ if (man_id != MAN_ID_MAXIM)
+ return -ENODEV;
+
+ dev_id = i2c_smbus_read_byte_data(client, MAX1668_REG_DEV_ID);
+ if (dev_id < 0)
+ return -ENODEV;
+
+ type_name = NULL;
+ if (dev_id == DEV_ID_MAX1668)
+ type_name = "max1668";
+ else if (dev_id == DEV_ID_MAX1805)
+ type_name = "max1805";
+ else if (dev_id == DEV_ID_MAX1989)
+ type_name = "max1989";
+
+ if (!type_name)
+ return -ENODEV;
+
+ strlcpy(info->type, type_name, I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int max1668_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct max1668_data *data;
+ int err;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(struct max1668_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ data->type = id->driver_data;
+ mutex_init(&data->update_lock);
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &max1668_group_common);
+ if (err)
+ goto error_free;
+
+ if (data->type == max1668 || data->type == max1989) {
+ err = sysfs_create_group(&client->dev.kobj,
+ &max1668_group_unique);
+ if (err)
+ goto error_sysrem0;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto error_sysrem1;
+ }
+
+ return 0;
+
+error_sysrem1:
+ if (data->type == max1668 || data->type == max1989)
+ sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
+error_sysrem0:
+ sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
+error_free:
+ kfree(data);
+ return err;
+}
+
+static int max1668_remove(struct i2c_client *client)
+{
+ struct max1668_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ if (data->type == max1668 || data->type == max1989)
+ sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
+
+ sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
+
+ kfree(data);
+ return 0;
+}
+
+static const struct i2c_device_id max1668_id[] = {
+ { "max1668", max1668 },
+ { "max1805", max1805 },
+ { "max1989", max1989 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max1668_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max1668_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max1668",
+ },
+ .probe = max1668_probe,
+ .remove = max1668_remove,
+ .id_table = max1668_id,
+ .detect = max1668_detect,
+ .address_list = max1668_addr_list,
+};
+
+static int __init sensors_max1668_init(void)
+{
+ return i2c_add_driver(&max1668_driver);
+}
+
+static void __exit sensors_max1668_exit(void)
+{
+ i2c_del_driver(&max1668_driver);
+}
+
+MODULE_AUTHOR("David George <david.george@ska.ac.za>");
+MODULE_DESCRIPTION("MAX1668 remote temperature sensor driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_max1668_init);
+module_exit(sensors_max1668_exit);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
new file mode 100644
index 0000000..eab1161
--- /dev/null
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -0,0 +1,452 @@
+/*
+ * ntc_thermistor.c - NTC Thermistors
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/math64.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+
+#include <linux/platform_data/ntc_thermistor.h>
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+struct ntc_compensation {
+ int temp_C;
+ unsigned int ohm;
+};
+
+/*
+ * A compensation table should be sorted by the values of .ohm
+ * in descending order.
+ * The following compensation tables are taken from the Murata NTC
+ * thermistor datasheets.
+ */
+static const struct ntc_compensation ncpXXwb473[] = {
+ { .temp_C = -40, .ohm = 1747920 },
+ { .temp_C = -35, .ohm = 1245428 },
+ { .temp_C = -30, .ohm = 898485 },
+ { .temp_C = -25, .ohm = 655802 },
+ { .temp_C = -20, .ohm = 483954 },
+ { .temp_C = -15, .ohm = 360850 },
+ { .temp_C = -10, .ohm = 271697 },
+ { .temp_C = -5, .ohm = 206463 },
+ { .temp_C = 0, .ohm = 158214 },
+ { .temp_C = 5, .ohm = 122259 },
+ { .temp_C = 10, .ohm = 95227 },
+ { .temp_C = 15, .ohm = 74730 },
+ { .temp_C = 20, .ohm = 59065 },
+ { .temp_C = 25, .ohm = 47000 },
+ { .temp_C = 30, .ohm = 37643 },
+ { .temp_C = 35, .ohm = 30334 },
+ { .temp_C = 40, .ohm = 24591 },
+ { .temp_C = 45, .ohm = 20048 },
+ { .temp_C = 50, .ohm = 16433 },
+ { .temp_C = 55, .ohm = 13539 },
+ { .temp_C = 60, .ohm = 11209 },
+ { .temp_C = 65, .ohm = 9328 },
+ { .temp_C = 70, .ohm = 7798 },
+ { .temp_C = 75, .ohm = 6544 },
+ { .temp_C = 80, .ohm = 5518 },
+ { .temp_C = 85, .ohm = 4674 },
+ { .temp_C = 90, .ohm = 3972 },
+ { .temp_C = 95, .ohm = 3388 },
+ { .temp_C = 100, .ohm = 2902 },
+ { .temp_C = 105, .ohm = 2494 },
+ { .temp_C = 110, .ohm = 2150 },
+ { .temp_C = 115, .ohm = 1860 },
+ { .temp_C = 120, .ohm = 1615 },
+ { .temp_C = 125, .ohm = 1406 },
+};
+static const struct ntc_compensation ncpXXwl333[] = {
+ { .temp_C = -40, .ohm = 1610154 },
+ { .temp_C = -35, .ohm = 1130850 },
+ { .temp_C = -30, .ohm = 802609 },
+ { .temp_C = -25, .ohm = 575385 },
+ { .temp_C = -20, .ohm = 416464 },
+ { .temp_C = -15, .ohm = 304219 },
+ { .temp_C = -10, .ohm = 224193 },
+ { .temp_C = -5, .ohm = 166623 },
+ { .temp_C = 0, .ohm = 124850 },
+ { .temp_C = 5, .ohm = 94287 },
+ { .temp_C = 10, .ohm = 71747 },
+ { .temp_C = 15, .ohm = 54996 },
+ { .temp_C = 20, .ohm = 42455 },
+ { .temp_C = 25, .ohm = 33000 },
+ { .temp_C = 30, .ohm = 25822 },
+ { .temp_C = 35, .ohm = 20335 },
+ { .temp_C = 40, .ohm = 16115 },
+ { .temp_C = 45, .ohm = 12849 },
+ { .temp_C = 50, .ohm = 10306 },
+ { .temp_C = 55, .ohm = 8314 },
+ { .temp_C = 60, .ohm = 6746 },
+ { .temp_C = 65, .ohm = 5503 },
+ { .temp_C = 70, .ohm = 4513 },
+ { .temp_C = 75, .ohm = 3721 },
+ { .temp_C = 80, .ohm = 3084 },
+ { .temp_C = 85, .ohm = 2569 },
+ { .temp_C = 90, .ohm = 2151 },
+ { .temp_C = 95, .ohm = 1809 },
+ { .temp_C = 100, .ohm = 1529 },
+ { .temp_C = 105, .ohm = 1299 },
+ { .temp_C = 110, .ohm = 1108 },
+ { .temp_C = 115, .ohm = 949 },
+ { .temp_C = 120, .ohm = 817 },
+ { .temp_C = 125, .ohm = 707 },
+};
+
+struct ntc_data {
+ struct device *hwmon_dev;
+ struct ntc_thermistor_platform_data *pdata;
+ const struct ntc_compensation *comp;
+ struct device *dev;
+ int n_comp;
+ char name[PLATFORM_NAME_SIZE];
+};
+
+static inline u64 div64_u64_safe(u64 dividend, u64 divisor)
+{
+ if (divisor == 0 && dividend == 0)
+ return 0;
+ if (divisor == 0)
+ return UINT_MAX;
+ return div64_u64(dividend, divisor);
+}
+
+static unsigned int get_ohm_of_thermistor(struct ntc_data *data,
+ unsigned int uV)
+{
+ struct ntc_thermistor_platform_data *pdata = data->pdata;
+ u64 mV = uV / 1000;
+ u64 pmV = pdata->pullup_uV / 1000;
+ u64 N, puO, pdO;
+ puO = pdata->pullup_ohm;
+ pdO = pdata->pulldown_ohm;
+
+ if (mV == 0) {
+ if (pdata->connect == NTC_CONNECTED_POSITIVE)
+ return UINT_MAX;
+ return 0;
+ }
+ if (mV >= pmV)
+ return (pdata->connect == NTC_CONNECTED_POSITIVE) ?
+ 0 : UINT_MAX;
+
+ if (pdata->connect == NTC_CONNECTED_POSITIVE && puO == 0)
+ N = div64_u64_safe(pdO * (pmV - mV), mV);
+ else if (pdata->connect == NTC_CONNECTED_GROUND && pdO == 0)
+ N = div64_u64_safe(puO * mV, pmV - mV);
+ else if (pdata->connect == NTC_CONNECTED_POSITIVE)
+ N = div64_u64_safe(pdO * puO * (pmV - mV),
+ puO * mV - pdO * (pmV - mV));
+ else
+ N = div64_u64_safe(pdO * puO * mV, pdO * (pmV - mV) - puO * mV);
+
+ return (unsigned int) N;
+}
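+
+/*
+ * Example with made-up board values: connect = NTC_CONNECTED_GROUND,
+ * pulldown_ohm = 0, pullup_ohm = 47000, pullup_uV = 1800000. A reading of
+ * 900000 uV (half the rail) gives N = 47000 * 900 / (1800 - 900) = 47000,
+ * i.e. the thermistor resistance equals the pull-up.
+ */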
+
+static int lookup_comp(struct ntc_data *data,
+ unsigned int ohm, int *i_low, int *i_high)
+{
+ int start, end, mid = -1;
+
+ /* Do a binary search on compensation table */
+ start = 0;
+ end = data->n_comp;
+
+ while (end > start) {
+ mid = start + (end - start) / 2;
+ if (data->comp[mid].ohm < ohm)
+ end = mid;
+ else if (data->comp[mid].ohm > ohm)
+ start = mid + 1;
+ else
+ break;
+ }
+
+ if (mid == 0) {
+ if (data->comp[mid].ohm > ohm) {
+ *i_high = mid;
+ *i_low = mid + 1;
+ return 0;
+ } else {
+ *i_low = mid;
+ *i_high = -1;
+ return -EINVAL;
+ }
+ }
+ if (mid == (data->n_comp - 1)) {
+ if (data->comp[mid].ohm <= ohm) {
+ *i_low = mid;
+ *i_high = mid - 1;
+ return 0;
+ } else {
+ *i_low = -1;
+ *i_high = mid;
+ return -EINVAL;
+ }
+ }
+
+ if (data->comp[mid].ohm <= ohm) {
+ *i_low = mid;
+ *i_high = mid - 1;
+ } else {
+ *i_low = mid + 1;
+ *i_high = mid;
+ }
+
+ return 0;
+}
+
+static int get_temp_mC(struct ntc_data *data, unsigned int ohm, int *temp)
+{
+ int low, high;
+ int ret;
+
+ ret = lookup_comp(data, ohm, &low, &high);
+ if (ret) {
+ /* Unable to use linear approximation */
+ if (low != -1)
+ *temp = data->comp[low].temp_C * 1000;
+ else if (high != -1)
+ *temp = data->comp[high].temp_C * 1000;
+ else
+ return ret;
+ } else {
+ *temp = data->comp[low].temp_C * 1000 +
+ ((data->comp[high].temp_C - data->comp[low].temp_C) *
+ 1000 * ((int)ohm - (int)data->comp[low].ohm)) /
+ ((int)data->comp[high].ohm - (int)data->comp[low].ohm);
+ }
+
+ return 0;
+}
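+
+/*
+ * Interpolation example (values picked from the ncpXXwb473 table above):
+ * ohm = 42000 falls between 47000 ohm (25 C) and 37643 ohm (30 C), so
+ * get_temp_mC() reports roughly 27700 millidegrees C.
+ */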
+
+static int ntc_thermistor_read(struct ntc_data *data, int *temp)
+{
+ int ret;
+ int read_ohm, read_uV;
+ unsigned int ohm = 0;
+
+ if (data->pdata->read_ohm) {
+ read_ohm = data->pdata->read_ohm();
+ if (read_ohm < 0)
+ return read_ohm;
+ ohm = (unsigned int)read_ohm;
+ }
+
+ if (data->pdata->read_uV) {
+ read_uV = data->pdata->read_uV();
+ if (read_uV < 0)
+ return read_uV;
+ ohm = get_ohm_of_thermistor(data, (unsigned int)read_uV);
+ }
+
+ ret = get_temp_mC(data, ohm, temp);
+ if (ret) {
+ dev_dbg(data->dev, "Sensor reading function not available.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static ssize_t ntc_show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ntc_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", data->name);
+}
+
+static ssize_t ntc_show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "4\n");
+}
+
+static ssize_t ntc_show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ntc_data *data = dev_get_drvdata(dev);
+ int temp, ret;
+
+ ret = ntc_thermistor_read(data, &temp);
+ if (ret)
+ return ret;
+ return sprintf(buf, "%d\n", temp);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, ntc_show_type, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ntc_show_temp, NULL, 0);
+static DEVICE_ATTR(name, S_IRUGO, ntc_show_name, NULL);
+
+static struct attribute *ntc_attributes[] = {
+ &dev_attr_name.attr,
+ &sensor_dev_attr_temp1_type.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ntc_attr_group = {
+ .attrs = ntc_attributes,
+};
+
+static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
+{
+ struct ntc_data *data;
+ struct ntc_thermistor_platform_data *pdata = pdev->dev.platform_data;
+ int ret = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform init data supplied.\n");
+ return -ENODEV;
+ }
+
+ /* Either one of the two is required. */
+ if (!pdata->read_uV && !pdata->read_ohm) {
+ dev_err(&pdev->dev, "Both read_uV and read_ohm missing."
+ "Need either one of the two.\n");
+ return -EINVAL;
+ }
+
+ if (pdata->read_uV && pdata->read_ohm) {
+ dev_warn(&pdev->dev, "Only one of read_uV and read_ohm "
+ "is needed; ignoring read_uV.\n");
+ pdata->read_uV = NULL;
+ }
+
+ if (pdata->read_uV && (pdata->pullup_uV == 0 ||
+ (pdata->pullup_ohm == 0 && pdata->connect ==
+ NTC_CONNECTED_GROUND) ||
+ (pdata->pulldown_ohm == 0 && pdata->connect ==
+ NTC_CONNECTED_POSITIVE) ||
+ (pdata->connect != NTC_CONNECTED_POSITIVE &&
+ pdata->connect != NTC_CONNECTED_GROUND))) {
+ dev_err(&pdev->dev, "Required data to use read_uV not "
+ "supplied.\n");
+ return -EINVAL;
+ }
+
+ data = kzalloc(sizeof(struct ntc_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &pdev->dev;
+ data->pdata = pdata;
+ strncpy(data->name, pdev->id_entry->name, PLATFORM_NAME_SIZE);
+
+ switch (pdev->id_entry->driver_data) {
+ case TYPE_NCPXXWB473:
+ data->comp = ncpXXwb473;
+ data->n_comp = ARRAY_SIZE(ncpXXwb473);
+ break;
+ case TYPE_NCPXXWL333:
+ data->comp = ncpXXwl333;
+ data->n_comp = ARRAY_SIZE(ncpXXwl333);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n",
+ pdev->id_entry->driver_data,
+ pdev->id_entry->name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ ret = sysfs_create_group(&data->dev->kobj, &ntc_attr_group);
+ if (ret) {
+ dev_err(data->dev, "unable to create sysfs files\n");
+ goto err;
+ }
+
+ data->hwmon_dev = hwmon_device_register(data->dev);
+ if (IS_ERR_OR_NULL(data->hwmon_dev)) {
+ dev_err(data->dev, "unable to register as hwmon device.\n");
+ ret = -EINVAL;
+ goto err_after_sysfs;
+ }
+
+ dev_info(&pdev->dev, "Thermistor %s:%d (type: %s/%lu) successfully probed.\n",
+ pdev->name, pdev->id, pdev->id_entry->name,
+ pdev->id_entry->driver_data);
+ return 0;
+err_after_sysfs:
+ sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
+err:
+ kfree(data);
+ return ret;
+}
+
+static int __devexit ntc_thermistor_remove(struct platform_device *pdev)
+{
+ struct ntc_data *data = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct platform_device_id ntc_thermistor_id[] = {
+ { "ncp15wb473", TYPE_NCPXXWB473 },
+ { "ncp18wb473", TYPE_NCPXXWB473 },
+ { "ncp21wb473", TYPE_NCPXXWB473 },
+ { "ncp03wb473", TYPE_NCPXXWB473 },
+ { "ncp15wl333", TYPE_NCPXXWL333 },
+ { },
+};
+
+static struct platform_driver ntc_thermistor_driver = {
+ .driver = {
+ .name = "ntc-thermistor",
+ .owner = THIS_MODULE,
+ },
+ .probe = ntc_thermistor_probe,
+ .remove = __devexit_p(ntc_thermistor_remove),
+ .id_table = ntc_thermistor_id,
+};
+
+static int __init ntc_thermistor_init(void)
+{
+ return platform_driver_register(&ntc_thermistor_driver);
+}
+
+module_init(ntc_thermistor_init);
+
+static void __exit ntc_thermistor_cleanup(void)
+{
+ platform_driver_unregister(&ntc_thermistor_driver);
+}
+
+module_exit(ntc_thermistor_cleanup);
+
+MODULE_DESCRIPTION("NTC Thermistor Driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ntc-thermistor");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
new file mode 100644
index 0000000..c9237b9
--- /dev/null
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -0,0 +1,100 @@
+#
+# PMBus chip drivers configuration
+#
+
+menuconfig PMBUS
+ tristate "PMBus support"
+ depends on I2C && EXPERIMENTAL
+ default n
+ help
+ Say yes here if you want to enable PMBus support.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus_core.
+
+if PMBUS
+
+config SENSORS_PMBUS
+ tristate "Generic PMBus devices"
+ default y
+ help
+ If you say yes here you get hardware monitoring support for generic
+ PMBus devices, including but not limited to ADP4000, BMR450, BMR451,
+ BMR453, BMR454, LTC2978, NCP4200, and NCP4208.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus.
+
+config SENSORS_ADM1275
+ tristate "Analog Devices ADM1275"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Analog
+ Devices ADM1275 Hot-Swap Controller and Digital Power Monitor.
+
+ This driver can also be built as a module. If so, the module will
+ be called adm1275.
+
+config SENSORS_LM25066
+ tristate "National Semiconductor LM25066 and compatibles"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for National
+ Semiconductor LM25066, LM5064, and LM5066.
+
+ This driver can also be built as a module. If so, the module will
+ be called lm25066.
+
+config SENSORS_MAX16064
+ tristate "Maxim MAX16064"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX16064.
+
+ This driver can also be built as a module. If so, the module will
+ be called max16064.
+
+config SENSORS_MAX34440
+ tristate "Maxim MAX34440/MAX34441"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX34440 and MAX34441.
+
+ This driver can also be built as a module. If so, the module will
+ be called max34440.
+
+config SENSORS_MAX8688
+ tristate "Maxim MAX8688"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX8688.
+
+ This driver can also be built as a module. If so, the module will
+ be called max8688.
+
+config SENSORS_UCD9000
+ tristate "TI UCD90120, UCD90124, UCD9090, UCD90910"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for TI
+ UCD90120, UCD90124, UCD9090, UCD90910 Sequencer and System Health
+ Controllers.
+
+ This driver can also be built as a module. If so, the module will
+ be called ucd9000.
+
+config SENSORS_UCD9200
+ tristate "TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, UCD9248"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for TI
+ UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
+ Digital PWM System Controllers.
+
+ This driver can also be built as a module. If so, the module will
+ be called ucd9200.
+
+endif # PMBUS
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
new file mode 100644
index 0000000..623eedb
--- /dev/null
+++ b/drivers/hwmon/pmbus/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for PMBus chip drivers.
+#
+
+obj-$(CONFIG_PMBUS) += pmbus_core.o
+obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
+obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
+obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
+obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
+obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
+obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
diff --git a/drivers/hwmon/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 8bc1bd6..c936e27 100644
--- a/drivers/hwmon/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -23,11 +23,68 @@
#include <linux/i2c.h>
#include "pmbus.h"
+#define ADM1275_PEAK_IOUT 0xd0
+#define ADM1275_PEAK_VIN 0xd1
+#define ADM1275_PEAK_VOUT 0xd2
#define ADM1275_PMON_CONFIG 0xd4
#define ADM1275_VIN_VOUT_SELECT (1 << 6)
#define ADM1275_VRANGE (1 << 5)
+static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ if (page)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_IOUT);
+ break;
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VOUT);
+ break;
+ case PMBUS_VIRT_READ_VIN_MAX:
+ ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VIN);
+ break;
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int ret;
+
+ if (page)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_IOUT, 0);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VOUT, 0);
+ break;
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VIN, 0);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -50,14 +107,17 @@ static int adm1275_probe(struct i2c_client *client,
}
info->pages = 1;
- info->direct[PSC_VOLTAGE_IN] = true;
- info->direct[PSC_VOLTAGE_OUT] = true;
- info->direct[PSC_CURRENT_OUT] = true;
+ info->format[PSC_VOLTAGE_IN] = direct;
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->format[PSC_CURRENT_OUT] = direct;
info->m[PSC_CURRENT_OUT] = 807;
info->b[PSC_CURRENT_OUT] = 20475;
info->R[PSC_CURRENT_OUT] = -1;
info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
+ info->read_word_data = adm1275_read_word_data;
+ info->write_word_data = adm1275_write_word_data;
+
if (config & ADM1275_VRANGE) {
info->m[PSC_VOLTAGE_IN] = 19199;
info->b[PSC_VOLTAGE_IN] = 0;
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
new file mode 100644
index 0000000..ac254fb
--- /dev/null
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -0,0 +1,352 @@
+/*
+ * Hardware monitoring driver for LM25066 / LM5064 / LM5066
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum chips { lm25066, lm5064, lm5066 };
+
+#define LM25066_READ_VAUX 0xd0
+#define LM25066_MFR_READ_IIN 0xd1
+#define LM25066_MFR_READ_PIN 0xd2
+#define LM25066_MFR_IIN_OC_WARN_LIMIT 0xd3
+#define LM25066_MFR_PIN_OP_WARN_LIMIT 0xd4
+#define LM25066_READ_PIN_PEAK 0xd5
+#define LM25066_CLEAR_PIN_PEAK 0xd6
+#define LM25066_DEVICE_SETUP 0xd9
+#define LM25066_READ_AVG_VIN 0xdc
+#define LM25066_READ_AVG_VOUT 0xdd
+#define LM25066_READ_AVG_IIN 0xde
+#define LM25066_READ_AVG_PIN 0xdf
+
+#define LM25066_DEV_SETUP_CL (1 << 4) /* Current limit */
+
+struct lm25066_data {
+ int id;
+ struct pmbus_driver_info info;
+};
+
+#define to_lm25066_data(x) container_of(x, struct lm25066_data, info)
+
+static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct lm25066_data *data = to_lm25066_data(info);
+ int ret;
+
+ if (page > 1)
+ return -EINVAL;
+
+ /* Map READ_VAUX into READ_VOUT register on page 1 */
+ if (page == 1) {
+ switch (reg) {
+ case PMBUS_READ_VOUT:
+ ret = pmbus_read_word_data(client, 0,
+ LM25066_READ_VAUX);
+ if (ret < 0)
+ break;
+ /* Adjust returned value to match VOUT coefficients */
+ switch (data->id) {
+ case lm25066:
+ /* VOUT: 4.54 mV VAUX: 283.2 uV LSB */
+ ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
+ break;
+ case lm5064:
+ /* VOUT: 4.53 mV VAUX: 700 uV LSB */
+ ret = DIV_ROUND_CLOSEST(ret * 70, 453);
+ break;
+ case lm5066:
+ /* VOUT: 2.18 mV VAUX: 725 uV LSB */
+ ret = DIV_ROUND_CLOSEST(ret * 725, 2180);
+ break;
+ }
+ break;
+ default:
+ /* No other valid registers on page 1 */
+ ret = -EINVAL;
+ break;
+ }
+ goto done;
+ }
+
+ switch (reg) {
+ case PMBUS_READ_IIN:
+ ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_IIN);
+ break;
+ case PMBUS_READ_PIN:
+ ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_PIN);
+ break;
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, 0,
+ LM25066_MFR_IIN_OC_WARN_LIMIT);
+ break;
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, 0,
+ LM25066_MFR_PIN_OP_WARN_LIMIT);
+ break;
+ case PMBUS_VIRT_READ_VIN_AVG:
+ ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VIN);
+ break;
+ case PMBUS_VIRT_READ_VOUT_AVG:
+ ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VOUT);
+ break;
+ case PMBUS_VIRT_READ_IIN_AVG:
+ ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_IIN);
+ break;
+ case PMBUS_VIRT_READ_PIN_AVG:
+ ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_PIN);
+ break;
+ case PMBUS_VIRT_READ_PIN_MAX:
+ ret = pmbus_read_word_data(client, 0, LM25066_READ_PIN_PEAK);
+ break;
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+done:
+ return ret;
+}
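+
+/*
+ * Scaling example for the VAUX mapping above (lm25066, illustrative raw
+ * value): a READ_VAUX code of 100 represents 100 * 283.2 uV = 28.32 mV,
+ * which DIV_ROUND_CLOSEST(100 * 2832, 45400) converts to 6, i.e. about
+ * 6 * 4.54 mV in VOUT units.
+ */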
+
+static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int ret;
+
+ if (page > 1)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ ret = pmbus_write_word_data(client, 0,
+ LM25066_MFR_IIN_OC_WARN_LIMIT,
+ word);
+ break;
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ ret = pmbus_write_word_data(client, 0,
+ LM25066_MFR_PIN_OP_WARN_LIMIT,
+ word);
+ break;
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ ret = pmbus_write_byte(client, 0, LM25066_CLEAR_PIN_PEAK);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int lm25066_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ if (page > 1)
+ return -EINVAL;
+
+ if (page == 0)
+ return pmbus_write_byte(client, 0, value);
+
+ return 0;
+}
+
+static int lm25066_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int config;
+ int ret;
+ struct lm25066_data *data;
+ struct pmbus_driver_info *info;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(struct lm25066_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ config = i2c_smbus_read_byte_data(client, LM25066_DEVICE_SETUP);
+ if (config < 0) {
+ ret = config;
+ goto err_mem;
+ }
+
+ data->id = id->driver_data;
+ info = &data->info;
+
+ info->pages = 2;
+ info->format[PSC_VOLTAGE_IN] = direct;
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->format[PSC_CURRENT_IN] = direct;
+ info->format[PSC_TEMPERATURE] = direct;
+ info->format[PSC_POWER] = direct;
+
+ info->m[PSC_TEMPERATURE] = 16;
+ info->b[PSC_TEMPERATURE] = 0;
+ info->R[PSC_TEMPERATURE] = 0;
+
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_PIN | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ info->func[1] = PMBUS_HAVE_VOUT;
+
+ info->read_word_data = lm25066_read_word_data;
+ info->write_word_data = lm25066_write_word_data;
+ info->write_byte = lm25066_write_byte;
+
+ switch (id->driver_data) {
+ case lm25066:
+ info->m[PSC_VOLTAGE_IN] = 22070;
+ info->b[PSC_VOLTAGE_IN] = 0;
+ info->R[PSC_VOLTAGE_IN] = -2;
+ info->m[PSC_VOLTAGE_OUT] = 22070;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = -2;
+
+ if (config & LM25066_DEV_SETUP_CL) {
+ info->m[PSC_CURRENT_IN] = 6852;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 369;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -2;
+ } else {
+ info->m[PSC_CURRENT_IN] = 13661;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 736;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -2;
+ }
+ break;
+ case lm5064:
+ info->m[PSC_VOLTAGE_IN] = 22075;
+ info->b[PSC_VOLTAGE_IN] = 0;
+ info->R[PSC_VOLTAGE_IN] = -2;
+ info->m[PSC_VOLTAGE_OUT] = 22075;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = -2;
+
+ if (config & LM25066_DEV_SETUP_CL) {
+ info->m[PSC_CURRENT_IN] = 6713;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 3619;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -3;
+ } else {
+ info->m[PSC_CURRENT_IN] = 13426;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 7238;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -3;
+ }
+ break;
+ case lm5066:
+ info->m[PSC_VOLTAGE_IN] = 4587;
+ info->b[PSC_VOLTAGE_IN] = 0;
+ info->R[PSC_VOLTAGE_IN] = -2;
+ info->m[PSC_VOLTAGE_OUT] = 4587;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = -2;
+
+ if (config & LM25066_DEV_SETUP_CL) {
+ info->m[PSC_CURRENT_IN] = 10753;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 1204;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -3;
+ } else {
+ info->m[PSC_CURRENT_IN] = 5405;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 605;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -3;
+ }
+ break;
+ default:
+ ret = -ENODEV;
+ goto err_mem;
+ }
+
+ ret = pmbus_do_probe(client, id, info);
+ if (ret)
+ goto err_mem;
+ return 0;
+
+err_mem:
+ kfree(data);
+ return ret;
+}
+
+static int lm25066_remove(struct i2c_client *client)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct lm25066_data *data = to_lm25066_data(info);
+ int ret;
+
+ ret = pmbus_do_remove(client);
+ kfree(data);
+ return ret;
+}
+
+static const struct i2c_device_id lm25066_id[] = {
+ {"lm25066", lm25066},
+ {"lm5064", lm5064},
+ {"lm5066", lm5066},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, lm25066_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver lm25066_driver = {
+ .driver = {
+ .name = "lm25066",
+ },
+ .probe = lm25066_probe,
+ .remove = lm25066_remove,
+ .id_table = lm25066_id,
+};
+
+static int __init lm25066_init(void)
+{
+ return i2c_add_driver(&lm25066_driver);
+}
+
+static void __exit lm25066_exit(void)
+{
+ i2c_del_driver(&lm25066_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for LM25066/LM5064/LM5066");
+MODULE_LICENSE("GPL");
+module_init(lm25066_init);
+module_exit(lm25066_exit);
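The m, b and R coefficients set up in lm25066_probe() feed the standard PMBus direct-format conversion, X = (1/m) * (Y * 10^-R - b), which pmbus_core applies to every reading in a direct-format sensor class. A minimal standalone sketch of that arithmetic using the LM25066 VIN coefficients from the probe above (illustrative helper, not part of the patch; the real core additionally scales to milli-units and rounds):

#include <stdio.h>

/* Sketch only: PMBus "direct" data format, X = (1/m) * (Y * 10^-R - b). */
static long direct_reg2val(long y, long m, long b, int R)
{
	while (R < 0) {		/* apply the 10^-R scaling to the raw word */
		y *= 10;
		R++;
	}
	while (R > 0) {
		y /= 10;
		R--;
	}
	return (y - b) / m;
}

int main(void)
{
	/* LM25066 VIN coefficients from the probe above: m=22070, b=0, R=-2 */
	printf("%ld V\n", direct_reg2val(2649, 22070, 0, -2));	/* prints "12 V" */
	return 0;
}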
diff --git a/drivers/hwmon/max16064.c b/drivers/hwmon/pmbus/max16064.c
index 1d6d717..e50b296 100644
--- a/drivers/hwmon/max16064.c
+++ b/drivers/hwmon/pmbus/max16064.c
@@ -25,11 +25,60 @@
#include <linux/i2c.h>
#include "pmbus.h"
+#define MAX16064_MFR_VOUT_PEAK 0xd4
+#define MAX16064_MFR_TEMPERATURE_PEAK 0xd6
+
+static int max16064_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, page,
+ MAX16064_MFR_VOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, page,
+ MAX16064_MFR_TEMPERATURE_PEAK);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int max16064_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = pmbus_write_word_data(client, page,
+ MAX16064_MFR_VOUT_PEAK, 0);
+ break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = pmbus_write_word_data(client, page,
+ MAX16064_MFR_TEMPERATURE_PEAK,
+ 0xffff);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
static struct pmbus_driver_info max16064_info = {
.pages = 4,
- .direct[PSC_VOLTAGE_IN] = true,
- .direct[PSC_VOLTAGE_OUT] = true,
- .direct[PSC_TEMPERATURE] = true,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
.m[PSC_VOLTAGE_IN] = 19995,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = -1,
@@ -44,6 +93,8 @@ static struct pmbus_driver_info max16064_info = {
.func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
.func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
.func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .read_word_data = max16064_read_word_data,
+ .write_word_data = max16064_write_word_data,
};
static int max16064_probe(struct i2c_client *client,
diff --git a/drivers/hwmon/max34440.c b/drivers/hwmon/pmbus/max34440.c
index db11e1a..fda621d 100644
--- a/drivers/hwmon/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -27,11 +27,70 @@
enum chips { max34440, max34441 };
+#define MAX34440_MFR_VOUT_PEAK 0xd4
+#define MAX34440_MFR_IOUT_PEAK 0xd5
+#define MAX34440_MFR_TEMPERATURE_PEAK 0xd6
+
#define MAX34440_STATUS_OC_WARN (1 << 0)
#define MAX34440_STATUS_OC_FAULT (1 << 1)
#define MAX34440_STATUS_OT_FAULT (1 << 5)
#define MAX34440_STATUS_OT_WARN (1 << 6)
+static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, page,
+ MAX34440_MFR_VOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = pmbus_read_word_data(client, page,
+ MAX34440_MFR_IOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, page,
+ MAX34440_MFR_TEMPERATURE_PEAK);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int max34440_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = pmbus_write_word_data(client, page,
+ MAX34440_MFR_VOUT_PEAK, 0);
+ break;
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ ret = pmbus_write_word_data(client, page,
+ MAX34440_MFR_IOUT_PEAK, 0);
+ break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = pmbus_write_word_data(client, page,
+ MAX34440_MFR_TEMPERATURE_PEAK,
+ 0xffff);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
{
int ret;
@@ -72,10 +131,10 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
static struct pmbus_driver_info max34440_info[] = {
[max34440] = {
.pages = 14,
- .direct[PSC_VOLTAGE_IN] = true,
- .direct[PSC_VOLTAGE_OUT] = true,
- .direct[PSC_TEMPERATURE] = true,
- .direct[PSC_CURRENT_OUT] = true,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
.m[PSC_VOLTAGE_IN] = 1,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */
@@ -109,14 +168,16 @@ static struct pmbus_driver_info max34440_info[] = {
.func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
},
[max34441] = {
.pages = 12,
- .direct[PSC_VOLTAGE_IN] = true,
- .direct[PSC_VOLTAGE_OUT] = true,
- .direct[PSC_TEMPERATURE] = true,
- .direct[PSC_CURRENT_OUT] = true,
- .direct[PSC_FAN] = true,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_FAN] = direct,
.m[PSC_VOLTAGE_IN] = 1,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = 3,
@@ -150,6 +211,8 @@ static struct pmbus_driver_info max34440_info[] = {
.func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
},
};
diff --git a/drivers/hwmon/max8688.c b/drivers/hwmon/pmbus/max8688.c
index 7fb93f4..c3e72f1 100644
--- a/drivers/hwmon/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -25,6 +25,9 @@
#include <linux/i2c.h>
#include "pmbus.h"
+#define MAX8688_MFR_VOUT_PEAK 0xd4
+#define MAX8688_MFR_IOUT_PEAK 0xd5
+#define MAX8688_MFR_TEMPERATURE_PEAK 0xd6
#define MAX8688_MFG_STATUS 0xd8
#define MAX8688_STATUS_OC_FAULT (1 << 4)
@@ -37,6 +40,62 @@
#define MAX8688_STATUS_OT_FAULT (1 << 13)
#define MAX8688_STATUS_OT_WARNING (1 << 14)
+static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ if (page)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, 0, MAX8688_MFR_VOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = pmbus_read_word_data(client, 0, MAX8688_MFR_IOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, 0,
+ MAX8688_MFR_TEMPERATURE_PEAK);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int max8688_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = pmbus_write_word_data(client, 0, MAX8688_MFR_VOUT_PEAK,
+ 0);
+ break;
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ ret = pmbus_write_word_data(client, 0, MAX8688_MFR_IOUT_PEAK,
+ 0);
+ break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = pmbus_write_word_data(client, 0,
+ MAX8688_MFR_TEMPERATURE_PEAK,
+ 0xffff);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
{
int ret = 0;
@@ -91,10 +150,10 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
static struct pmbus_driver_info max8688_info = {
.pages = 1,
- .direct[PSC_VOLTAGE_IN] = true,
- .direct[PSC_VOLTAGE_OUT] = true,
- .direct[PSC_TEMPERATURE] = true,
- .direct[PSC_CURRENT_OUT] = true,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
.m[PSC_VOLTAGE_IN] = 19995,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = -1,
@@ -111,6 +170,8 @@ static struct pmbus_driver_info max8688_info = {
| PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
| PMBUS_HAVE_STATUS_TEMP,
.read_byte_data = max8688_read_byte_data,
+ .read_word_data = max8688_read_word_data,
+ .write_word_data = max8688_write_word_data,
};
static int max8688_probe(struct i2c_client *client,
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 9b1f0c3..73de9f1 100644
--- a/drivers/hwmon/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -96,6 +96,8 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
static int pmbus_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
+ int ret = 0;
+
if (!info->pages) {
/*
* Check if the PAGE command is supported. If it is,
@@ -117,6 +119,27 @@ static int pmbus_identify(struct i2c_client *client,
}
}
+ if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
+ int vout_mode;
+
+ vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (vout_mode >= 0 && vout_mode != 0xff) {
+ switch (vout_mode >> 5) {
+ case 0:
+ break;
+ case 1:
+ info->format[PSC_VOLTAGE_OUT] = vid;
+ break;
+ case 2:
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ break;
+ default:
+ ret = -ENODEV;
+ goto abort;
+ }
+ }
+ }
+
/*
* We should check if the COEFFICIENTS register is supported.
* If it is, and the chip is configured for direct mode, we can read
@@ -125,13 +148,18 @@ static int pmbus_identify(struct i2c_client *client,
*
* To do this, we will need access to a chip which actually supports the
* COEFFICIENTS command, since the command is too complex to implement
- * without testing it.
+ * without testing it. Until then, abort if a chip configured for direct
+ * mode was detected.
*/
+ if (info->format[PSC_VOLTAGE_OUT] == direct) {
+ ret = -ENODEV;
+ goto abort;
+ }
/* Try to find sensor groups */
pmbus_find_sensor_groups(client, info);
-
- return 0;
+abort:
+ return ret;
}
static int pmbus_probe(struct i2c_client *client,
@@ -172,11 +200,14 @@ static int pmbus_remove(struct i2c_client *client)
* Use driver_data to set the number of pages supported by the chip.
*/
static const struct i2c_device_id pmbus_id[] = {
+ {"adp4000", 1},
{"bmr450", 1},
{"bmr451", 1},
{"bmr453", 1},
{"bmr454", 1},
{"ltc2978", 8},
+ {"ncp4200", 1},
+ {"ncp4208", 1},
{"pmbus", 0},
{}
};
diff --git a/drivers/hwmon/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 50647ab..a6ae20f 100644
--- a/drivers/hwmon/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -126,6 +126,42 @@
#define PMBUS_MFR_SERIAL 0x9E
/*
+ * Virtual registers.
+ * Useful to support attributes which are not supported by standard PMBus
+ * registers but exist as manufacturer specific registers on individual chips.
+ * Must be mapped to real registers in device specific code.
+ *
+ * Semantics:
+ * Virtual registers are all word size.
+ * READ registers are read-only; writes are either ignored or return an error.
+ * RESET registers are read/write. Reading returns zero (used for detection),
+ * writing any value causes the associated history to be reset.
+ */
+#define PMBUS_VIRT_BASE 0x100
+#define PMBUS_VIRT_READ_TEMP_MIN (PMBUS_VIRT_BASE + 0)
+#define PMBUS_VIRT_READ_TEMP_MAX (PMBUS_VIRT_BASE + 1)
+#define PMBUS_VIRT_RESET_TEMP_HISTORY (PMBUS_VIRT_BASE + 2)
+#define PMBUS_VIRT_READ_VIN_AVG (PMBUS_VIRT_BASE + 3)
+#define PMBUS_VIRT_READ_VIN_MIN (PMBUS_VIRT_BASE + 4)
+#define PMBUS_VIRT_READ_VIN_MAX (PMBUS_VIRT_BASE + 5)
+#define PMBUS_VIRT_RESET_VIN_HISTORY (PMBUS_VIRT_BASE + 6)
+#define PMBUS_VIRT_READ_IIN_AVG (PMBUS_VIRT_BASE + 7)
+#define PMBUS_VIRT_READ_IIN_MIN (PMBUS_VIRT_BASE + 8)
+#define PMBUS_VIRT_READ_IIN_MAX (PMBUS_VIRT_BASE + 9)
+#define PMBUS_VIRT_RESET_IIN_HISTORY (PMBUS_VIRT_BASE + 10)
+#define PMBUS_VIRT_READ_PIN_AVG (PMBUS_VIRT_BASE + 11)
+#define PMBUS_VIRT_READ_PIN_MAX (PMBUS_VIRT_BASE + 12)
+#define PMBUS_VIRT_RESET_PIN_HISTORY (PMBUS_VIRT_BASE + 13)
+#define PMBUS_VIRT_READ_VOUT_AVG (PMBUS_VIRT_BASE + 14)
+#define PMBUS_VIRT_READ_VOUT_MIN (PMBUS_VIRT_BASE + 15)
+#define PMBUS_VIRT_READ_VOUT_MAX (PMBUS_VIRT_BASE + 16)
+#define PMBUS_VIRT_RESET_VOUT_HISTORY (PMBUS_VIRT_BASE + 17)
+#define PMBUS_VIRT_READ_IOUT_AVG (PMBUS_VIRT_BASE + 18)
+#define PMBUS_VIRT_READ_IOUT_MIN (PMBUS_VIRT_BASE + 19)
+#define PMBUS_VIRT_READ_IOUT_MAX (PMBUS_VIRT_BASE + 20)
+#define PMBUS_VIRT_RESET_IOUT_HISTORY (PMBUS_VIRT_BASE + 21)
+
+/*
* CAPABILITY
*/
#define PB_CAPABILITY_SMBALERT (1<<4)
@@ -266,11 +302,11 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
+enum pmbus_data_format { linear = 0, direct, vid };
+
struct pmbus_driver_info {
int pages; /* Total number of pages */
- bool direct[PSC_NUM_CLASSES];
- /* true if device uses direct data format
- for the given sensor class */
+ enum pmbus_data_format format[PSC_NUM_CLASSES];
/*
* Support one set of coefficients for each sensor type
* Used for chips providing data in direct mode.
@@ -286,6 +322,10 @@ struct pmbus_driver_info {
* necessary.
*/
int (*read_byte_data)(struct i2c_client *client, int page, int reg);
+ int (*read_word_data)(struct i2c_client *client, int page, int reg);
+ int (*write_word_data)(struct i2c_client *client, int page, int reg,
+ u16 word);
+ int (*write_byte)(struct i2c_client *client, int page, u8 value);
/*
* The identify function determines supported PMBus functionality.
* This function is only necessary if a chip driver supports multiple
@@ -299,6 +339,9 @@ struct pmbus_driver_info {
int pmbus_set_page(struct i2c_client *client, u8 page);
int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
+int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
+int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
+int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
void pmbus_clear_faults(struct i2c_client *client);
bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
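The virtual-register block above defines the contract the chip drivers in this patch follow: a driver maps each PMBUS_VIRT_* word it supports onto a manufacturer-specific register and returns -ENODATA for everything else, so pmbus_core falls back to its normal handling; the core itself rejects any PMBUS_VIRT_* access that no driver claimed. A minimal sketch of such a mapping for a hypothetical chip (the 0xd7 register and the function name are made up for illustration):

#define EXAMPLE_MFR_VIN_PEAK	0xd7	/* hypothetical manufacturer register */

static int example_read_word_data(struct i2c_client *client, int page, int reg)
{
	switch (reg) {
	case PMBUS_VIRT_READ_VIN_MAX:
		/* virtual word register -> real manufacturer register */
		return pmbus_read_word_data(client, page, EXAMPLE_MFR_VIN_PEAK);
	case PMBUS_VIRT_RESET_VIN_HISTORY:
		/* RESET registers read as zero; writes reset the history */
		return 0;
	default:
		return -ENODATA;	/* let pmbus_core handle the rest */
	}
}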
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 8e31a8e..a561c3a 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -33,14 +33,18 @@
/*
* Constants needed to determine number of sensors, booleans, and labels.
*/
-#define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */
-#define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit,
- crit */
-#define PMBUS_IOUT_SENSORS_PER_PAGE 4 /* input, min, max, crit */
+#define PMBUS_MAX_INPUT_SENSORS 22 /* 10*volt, 7*curr, 5*power */
+#define PMBUS_VOUT_SENSORS_PER_PAGE 9 /* input, min, max, lcrit,
+ crit, lowest, highest, avg,
+ reset */
+#define PMBUS_IOUT_SENSORS_PER_PAGE 8 /* input, min, max, crit,
+ lowest, highest, avg,
+ reset */
#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */
#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
-#define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit,
- crit */
+#define PMBUS_MAX_SENSORS_PER_TEMP 8 /* input, min, max, lcrit,
+ crit, lowest, highest,
+ reset */
#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
lcrit_alarm, crit_alarm;
@@ -74,11 +78,13 @@
#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1)
+#define PMBUS_NAME_SIZE 24
+
struct pmbus_sensor {
- char name[I2C_NAME_SIZE]; /* sysfs sensor name */
+ char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
struct sensor_device_attribute attribute;
u8 page; /* page number */
- u8 reg; /* register */
+ u16 reg; /* register */
enum pmbus_sensor_classes class; /* sensor class */
bool update; /* runtime sensor update needed */
int data; /* Sensor data.
@@ -86,14 +92,14 @@ struct pmbus_sensor {
};
struct pmbus_boolean {
- char name[I2C_NAME_SIZE]; /* sysfs boolean name */
+ char name[PMBUS_NAME_SIZE]; /* sysfs boolean name */
struct sensor_device_attribute attribute;
};
struct pmbus_label {
- char name[I2C_NAME_SIZE]; /* sysfs label name */
+ char name[PMBUS_NAME_SIZE]; /* sysfs label name */
struct sensor_device_attribute attribute;
- char label[I2C_NAME_SIZE]; /* label */
+ char label[PMBUS_NAME_SIZE]; /* label */
};
struct pmbus_data {
@@ -162,19 +168,39 @@ int pmbus_set_page(struct i2c_client *client, u8 page)
}
EXPORT_SYMBOL_GPL(pmbus_set_page);
-static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value)
+int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{
int rv;
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
+ if (page >= 0) {
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+ }
return i2c_smbus_write_byte(client, value);
}
+EXPORT_SYMBOL_GPL(pmbus_write_byte);
+
+/*
+ * _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if
+ * a device specific mapping function exists and calls it if necessary.
+ */
+static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->write_byte) {
+ status = info->write_byte(client, page, value);
+ if (status != -ENODATA)
+ return status;
+ }
+ return pmbus_write_byte(client, page, value);
+}
-static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
- u16 word)
+int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word)
{
int rv;
@@ -184,6 +210,28 @@ static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
return i2c_smbus_write_word_data(client, reg, word);
}
+EXPORT_SYMBOL_GPL(pmbus_write_word_data);
+
+/*
+ * _pmbus_write_word_data() is similar to pmbus_write_word_data(), but checks if
+ * a device specific mapping function exists and calls it if necessary.
+ */
+static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->write_word_data) {
+ status = info->write_word_data(client, page, reg, word);
+ if (status != -ENODATA)
+ return status;
+ }
+ if (reg >= PMBUS_VIRT_BASE)
+ return -EINVAL;
+ return pmbus_write_word_data(client, page, reg, word);
+}
int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
{
@@ -197,20 +245,61 @@ int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
}
EXPORT_SYMBOL_GPL(pmbus_read_word_data);
-static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg)
+/*
+ * _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if
+ * a device specific mapping function exists and calls it if necessary.
+ */
+static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->read_word_data) {
+ status = info->read_word_data(client, page, reg);
+ if (status != -ENODATA)
+ return status;
+ }
+ if (reg >= PMBUS_VIRT_BASE)
+ return -EINVAL;
+ return pmbus_read_word_data(client, page, reg);
+}
+
+int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
+ if (page >= 0) {
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+ }
return i2c_smbus_read_byte_data(client, reg);
}
+EXPORT_SYMBOL_GPL(pmbus_read_byte_data);
+
+/*
+ * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if
+ * a device specific mapping function exists and calls it if necessary.
+ */
+static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->read_byte_data) {
+ status = info->read_byte_data(client, page, reg);
+ if (status != -ENODATA)
+ return status;
+ }
+ return pmbus_read_byte_data(client, page, reg);
+}
static void pmbus_clear_fault_page(struct i2c_client *client, int page)
{
- pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
+ _pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
}
void pmbus_clear_faults(struct i2c_client *client)
@@ -223,13 +312,13 @@ void pmbus_clear_faults(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_clear_faults);
-static int pmbus_check_status_cml(struct i2c_client *client, int page)
+static int pmbus_check_status_cml(struct i2c_client *client)
{
int status, status2;
- status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE);
+ status = pmbus_read_byte_data(client, -1, PMBUS_STATUS_BYTE);
if (status < 0 || (status & PB_STATUS_CML)) {
- status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML);
+ status2 = pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
return -EINVAL;
}
@@ -241,10 +330,10 @@ bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
int rv;
struct pmbus_data *data = i2c_get_clientdata(client);
- rv = pmbus_read_byte_data(client, page, reg);
+ rv = _pmbus_read_byte_data(client, page, reg);
if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
- rv = pmbus_check_status_cml(client, page);
- pmbus_clear_fault_page(client, page);
+ rv = pmbus_check_status_cml(client);
+ pmbus_clear_fault_page(client, -1);
return rv >= 0;
}
EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
@@ -254,10 +343,10 @@ bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
int rv;
struct pmbus_data *data = i2c_get_clientdata(client);
- rv = pmbus_read_word_data(client, page, reg);
+ rv = _pmbus_read_word_data(client, page, reg);
if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
- rv = pmbus_check_status_cml(client, page);
- pmbus_clear_fault_page(client, page);
+ rv = pmbus_check_status_cml(client);
+ pmbus_clear_fault_page(client, -1);
return rv >= 0;
}
EXPORT_SYMBOL_GPL(pmbus_check_word_register);
@@ -270,24 +359,6 @@ const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
-/*
- * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if
- * a device specific mapping funcion exists and calls it if necessary.
- */
-static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
-{
- struct pmbus_data *data = i2c_get_clientdata(client);
- const struct pmbus_driver_info *info = data->info;
- int status;
-
- if (info->read_byte_data) {
- status = info->read_byte_data(client, page, reg);
- if (status != -ENODATA)
- return status;
- }
- return pmbus_read_byte_data(client, page, reg);
-}
-
static struct pmbus_data *pmbus_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -347,8 +418,9 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
if (!data->valid || sensor->update)
sensor->data
- = pmbus_read_word_data(client, sensor->page,
- sensor->reg);
+ = _pmbus_read_word_data(client,
+ sensor->page,
+ sensor->reg);
}
pmbus_clear_faults(client);
data->last_updated = jiffies;
@@ -443,15 +515,37 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
return (val - b) / m;
}
+/*
+ * Convert VID sensor values to milli- or micro-units
+ * depending on sensor type.
+ * We currently only support VR11.
+ */
+static long pmbus_reg2data_vid(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+{
+ long val = sensor->data;
+
+ if (val < 0x02 || val > 0xb2)
+ return 0;
+ return DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
+}
+
static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
{
long val;
- if (data->info->direct[sensor->class])
+ switch (data->info->format[sensor->class]) {
+ case direct:
val = pmbus_reg2data_direct(data, sensor);
- else
+ break;
+ case vid:
+ val = pmbus_reg2data_vid(data, sensor);
+ break;
+ case linear:
+ default:
val = pmbus_reg2data_linear(data, sensor);
-
+ break;
+ }
return val;
}
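pmbus_reg2data_vid() above encodes the VR11 VID table: code 0x02 corresponds to 1.600 V and each further code subtracts 6.25 mV, which is exactly what DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100) returns in millivolts. A standalone arithmetic check (illustrative; the rounding macro below matches the kernel's behaviour for positive values only):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))	/* positive x, d only */

static long vr11_vid_to_mv(long val)
{
	if (val < 0x02 || val > 0xb2)
		return 0;				/* "off" codes */
	return DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
}

int main(void)
{
	printf("%ld mV\n", vr11_vid_to_mv(0x02));	/* 1600 mV */
	printf("%ld mV\n", vr11_vid_to_mv(0x22));	/* 1400 mV: 32 steps of 6.25 mV */
	return 0;
}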
@@ -561,16 +655,31 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
return val;
}
+static u16 pmbus_data2reg_vid(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ val = SENSORS_LIMIT(val, 500, 1600);
+
+ return 2 + DIV_ROUND_CLOSEST((1600 - val) * 100, 625);
+}
+
static u16 pmbus_data2reg(struct pmbus_data *data,
enum pmbus_sensor_classes class, long val)
{
u16 regval;
- if (data->info->direct[class])
+ switch (data->info->format[class]) {
+ case direct:
regval = pmbus_data2reg_direct(data, class, val);
- else
+ break;
+ case vid:
+ regval = pmbus_data2reg_vid(data, class, val);
+ break;
+ case linear:
+ default:
regval = pmbus_data2reg_linear(data, class, val);
-
+ break;
+ }
return regval;
}
@@ -682,7 +791,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
mutex_lock(&data->update_lock);
regval = pmbus_data2reg(data, sensor->class, val);
- ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
+ ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
if (ret < 0)
rv = ret;
else
@@ -867,7 +976,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,
* and its associated alarm attribute.
*/
struct pmbus_limit_attr {
- u8 reg; /* Limit register */
+ u16 reg; /* Limit register */
+ bool update; /* True if register needs updates */
const char *attr; /* Attribute name */
const char *alarm; /* Alarm attribute name */
u32 sbit; /* Alarm attribute status bit */
@@ -912,9 +1022,10 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
if (pmbus_check_word_register(client, page, l->reg)) {
cindex = data->num_sensors;
pmbus_add_sensor(data, name, l->attr, index, page,
- l->reg, attr->class, attr->update,
+ l->reg, attr->class,
+ attr->update || l->update,
false);
- if (info->func[page] & attr->sfunc) {
+ if (l->sbit && (info->func[page] & attr->sfunc)) {
if (attr->compare) {
pmbus_add_boolean_cmp(data, name,
l->alarm, index,
@@ -953,9 +1064,11 @@ static void pmbus_add_sensor_attrs_one(struct i2c_client *client,
index, page, cbase, attr);
/*
* Add generic alarm attribute only if there are no individual
- * alarm attributes, and if there is a global alarm bit.
+ * alarm attributes, if there is a global alarm bit, and if
+ * the generic status register for this page is accessible.
*/
- if (!have_alarm && attr->gbit)
+ if (!have_alarm && attr->gbit &&
+ pmbus_check_byte_register(client, page, PMBUS_STATUS_BYTE))
pmbus_add_boolean_reg(data, name, "alarm", index,
PB_STATUS_BASE + page,
attr->gbit);
@@ -1008,6 +1121,21 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_VOLTAGE_OV_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_VIN_AVG,
+ .update = true,
+ .attr = "average",
+ }, {
+ .reg = PMBUS_VIRT_READ_VIN_MIN,
+ .update = true,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_VIN_MAX,
+ .update = true,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_VIN_HISTORY,
+ .attr = "reset_history",
},
};
@@ -1032,6 +1160,21 @@ static const struct pmbus_limit_attr vout_limit_attrs[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_VOLTAGE_OV_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_VOUT_AVG,
+ .update = true,
+ .attr = "average",
+ }, {
+ .reg = PMBUS_VIRT_READ_VOUT_MIN,
+ .update = true,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_VOUT_MAX,
+ .update = true,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_VOUT_HISTORY,
+ .attr = "reset_history",
}
};
@@ -1078,6 +1221,21 @@ static const struct pmbus_limit_attr iin_limit_attrs[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_IIN_OC_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_IIN_AVG,
+ .update = true,
+ .attr = "average",
+ }, {
+ .reg = PMBUS_VIRT_READ_IIN_MIN,
+ .update = true,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_IIN_MAX,
+ .update = true,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_IIN_HISTORY,
+ .attr = "reset_history",
}
};
@@ -1097,6 +1255,21 @@ static const struct pmbus_limit_attr iout_limit_attrs[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_IOUT_OC_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_IOUT_AVG,
+ .update = true,
+ .attr = "average",
+ }, {
+ .reg = PMBUS_VIRT_READ_IOUT_MIN,
+ .update = true,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_IOUT_MAX,
+ .update = true,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_IOUT_HISTORY,
+ .attr = "reset_history",
}
};
@@ -1132,6 +1305,17 @@ static const struct pmbus_limit_attr pin_limit_attrs[] = {
.attr = "max",
.alarm = "alarm",
.sbit = PB_PIN_OP_WARNING,
+ }, {
+ .reg = PMBUS_VIRT_READ_PIN_AVG,
+ .update = true,
+ .attr = "average",
+ }, {
+ .reg = PMBUS_VIRT_READ_PIN_MAX,
+ .update = true,
+ .attr = "input_highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_PIN_HISTORY,
+ .attr = "reset_history",
}
};
@@ -1200,6 +1384,39 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_TEMP_OT_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_TEMP_MIN,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_TEMP_MAX,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_TEMP_HISTORY,
+ .attr = "reset_history",
+ }
+};
+
+static const struct pmbus_limit_attr temp_limit_attrs23[] = {
+ {
+ .reg = PMBUS_UT_WARN_LIMIT,
+ .attr = "min",
+ .alarm = "min_alarm",
+ .sbit = PB_TEMP_UT_WARNING,
+ }, {
+ .reg = PMBUS_UT_FAULT_LIMIT,
+ .attr = "lcrit",
+ .alarm = "lcrit_alarm",
+ .sbit = PB_TEMP_UT_FAULT,
+ }, {
+ .reg = PMBUS_OT_WARN_LIMIT,
+ .attr = "max",
+ .alarm = "max_alarm",
+ .sbit = PB_TEMP_OT_WARNING,
+ }, {
+ .reg = PMBUS_OT_FAULT_LIMIT,
+ .attr = "crit",
+ .alarm = "crit_alarm",
+ .sbit = PB_TEMP_OT_FAULT,
}
};
@@ -1226,8 +1443,8 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
.sfunc = PMBUS_HAVE_STATUS_TEMP,
.sbase = PB_STATUS_TEMP_BASE,
.gbit = PB_STATUS_TEMPERATURE,
- .limit = temp_limit_attrs,
- .nlimit = ARRAY_SIZE(temp_limit_attrs),
+ .limit = temp_limit_attrs23,
+ .nlimit = ARRAY_SIZE(temp_limit_attrs23),
}, {
.reg = PMBUS_READ_TEMPERATURE_3,
.class = PSC_TEMPERATURE,
@@ -1238,8 +1455,8 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
.sfunc = PMBUS_HAVE_STATUS_TEMP,
.sbase = PB_STATUS_TEMP_BASE,
.gbit = PB_STATUS_TEMPERATURE,
- .limit = temp_limit_attrs,
- .nlimit = ARRAY_SIZE(temp_limit_attrs),
+ .limit = temp_limit_attrs23,
+ .nlimit = ARRAY_SIZE(temp_limit_attrs23),
}
};
@@ -1380,7 +1597,7 @@ static int pmbus_identify_common(struct i2c_client *client,
*/
switch (vout_mode >> 5) {
case 0: /* linear mode */
- if (data->info->direct[PSC_VOLTAGE_OUT])
+ if (data->info->format[PSC_VOLTAGE_OUT] != linear)
return -ENODEV;
exponent = vout_mode & 0x1f;
@@ -1389,8 +1606,12 @@ static int pmbus_identify_common(struct i2c_client *client,
exponent |= ~0x1f;
data->exponent = exponent;
break;
+ case 1: /* VID mode */
+ if (data->info->format[PSC_VOLTAGE_OUT] != vid)
+ return -ENODEV;
+ break;
case 2: /* direct mode */
- if (!data->info->direct[PSC_VOLTAGE_OUT])
+ if (data->info->format[PSC_VOLTAGE_OUT] != direct)
return -ENODEV;
break;
default:
@@ -1457,18 +1678,6 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
ret = -EINVAL;
goto out_data;
}
- /*
- * Bail out if more than one page was configured, but we can not
- * select the highest page. This is an indication that the wrong
- * chip type was selected. Better bail out now than keep
- * returning errors later on.
- */
- if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) {
- dev_err(&client->dev, "Failed to select page %d\n",
- info->pages - 1);
- ret = -EINVAL;
- goto out_data;
- }
ret = pmbus_identify_common(client, data);
if (ret < 0) {
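pmbus_identify_common() above uses the top three bits of VOUT_MODE to pick the data format and, for linear mode, sign-extends the low five bits into the exponent that scales READ_VOUT. A standalone sketch of that decode for one example value (illustrative values, not taken from the patch):

#include <stdio.h>

/* Decode a linear-mode VOUT_MODE/READ_VOUT pair: VOUT = mantissa * 2^exponent. */
static double vout_linear(unsigned char vout_mode, unsigned short read_vout)
{
	int exponent = vout_mode & 0x1f;

	if (exponent > 0x0f)		/* 5-bit two's complement, as above */
		exponent |= ~0x1f;
	return exponent >= 0 ? (double)read_vout * (1 << exponent)
			     : (double)read_vout / (1 << -exponent);
}

int main(void)
{
	/* VOUT_MODE = 0x17: mode bits 000 (linear), exponent = -9 -> 2^-9 V/LSB */
	printf("%.3f V\n", vout_linear(0x17, 3072));	/* 3072 / 512 = 6.000 V */
	return 0;
}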
diff --git a/drivers/hwmon/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ace1c73..d0ddb60 100644
--- a/drivers/hwmon/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
block_buffer[ret] = '\0';
dev_info(&client->dev, "Device ID %s\n", block_buffer);
- mid = NULL;
- for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
- mid = &ucd9000_id[i];
+ for (mid = ucd9000_id; mid->name[0]; mid++) {
if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
break;
}
- if (!mid || !strlen(mid->name)) {
+ if (!mid->name[0]) {
dev_err(&client->dev, "Unsupported device\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index ffcc1cf..c65e9da 100644
--- a/drivers/hwmon/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
block_buffer[ret] = '\0';
dev_info(&client->dev, "Device ID %s\n", block_buffer);
- mid = NULL;
- for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
- mid = &ucd9200_id[i];
+ for (mid = ucd9200_id; mid->name[0]; mid++) {
if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
break;
}
- if (!mid || !strlen(mid->name)) {
+ if (!mid->name[0]) {
dev_err(&client->dev, "Unsupported device\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 7d231cf..fe4104c 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -32,7 +32,7 @@
#include <linux/sht15.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/* Commands */
#define SHT15_MEASURE_TEMP 0x03
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8abfa4a..ce1a32b 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -673,32 +673,33 @@ static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
/* transfer not completed */
adap->pch_i2c_xfer_in_progress = true;
- pmsg = &msgs[0];
- pmsg->flags |= adap->pch_buff_mode_en;
- status = pmsg->flags;
- pch_dbg(adap,
- "After invoking I2C_MODE_SEL :flag= 0x%x\n", status);
- /* calculate sub address length and message length */
- /* these are applicable only for buffer mode */
- subaddrlen = pmsg->buf[0];
- /* calculate actual message length excluding
- * the sub address fields */
- msglen = (pmsg->len) - (subaddrlen + 1);
- if (status & (I2C_M_RD)) {
- pch_dbg(adap, "invoking pch_i2c_readbytes\n");
- ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num),
- (i == 0));
- } else {
- pch_dbg(adap, "invoking pch_i2c_writebytes\n");
- ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num),
- (i == 0));
+ for (i = 0; i < num && ret >= 0; i++) {
+ pmsg = &msgs[i];
+ pmsg->flags |= adap->pch_buff_mode_en;
+ status = pmsg->flags;
+ pch_dbg(adap,
+ "After invoking I2C_MODE_SEL :flag= 0x%x\n", status);
+ /* calculate sub address length and message length */
+ /* these are applicable only for buffer mode */
+ subaddrlen = pmsg->buf[0];
+ /* calculate actual message length excluding
+ * the sub address fields */
+ msglen = (pmsg->len) - (subaddrlen + 1);
+
+ if ((status & (I2C_M_RD)) != false) {
+ ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num),
+ (i == 0));
+ } else {
+ ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num),
+ (i == 0));
+ }
}
adap->pch_i2c_xfer_in_progress = false; /* transfer completed */
mutex_unlock(&pch_mutex);
- return ret;
+ return (ret < 0) ? ret : num;
}
/**
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 0c731ca..b228e09 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -146,6 +146,7 @@ struct i2c_nmk_client {
* @stop: stop condition
* @xfer_complete: acknowledge completion for a I2C message
* @result: controller propogated result
+ * @regulator: pointer to i2c regulator
* @busy: Busy doing transfer
*/
struct nmk_i2c_dev {
@@ -417,12 +418,12 @@ static int read_i2c(struct nmk_i2c_dev *dev)
writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
dev->virtbase + I2C_IMSCR);
- timeout = wait_for_completion_interruptible_timeout(
+ timeout = wait_for_completion_timeout(
&dev->xfer_complete, dev->adap.timeout);
if (timeout < 0) {
dev_err(&dev->pdev->dev,
- "wait_for_completion_interruptible_timeout"
+ "wait_for_completion_timeout"
"returned %d waiting for event\n", timeout);
status = timeout;
}
@@ -504,12 +505,12 @@ static int write_i2c(struct nmk_i2c_dev *dev)
writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
dev->virtbase + I2C_IMSCR);
- timeout = wait_for_completion_interruptible_timeout(
+ timeout = wait_for_completion_timeout(
&dev->xfer_complete, dev->adap.timeout);
if (timeout < 0) {
dev_err(&dev->pdev->dev,
- "wait_for_completion_interruptible_timeout"
+ "wait_for_completion_timeout "
"returned %d waiting for event\n", timeout);
status = timeout;
}
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 1a766cf..2dfb631 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1139,41 +1139,12 @@ omap_i2c_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_SUSPEND
-static int omap_i2c_suspend(struct device *dev)
-{
- if (!pm_runtime_suspended(dev))
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
- dev->bus->pm->runtime_suspend(dev);
-
- return 0;
-}
-
-static int omap_i2c_resume(struct device *dev)
-{
- if (!pm_runtime_suspended(dev))
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
- dev->bus->pm->runtime_resume(dev);
-
- return 0;
-}
-
-static struct dev_pm_ops omap_i2c_pm_ops = {
- .suspend = omap_i2c_suspend,
- .resume = omap_i2c_resume,
-};
-#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
-#else
-#define OMAP_I2C_PM_OPS NULL
-#endif
-
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
- .pm = OMAP_I2C_PM_OPS,
},
};
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 6659d26..b73da6c 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
return -EINVAL;
}
sds = kzalloc(sizeof(*sds), GFP_KERNEL);
- if (!sds)
+ if (!sds) {
+ ret = -ENOMEM;
goto err_mem;
+ }
for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
sds->pdev[i] = add_i2c_device(dev, i);
if (IS_ERR(sds->pdev[i])) {
+ ret = PTR_ERR(sds->pdev[i]);
while (--i >= 0)
platform_device_unregister(sds->pdev[i]);
goto err_dev_add;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index fb3b4f8..3c94c4a 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c-tegra.h>
+#include <linux/of_i2c.h>
#include <asm/unaligned.h>
@@ -269,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
/* Rounds down to not include partial word at the end of buf */
words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
- if (words_to_transfer > tx_fifo_avail)
- words_to_transfer = tx_fifo_avail;
- i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
-
- buf += words_to_transfer * BYTES_PER_FIFO_WORD;
- buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
- tx_fifo_avail -= words_to_transfer;
+ /* It's very common to have < 4 bytes, so optimize that case. */
+ if (words_to_transfer) {
+ if (words_to_transfer > tx_fifo_avail)
+ words_to_transfer = tx_fifo_avail;
+
+ /*
+ * Update state before writing to FIFO. If this causes us
+ * to finish writing all bytes (AKA buf_remaining goes to 0) we
+ * have a potential for an interrupt (PACKET_XFER_COMPLETE is
+ * not maskable). We need to make sure that the isr sees
+ * buf_remaining as 0 and doesn't call us back re-entrantly.
+ */
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ tx_fifo_avail -= words_to_transfer;
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf +
+ words_to_transfer * BYTES_PER_FIFO_WORD;
+ barrier();
+
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ }
/*
* If there is a partial word at the end of buf, handle it manually to
@@ -286,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
if (tx_fifo_avail > 0 && buf_remaining > 0) {
BUG_ON(buf_remaining > 3);
memcpy(&val, buf, buf_remaining);
+
+ /* Again update before writing to FIFO to make sure isr sees. */
+ i2c_dev->msg_buf_remaining = 0;
+ i2c_dev->msg_buf = NULL;
+ barrier();
+
i2c_writel(i2c_dev, val, I2C_TX_FIFO);
- buf_remaining = 0;
- tx_fifo_avail--;
}
- BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
- i2c_dev->msg_buf_remaining = buf_remaining;
- i2c_dev->msg_buf = buf;
return 0;
}
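The reworked FIFO fill above follows one pattern throughout: update msg_buf and msg_buf_remaining and issue a compiler barrier before the FIFO write, because that write can complete the packet and raise the non-maskable PACKET_XFER_COMPLETE interrupt immediately, and the isr must not see stale state. A stripped-down sketch of that ordering (kernel-idiom sketch; the struct, field and function names are illustrative, not the driver's real ones):

struct xfer_state {
	u8 *buf;			/* next byte to send */
	size_t remaining;		/* bytes left in the message */
};

static void fifo_fill_words(struct xfer_state *st, unsigned int words,
			    void __iomem *fifo)
{
	u32 *src = (u32 *)st->buf;

	/* 1. Publish the new bookkeeping first ... */
	st->buf += words * 4;
	st->remaining -= words * 4;
	/* 2. ... keep the compiler from reordering it past the MMIO write ... */
	barrier();
	/* 3. ... and only then touch the hardware, which may fire the irq. */
	while (words--)
		writel(*src++, fifo);
}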
@@ -410,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
}
- if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
- !i2c_dev->msg_buf_remaining)
+ if (status & I2C_INT_PACKET_XFER_COMPLETE) {
+ BUG_ON(i2c_dev->msg_buf_remaining);
complete(&i2c_dev->msg_complete);
+ }
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
if (i2c_dev->is_dvc)
@@ -530,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
static u32 tegra_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm tegra_i2c_algo = {
@@ -546,6 +565,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
struct resource *iomem;
struct clk *clk;
struct clk *i2c_clk;
+ const unsigned int *prop;
void *base;
int irq;
int ret = 0;
@@ -603,7 +623,17 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->irq = irq;
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
- i2c_dev->bus_clk_rate = pdata ? pdata->bus_clk_rate : 100000;
+
+ i2c_dev->bus_clk_rate = 100000; /* default clock rate */
+ if (pdata) {
+ i2c_dev->bus_clk_rate = pdata->bus_clk_rate;
+
+ } else if (i2c_dev->dev->of_node) { /* if there is a device tree node ... */
+ prop = of_get_property(i2c_dev->dev->of_node,
+ "clock-frequency", NULL);
+ if (prop)
+ i2c_dev->bus_clk_rate = be32_to_cpup(prop);
+ }
if (pdev->id == 3)
i2c_dev->is_dvc = 1;
@@ -633,6 +663,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->adapter.algo = &tegra_i2c_algo;
i2c_dev->adapter.dev.parent = &pdev->dev;
i2c_dev->adapter.nr = pdev->id;
+ i2c_dev->adapter.dev.of_node = pdev->dev.of_node;
ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
if (ret) {
@@ -640,6 +671,8 @@ static int tegra_i2c_probe(struct platform_device *pdev)
goto err_free_irq;
}
+ of_i2c_register_devices(&i2c_dev->adapter);
+
return 0;
err_free_irq:
free_irq(i2c_dev->irq, i2c_dev);
@@ -704,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
}
#endif
+#if defined(CONFIG_OF)
+/* Match table for of_platform binding */
+static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
+ { .compatible = "nvidia,tegra20-i2c", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
+#else
+#define tegra_i2c_of_match NULL
+#endif
+
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
.remove = tegra_i2c_remove,
@@ -714,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
.driver = {
.name = "tegra-i2c",
.owner = THIS_MODULE,
+ .of_match_table = tegra_i2c_of_match,
},
};
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 3be60da..67cbcfa 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -141,6 +141,8 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
}
+ if (hwif->index > 0)
+ pci_dev_put(dev);
}
static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 542603b..962693b 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/ata_platform.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
static void __devinit plat_ide_setup_ports(struct ide_hw *hw,
@@ -95,7 +96,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
hw.dev = &pdev->dev;
- d.irq_flags = res_irq->flags;
+ d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ d.irq_flags |= IRQF_SHARED;
+
if (mmio)
d.host_flags |= IDE_HFLAG_MMIO;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 0347eed..40c8353 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -31,7 +31,7 @@
*/
#include <rdma/ib_umem.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "iw_cxgb4.h"
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index f09914c..54c0d23 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -58,7 +58,7 @@
#include <linux/cpu.h>
#include <linux/device.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/abs_addr.h>
#include <asm/ibmebus.h>
#include <asm/io.h>
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 73bc184..c118663 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -34,7 +34,7 @@
#define TCPOPT_TIMESTAMP 8
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 7b6985a..b3cc1e0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -45,7 +45,7 @@
#include <net/neighbour.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 43f89ba..fe89c46 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -717,11 +717,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
- struct neighbour *n;
+ struct neighbour *n = NULL;
unsigned long flags;
- n = dst_get_neighbour(skb_dst(skb));
- if (likely(skb_dst(skb) && n)) {
+ if (likely(skb_dst(skb)))
+ n = dst_get_neighbour(skb_dst(skb));
+
+ if (likely(n)) {
if (unlikely(!*to_ipoib_neigh(n))) {
ipoib_path_lookup(skb, dev);
return NETDEV_TX_OK;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 8db008d..9c61b9c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -101,13 +101,17 @@ iscsi_iser_recv(struct iscsi_conn *conn,
/* verify PDU length */
datalen = ntoh24(hdr->dlength);
- if (datalen != rx_data_len) {
- printk(KERN_ERR "iscsi_iser: datalen %d (hdr) != %d (IB) \n",
- datalen, rx_data_len);
+ if (datalen > rx_data_len || (datalen + 4) < rx_data_len) {
+ iser_err("wrong datalen %d (hdr), %d (IB)\n",
+ datalen, rx_data_len);
rc = ISCSI_ERR_DATALEN;
goto error;
}
+ if (datalen != rx_data_len)
+ iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
+ datalen, rx_data_len);
+
/* read AHS */
ahslen = hdr->hlength * 4;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 342cbc1..db6f3ce 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -89,7 +89,7 @@
} while (0)
#define SHIFT_4K 12
-#define SIZE_4K (1UL << SHIFT_4K)
+#define SIZE_4K (1ULL << SHIFT_4K)
#define MASK_4K (~(SIZE_4K-1))
/* support up to 512KB in one RDMA */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 95a08a8..f299de6 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -271,7 +271,7 @@ int iser_send_command(struct iscsi_conn *conn,
unsigned long edtl;
int err;
struct iser_data_buf *data_buf;
- struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
@@ -412,7 +412,7 @@ int iser_send_control(struct iscsi_conn *conn,
memcpy(iser_conn->ib_conn->login_buf, task->data,
task->data_count);
tx_dsg->addr = iser_conn->ib_conn->login_dma;
- tx_dsg->length = data_seg_len;
+ tx_dsg->length = task->data_count;
tx_dsg->lkey = device->mr->lkey;
mdesc->num_sge = 2;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7d5109b..0bfa545 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -39,7 +39,7 @@
#include <linux/random.h>
#include <linux/jiffies.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 9882971..358cd7e 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -139,7 +139,7 @@ struct analog_port {
#include <linux/i8253.h>
#define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0)
-#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? CLOCK_TICK_RATE / HZ : 0)))
+#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
#define TIME_NAME (cpu_has_tsc?"TSC":"PIT")
static unsigned int get_time_pit(void)
{
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 56abf3d..d728875 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -154,10 +154,13 @@ static const struct xpad_device {
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
{ 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
@@ -236,9 +239,10 @@ static struct usb_device_id xpad_table [] = {
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
- XPAD_XBOX360_VENDOR(0x1bad), /* Rock Band Drums */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band Guitar and Drums */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
{ }
};
@@ -545,7 +549,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
struct usb_endpoint_descriptor *ep_irq_out;
int error;
- if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
+ if (xpad->xtype == XTYPE_UNKNOWN)
return 0;
xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
@@ -579,13 +583,13 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
static void xpad_stop_output(struct usb_xpad *xpad)
{
- if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX)
+ if (xpad->xtype != XTYPE_UNKNOWN)
usb_kill_urb(xpad->irq_out);
}
static void xpad_deinit_output(struct usb_xpad *xpad)
{
- if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX) {
+ if (xpad->xtype != XTYPE_UNKNOWN) {
usb_free_urb(xpad->irq_out);
usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
xpad->odata, xpad->odata_dma);
@@ -632,6 +636,23 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+ case XTYPE_XBOX360W:
+ xpad->odata[0] = 0x00;
+ xpad->odata[1] = 0x01;
+ xpad->odata[2] = 0x0F;
+ xpad->odata[3] = 0xC0;
+ xpad->odata[4] = 0x00;
+ xpad->odata[5] = strong / 256;
+ xpad->odata[6] = weak / 256;
+ xpad->odata[7] = 0x00;
+ xpad->odata[8] = 0x00;
+ xpad->odata[9] = 0x00;
+ xpad->odata[10] = 0x00;
+ xpad->odata[11] = 0x00;
+ xpad->irq_out->transfer_buffer_length = 12;
+
+ return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
+
default:
dbg("%s - rumble command sent to unsupported xpad type: %d",
__func__, xpad->xtype);
@@ -644,7 +665,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
static int xpad_init_ff(struct usb_xpad *xpad)
{
- if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
+ if (xpad->xtype == XTYPE_UNKNOWN)
return 0;
input_set_capability(xpad->dev, EV_FF, FF_RUMBLE);
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index af45d27..7b404e5 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -9,7 +9,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 6315986..c770826 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -8,7 +8,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 11478eb..19cfc0c 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1578,14 +1578,14 @@ static int __init atkbd_setup_forced_release(const struct dmi_system_id *id)
atkbd_platform_fixup = atkbd_apply_forced_release_keylist;
atkbd_platform_fixup_data = id->driver_data;
- return 0;
+ return 1;
}
static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
{
atkbd_platform_scancode_fixup = id->driver_data;
- return 0;
+ return 1;
}
static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index c8242dd..aa17e02 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -20,6 +20,7 @@
* flag.
*/
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 6e6145b..67df91a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -2,6 +2,7 @@
* Driver for keys on GPIO lines capable of generating interrupts.
*
* Copyright 2005 Phil Blundell
+ * Copyright 2010, 2011 David Jander <david@protonic.nl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,6 +26,8 @@
#include <linux/gpio_keys.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
struct gpio_button_data {
struct gpio_keys_button *button;
@@ -415,7 +418,7 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
if (!button->can_disable)
irqflags |= IRQF_SHARED;
- error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
+ error = request_threaded_irq(irq, NULL, gpio_keys_isr, irqflags, desc, bdata);
if (error < 0) {
dev_err(dev, "Unable to claim irq %d; error %d\n",
irq, error);
@@ -445,15 +448,120 @@ static void gpio_keys_close(struct input_dev *input)
ddata->disable(input->dev.parent);
}
+/*
+ * Handlers for alternative sources of platform_data
+ */
+#ifdef CONFIG_OF
+/*
+ * Translate OpenFirmware node properties into platform_data
+ */
+static int gpio_keys_get_devtree_pdata(struct device *dev,
+ struct gpio_keys_platform_data *pdata)
+{
+ struct device_node *node, *pp;
+ int i;
+ struct gpio_keys_button *buttons;
+ const u32 *reg;
+ int len;
+
+ node = dev->of_node;
+ if (node == NULL)
+ return -ENODEV;
+
+ memset(pdata, 0, sizeof *pdata);
+
+ pdata->rep = !!of_get_property(node, "autorepeat", &len);
+
+ /* First count the subnodes */
+ pdata->nbuttons = 0;
+ pp = NULL;
+ while ((pp = of_get_next_child(node, pp)))
+ pdata->nbuttons++;
+
+ if (pdata->nbuttons == 0)
+ return -ENODEV;
+
+ buttons = kzalloc(pdata->nbuttons * (sizeof *buttons), GFP_KERNEL);
+ if (!buttons)
+ return -ENOMEM;
+
+ pp = NULL;
+ i = 0;
+ while ((pp = of_get_next_child(node, pp))) {
+ enum of_gpio_flags flags;
+
+ if (!of_find_property(pp, "gpios", NULL)) {
+ pdata->nbuttons--;
+ dev_warn(dev, "Found button without gpios\n");
+ continue;
+ }
+ buttons[i].gpio = of_get_gpio_flags(pp, 0, &flags);
+ buttons[i].active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+ reg = of_get_property(pp, "linux,code", &len);
+ if (!reg) {
+ dev_err(dev, "Button without keycode: 0x%x\n", buttons[i].gpio);
+ goto out_fail;
+ }
+ buttons[i].code = be32_to_cpup(reg);
+
+ buttons[i].desc = of_get_property(pp, "label", &len);
+
+ reg = of_get_property(pp, "linux,input-type", &len);
+ buttons[i].type = reg ? be32_to_cpup(reg) : EV_KEY;
+
+ buttons[i].wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+
+ reg = of_get_property(pp, "debounce-interval", &len);
+ buttons[i].debounce_interval = reg ? be32_to_cpup(reg) : 5;
+
+ i++;
+ }
+
+ pdata->buttons = buttons;
+
+ return 0;
+
+out_fail:
+ kfree(buttons);
+ return -ENODEV;
+}
+
+static struct of_device_id gpio_keys_of_match[] = {
+ { .compatible = "gpio-keys", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpio_keys_of_match);
+
+#else
+
+static int gpio_keys_get_devtree_pdata(struct device *dev,
+ struct gpio_keys_platform_data *altp)
+{
+ return -ENODEV;
+}
+
+#define gpio_keys_of_match NULL
+
+#endif
+
static int __devinit gpio_keys_probe(struct platform_device *pdev)
{
struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
struct gpio_keys_drvdata *ddata;
struct device *dev = &pdev->dev;
+ struct gpio_keys_platform_data alt_pdata;
struct input_dev *input;
int i, error;
int wakeup = 0;
+ if (!pdata) {
+ error = gpio_keys_get_devtree_pdata(dev, &alt_pdata);
+ if (error)
+ return error;
+ pdata = &alt_pdata;
+ }
+
ddata = kzalloc(sizeof(struct gpio_keys_drvdata) +
pdata->nbuttons * sizeof(struct gpio_button_data),
GFP_KERNEL);
@@ -544,13 +652,15 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
fail1:
input_free_device(input);
kfree(ddata);
+ /* If we have no platform_data, we allocated buttons dynamically. */
+ if (!pdev->dev.platform_data)
+ kfree(pdata->buttons);
return error;
}
static int __devexit gpio_keys_remove(struct platform_device *pdev)
{
- struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev);
struct input_dev *input = ddata->input;
int i;
@@ -559,31 +669,39 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
- for (i = 0; i < pdata->nbuttons; i++) {
- int irq = gpio_to_irq(pdata->buttons[i].gpio);
+ for (i = 0; i < ddata->n_buttons; i++) {
+ int irq = gpio_to_irq(ddata->data[i].button->gpio);
free_irq(irq, &ddata->data[i]);
if (ddata->data[i].timer_debounce)
del_timer_sync(&ddata->data[i].timer);
cancel_work_sync(&ddata->data[i].work);
- gpio_free(pdata->buttons[i].gpio);
+ gpio_free(ddata->data[i].button->gpio);
}
input_unregister_device(input);
+ /*
+ * If we had no platform_data, we allocated buttons dynamically, and
+ * must free them here. ddata->data[0].button is the pointer to the
+ * beginning of the allocated array.
+ */
+ if (!pdev->dev.platform_data)
+ kfree(ddata->data[0].button);
+
+ kfree(ddata);
+
return 0;
}
-
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int gpio_keys_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
int i;
- if (device_may_wakeup(&pdev->dev)) {
- for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_keys_button *button = &pdata->buttons[i];
+ if (device_may_wakeup(dev)) {
+ for (i = 0; i < ddata->n_buttons; i++) {
+ struct gpio_keys_button *button = ddata->data[i].button;
if (button->wakeup) {
int irq = gpio_to_irq(button->gpio);
enable_irq_wake(irq);
@@ -596,15 +714,13 @@ static int gpio_keys_suspend(struct device *dev)
static int gpio_keys_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev);
- struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
int i;
- for (i = 0; i < pdata->nbuttons; i++) {
+ for (i = 0; i < ddata->n_buttons; i++) {
- struct gpio_keys_button *button = &pdata->buttons[i];
- if (button->wakeup && device_may_wakeup(&pdev->dev)) {
+ struct gpio_keys_button *button = ddata->data[i].button;
+ if (button->wakeup && device_may_wakeup(dev)) {
int irq = gpio_to_irq(button->gpio);
disable_irq_wake(irq);
}
@@ -615,22 +731,18 @@ static int gpio_keys_resume(struct device *dev)
return 0;
}
-
-static const struct dev_pm_ops gpio_keys_pm_ops = {
- .suspend = gpio_keys_suspend,
- .resume = gpio_keys_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
+
static struct platform_driver gpio_keys_device_driver = {
.probe = gpio_keys_probe,
.remove = __devexit_p(gpio_keys_remove),
.driver = {
.name = "gpio-keys",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &gpio_keys_pm_ops,
-#endif
+ .of_match_table = gpio_keys_of_match,
}
};
@@ -644,10 +756,10 @@ static void __exit gpio_keys_exit(void)
platform_driver_unregister(&gpio_keys_device_driver);
}
-module_init(gpio_keys_init);
+late_initcall(gpio_keys_init);
module_exit(gpio_keys_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Phil Blundell <pb@handhelds.org>");
-MODULE_DESCRIPTION("Keyboard driver for CPU GPIOs");
+MODULE_DESCRIPTION("Keyboard driver for GPIOs");
MODULE_ALIAS("platform:gpio-keys");
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 71f744a8..756348a 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -146,7 +146,6 @@ struct lm8323_chip {
/* device lock */
struct mutex lock;
struct i2c_client *client;
- struct work_struct work;
struct input_dev *idev;
bool kp_enabled;
bool pm_suspend;
@@ -162,7 +161,6 @@ struct lm8323_chip {
#define client_to_lm8323(c) container_of(c, struct lm8323_chip, client)
#define dev_to_lm8323(d) container_of(d, struct lm8323_chip, client->dev)
-#define work_to_lm8323(w) container_of(w, struct lm8323_chip, work)
#define cdev_to_pwm(c) container_of(c, struct lm8323_pwm, cdev)
#define work_to_pwm(w) container_of(w, struct lm8323_pwm, work)
@@ -375,9 +373,9 @@ static void pwm_done(struct lm8323_pwm *pwm)
* Bottom half: handle the interrupt by posting key events, or dealing with
* errors appropriately.
*/
-static void lm8323_work(struct work_struct *work)
+static irqreturn_t lm8323_irq(int irq, void *_lm)
{
- struct lm8323_chip *lm = work_to_lm8323(work);
+ struct lm8323_chip *lm = _lm;
u8 ints;
int i;
@@ -409,16 +407,6 @@ static void lm8323_work(struct work_struct *work)
}
mutex_unlock(&lm->lock);
-}
-
-/*
- * We cannot use I2C in interrupt context, so we just schedule work.
- */
-static irqreturn_t lm8323_irq(int irq, void *data)
-{
- struct lm8323_chip *lm = data;
-
- schedule_work(&lm->work);
return IRQ_HANDLED;
}
@@ -675,7 +663,6 @@ static int __devinit lm8323_probe(struct i2c_client *client,
lm->client = client;
lm->idev = idev;
mutex_init(&lm->lock);
- INIT_WORK(&lm->work, lm8323_work);
lm->size_x = pdata->size_x;
lm->size_y = pdata->size_y;
@@ -746,9 +733,8 @@ static int __devinit lm8323_probe(struct i2c_client *client,
goto fail3;
}
- err = request_irq(client->irq, lm8323_irq,
- IRQF_TRIGGER_FALLING | IRQF_DISABLED,
- "lm8323", lm);
+ err = request_threaded_irq(client->irq, NULL, lm8323_irq,
+ IRQF_TRIGGER_LOW|IRQF_ONESHOT, "lm8323", lm);
if (err) {
dev_err(&client->dev, "could not get IRQ %d\n", client->irq);
goto fail4;
@@ -768,8 +754,11 @@ fail3:
device_remove_file(&client->dev, &dev_attr_disable_kp);
fail2:
while (--pwm >= 0)
- if (lm->pwm[pwm].enabled)
+ if (lm->pwm[pwm].enabled) {
+ device_remove_file(lm->pwm[pwm].cdev.dev,
+ &dev_attr_time);
led_classdev_unregister(&lm->pwm[pwm].cdev);
+ }
fail1:
input_free_device(idev);
kfree(lm);
@@ -783,15 +772,16 @@ static int __devexit lm8323_remove(struct i2c_client *client)
disable_irq_wake(client->irq);
free_irq(client->irq, lm);
- cancel_work_sync(&lm->work);
input_unregister_device(lm->idev);
device_remove_file(&lm->client->dev, &dev_attr_disable_kp);
for (i = 0; i < 3; i++)
- if (lm->pwm[i].enabled)
+ if (lm->pwm[i].enabled) {
+ device_remove_file(lm->pwm[i].cdev.dev, &dev_attr_time);
led_classdev_unregister(&lm->pwm[i].cdev);
+ }
kfree(lm);
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 0a9e811..1c1615d 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -43,14 +43,15 @@
* enabled capacitance sensing inputs and its run/suspend mode.
*/
#define ELECTRODE_CONF_ADDR 0x5e
+#define ELECTRODE_CONF_QUICK_CHARGE 0x80
#define AUTO_CONFIG_CTRL_ADDR 0x7b
#define AUTO_CONFIG_USL_ADDR 0x7d
#define AUTO_CONFIG_LSL_ADDR 0x7e
#define AUTO_CONFIG_TL_ADDR 0x7f
/* Threshold of touch/release trigger */
-#define TOUCH_THRESHOLD 0x0f
-#define RELEASE_THRESHOLD 0x0a
+#define TOUCH_THRESHOLD 0x08
+#define RELEASE_THRESHOLD 0x05
/* Masks for touch and release triggers */
#define TOUCH_STATUS_MASK 0xfff
/* MPR121 has 12 keys */
@@ -127,7 +128,7 @@ static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata,
struct i2c_client *client)
{
const struct mpr121_init_register *reg;
- unsigned char usl, lsl, tl;
+ unsigned char usl, lsl, tl, eleconf;
int i, t, vdd, ret;
/* Set up touch/release threshold for ele0-ele11 */
@@ -163,8 +164,15 @@ static int __devinit mpr121_phys_init(const struct mpr121_platform_data *pdata,
ret = i2c_smbus_write_byte_data(client, AUTO_CONFIG_USL_ADDR, usl);
ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_LSL_ADDR, lsl);
ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_TL_ADDR, tl);
+
+ /*
+ * The quick charge bit lets the capacitive charge reach the ready
+ * state quickly; otherwise the buttons may not function after
+ * system boot.
+ */
+ eleconf = mpr121->keycount | ELECTRODE_CONF_QUICK_CHARGE;
ret |= i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR,
- mpr121->keycount);
+ eleconf);
if (ret != 0)
goto err_i2c_write;
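As a worked example of the configuration write above, with all 12 sensing electrodes enabled the register value becomes:

	u8 eleconf = 0x0c | ELECTRODE_CONF_QUICK_CHARGE;	/* 12 electrodes + quick charge: 0x0c | 0x80 == 0x8c */

so the part enters run mode with quick charge enabled, instead of the bare keycount the driver wrote before.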
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 6229c3e..e7cc51d 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -700,9 +700,9 @@ static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
return 0;
err_pmic_reg_read:
- free_irq(kp->key_stuck_irq, NULL);
+ free_irq(kp->key_stuck_irq, kp);
err_req_stuck_irq:
- free_irq(kp->key_sense_irq, NULL);
+ free_irq(kp->key_sense_irq, kp);
err_gpio_config:
err_get_irq:
input_free_device(kp->input);
@@ -717,8 +717,8 @@ static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev)
struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
device_init_wakeup(&pdev->dev, 0);
- free_irq(kp->key_stuck_irq, NULL);
- free_irq(kp->key_sense_irq, NULL);
+ free_irq(kp->key_stuck_irq, kp);
+ free_irq(kp->key_sense_irq, kp);
input_unregister_device(kp->input);
kfree(kp);
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index ca7b891..b21bf5b 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -239,8 +239,6 @@ static int __devexit qt1070_remove(struct i2c_client *client)
input_unregister_device(data->input);
kfree(data);
- i2c_set_clientdata(client, NULL);
-
return 0;
}
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 6876700..934aeb583 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -291,7 +291,7 @@ static int __devexit sh_keysc_remove(struct platform_device *pdev)
return 0;
}
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
static int sh_keysc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 2b3b73e..a5a7791 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -19,6 +19,7 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/platform_device.h>
@@ -37,7 +38,7 @@
#define KBC_ROW_SCAN_DLY 5
/* KBC uses a 32KHz clock so a cycle = 1/32Khz */
-#define KBC_CYCLE_USEC 32
+#define KBC_CYCLE_MS 32
/* KBC Registers */
@@ -647,7 +648,7 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
- kbc->repoll_dly = ((kbc->repoll_dly * KBC_CYCLE_USEC) + 999) / 1000;
+ kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS);
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
@@ -657,7 +658,7 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, kbc);
- input_dev->evbit[0] = BIT_MASK(EV_KEY);
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_dev->keycode = kbc->keycode;
@@ -701,7 +702,7 @@ err_iounmap:
err_free_mem_region:
release_mem_region(res->start, resource_size(res));
err_free_mem:
- input_free_device(kbc->idev);
+ input_free_device(input_dev);
kfree(kbc);
return err;
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index c8f097a..1c58681 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -337,5 +337,5 @@ module_exit(keypad_exit);
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Keypad Driver");
-MODULE_ALIAS("platform: tnetv107x-keypad");
+MODULE_ALIAS("platform:tnetv107x-keypad");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 45dc6aa..c9104bb 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -100,6 +100,27 @@ config INPUT_MAX8925_ONKEY
To compile this driver as a module, choose M here: the module
will be called max8925_onkey.
+config INPUT_MMA8450
+ tristate "MMA8450 - Freescale's 3-Axis, 8/12-bit Digital Accelerometer"
+ depends on I2C
+ select INPUT_POLLDEV
+ help
+ Say Y here if you want to support Freescale's MMA8450 Accelerometer
+ through the I2C interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mma8450.
+
+config INPUT_MPU3050
+ tristate "MPU3050 Triaxial gyroscope sensor"
+ depends on I2C
+ help
+ Say Y here if you want to support the InvenSense MPU3050
+ gyroscope connected via an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mpu3050.
+
config INPUT_APANEL
tristate "Fujitsu Lifebook Application Panel buttons"
depends on X86 && I2C && LEDS_CLASS
@@ -209,6 +230,23 @@ config INPUT_KEYSPAN_REMOTE
To compile this driver as a module, choose M here: the module will
be called keyspan_remote.
+config INPUT_KXTJ9
+ tristate "Kionix KXTJ9 tri-axis digital accelerometer"
+ depends on I2C
+ help
+ Say Y here to enable support for the Kionix KXTJ9 digital tri-axis
+ accelerometer.
+
+ To compile this driver as a module, choose M here: the module will
+ be called kxtj9.
+
+config INPUT_KXTJ9_POLLED_MODE
+ bool "Enable polling mode support"
+ depends on INPUT_KXTJ9
+ select INPUT_POLLDEV
+ help
+ Say Y here if you need the accelerometer to work in polling mode.
+
config INPUT_POWERMATE
tristate "Griffin PowerMate and Contour Jog support"
depends on USB_ARCH_HAS_HCD
@@ -267,7 +305,7 @@ config INPUT_TWL4030_PWRBUTTON
config INPUT_TWL4030_VIBRA
tristate "Support for TWL4030 Vibrator"
depends on TWL4030_CORE
- select TWL4030_CODEC
+ select MFD_TWL4030_AUDIO
select INPUT_FF_MEMLESS
help
This option enables support for TWL4030 Vibrator Driver.
@@ -275,6 +313,17 @@ config INPUT_TWL4030_VIBRA
To compile this driver as a module, choose M here. The module will
be called twl4030_vibra.
+config INPUT_TWL6040_VIBRA
+ tristate "Support for TWL6040 Vibrator"
+ depends on TWL4030_CORE
+ select TWL6040_CORE
+ select INPUT_FF_MEMLESS
+ help
+ This option enables support for TWL6040 Vibrator Driver.
+
+ To compile this driver as a module, choose M here. The module will
+ be called twl6040_vibra.
+
config INPUT_UINPUT
tristate "User level driver support"
help
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 38efb2c..299ad5e 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -25,8 +25,11 @@ obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
+obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
+obj-$(CONFIG_INPUT_MMA8450) += mma8450.o
+obj-$(CONFIG_INPUT_MPU3050) += mpu3050.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
@@ -40,9 +43,9 @@ obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o
+obj-$(CONFIG_INPUT_TWL6040_VIBRA) += twl6040-vibra.o
obj-$(CONFIG_INPUT_UINPUT) += uinput.o
obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
-
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index e21deb1..025417d 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -1,7 +1,7 @@
/*
* AD714X CapTouch Programmable Controller driver (I2C bus)
*
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -27,54 +27,49 @@ static int ad714x_i2c_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume);
-static int ad714x_i2c_write(struct device *dev, unsigned short reg,
- unsigned short data)
+static int ad714x_i2c_write(struct ad714x_chip *chip,
+ unsigned short reg, unsigned short data)
{
- struct i2c_client *client = to_i2c_client(dev);
- int ret = 0;
- u8 *_reg = (u8 *)&reg;
- u8 *_data = (u8 *)&data;
-
- u8 tx[4] = {
- _reg[1],
- _reg[0],
- _data[1],
- _data[0]
- };
-
- ret = i2c_master_send(client, tx, 4);
- if (ret < 0)
- dev_err(&client->dev, "I2C write error\n");
-
- return ret;
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ int error;
+
+ chip->xfer_buf[0] = cpu_to_be16(reg);
+ chip->xfer_buf[1] = cpu_to_be16(data);
+
+ error = i2c_master_send(client, (u8 *)chip->xfer_buf,
+ 2 * sizeof(*chip->xfer_buf));
+ if (unlikely(error < 0)) {
+ dev_err(&client->dev, "I2C write error: %d\n", error);
+ return error;
+ }
+
+ return 0;
}
-static int ad714x_i2c_read(struct device *dev, unsigned short reg,
- unsigned short *data)
+static int ad714x_i2c_read(struct ad714x_chip *chip,
+ unsigned short reg, unsigned short *data, size_t len)
{
- struct i2c_client *client = to_i2c_client(dev);
- int ret = 0;
- u8 *_reg = (u8 *)&reg;
- u8 *_data = (u8 *)data;
-
- u8 tx[2] = {
- _reg[1],
- _reg[0]
- };
- u8 rx[2];
-
- ret = i2c_master_send(client, tx, 2);
- if (ret >= 0)
- ret = i2c_master_recv(client, rx, 2);
-
- if (unlikely(ret < 0)) {
- dev_err(&client->dev, "I2C read error\n");
- } else {
- _data[0] = rx[1];
- _data[1] = rx[0];
+ struct i2c_client *client = to_i2c_client(chip->dev);
+ int i;
+ int error;
+
+ chip->xfer_buf[0] = cpu_to_be16(reg);
+
+ error = i2c_master_send(client, (u8 *)chip->xfer_buf,
+ sizeof(*chip->xfer_buf));
+ if (error >= 0)
+ error = i2c_master_recv(client, (u8 *)chip->xfer_buf,
+ len * sizeof(*chip->xfer_buf));
+
+ if (unlikely(error < 0)) {
+ dev_err(&client->dev, "I2C read error: %d\n", error);
+ return error;
}
- return ret;
+ for (i = 0; i < len; i++)
+ data[i] = be16_to_cpu(chip->xfer_buf[i]);
+
+ return 0;
}
static int __devinit ad714x_i2c_probe(struct i2c_client *client,
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 4120dd5..875b508 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -1,12 +1,12 @@
/*
* AD714X CapTouch Programmable Controller driver (SPI bus)
*
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
-#include <linux/input.h> /* BUS_I2C */
+#include <linux/input.h> /* BUS_SPI */
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/pm.h>
@@ -30,30 +30,68 @@ static int ad714x_spi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume);
-static int ad714x_spi_read(struct device *dev, unsigned short reg,
- unsigned short *data)
+static int ad714x_spi_read(struct ad714x_chip *chip,
+ unsigned short reg, unsigned short *data, size_t len)
{
- struct spi_device *spi = to_spi_device(dev);
- unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg;
+ struct spi_device *spi = to_spi_device(chip->dev);
+ struct spi_message message;
+ struct spi_transfer xfer[2];
+ int i;
+ int error;
+
+ spi_message_init(&message);
+ memset(xfer, 0, sizeof(xfer));
+
+ chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX |
+ AD714x_SPI_READ | reg);
+ xfer[0].tx_buf = &chip->xfer_buf[0];
+ xfer[0].len = sizeof(chip->xfer_buf[0]);
+ spi_message_add_tail(&xfer[0], &message);
+
+ xfer[1].rx_buf = &chip->xfer_buf[1];
+ xfer[1].len = sizeof(chip->xfer_buf[1]) * len;
+ spi_message_add_tail(&xfer[1], &message);
+
+ error = spi_sync(spi, &message);
+ if (unlikely(error)) {
+ dev_err(chip->dev, "SPI read error: %d\n", error);
+ return error;
+ }
+
+ for (i = 0; i < len; i++)
+ data[i] = be16_to_cpu(chip->xfer_buf[i + 1]);
- return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2);
+ return 0;
}
-static int ad714x_spi_write(struct device *dev, unsigned short reg,
- unsigned short data)
+static int ad714x_spi_write(struct ad714x_chip *chip,
+ unsigned short reg, unsigned short data)
{
- struct spi_device *spi = to_spi_device(dev);
- unsigned short tx[2] = {
- AD714x_SPI_CMD_PREFIX | reg,
- data
- };
+ struct spi_device *spi = to_spi_device(chip->dev);
+ int error;
+
+ chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg);
+ chip->xfer_buf[1] = cpu_to_be16(data);
+
+ error = spi_write(spi, (u8 *)chip->xfer_buf,
+ 2 * sizeof(*chip->xfer_buf));
+ if (unlikely(error)) {
+ dev_err(chip->dev, "SPI write error: %d\n", error);
+ return error;
+ }
- return spi_write(spi, (u8 *)tx, 4);
+ return 0;
}
static int __devinit ad714x_spi_probe(struct spi_device *spi)
{
struct ad714x_chip *chip;
+ int err;
+
+ spi->bits_per_word = 8;
+ err = spi_setup(spi);
+ if (err < 0)
+ return err;
chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,
ad714x_spi_read, ad714x_spi_write);
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index c3a62c4..ca42c7d 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -1,7 +1,7 @@
/*
* AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A
*
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -59,7 +59,6 @@
#define STAGE11_AMBIENT 0x27D
#define PER_STAGE_REG_NUM 36
-#define STAGE_NUM 12
#define STAGE_CFGREG_NUM 8
#define SYS_CFGREG_NUM 8
@@ -124,27 +123,6 @@ struct ad714x_driver_data {
* information to integrate all things which will be private data
* of spi/i2c device
*/
-struct ad714x_chip {
- unsigned short h_state;
- unsigned short l_state;
- unsigned short c_state;
- unsigned short adc_reg[STAGE_NUM];
- unsigned short amb_reg[STAGE_NUM];
- unsigned short sensor_val[STAGE_NUM];
-
- struct ad714x_platform_data *hw;
- struct ad714x_driver_data *sw;
-
- int irq;
- struct device *dev;
- ad714x_read_t read;
- ad714x_write_t write;
-
- struct mutex mutex;
-
- unsigned product;
- unsigned version;
-};
static void ad714x_use_com_int(struct ad714x_chip *ad714x,
int start_stage, int end_stage)
@@ -154,13 +132,13 @@ static void ad714x_use_com_int(struct ad714x_chip *ad714x,
mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1);
- ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data);
+ ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1);
data |= 1 << end_stage;
- ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data);
+ ad714x->write(ad714x, STG_COM_INT_EN_REG, data);
- ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data);
+ ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1);
data &= ~mask;
- ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data);
+ ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data);
}
static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
@@ -171,13 +149,13 @@ static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1);
- ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data);
+ ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1);
data &= ~(1 << end_stage);
- ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data);
+ ad714x->write(ad714x, STG_COM_INT_EN_REG, data);
- ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data);
+ ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1);
data |= mask;
- ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data);
+ ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data);
}
static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x,
@@ -273,15 +251,16 @@ static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
int i;
+ ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage,
+ &ad714x->adc_reg[hw->start_stage],
+ hw->end_stage - hw->start_stage + 1);
+
for (i = hw->start_stage; i <= hw->end_stage; i++) {
- ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
- &ad714x->adc_reg[i]);
- ad714x->read(ad714x->dev,
- STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
- &ad714x->amb_reg[i]);
-
- ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] -
- ad714x->amb_reg[i]);
+ ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i], 1);
+
+ ad714x->sensor_val[i] =
+ abs(ad714x->adc_reg[i] - ad714x->amb_reg[i]);
}
}
@@ -444,15 +423,16 @@ static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
int i;
+ ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage,
+ &ad714x->adc_reg[hw->start_stage],
+ hw->end_stage - hw->start_stage + 1);
+
for (i = hw->start_stage; i <= hw->end_stage; i++) {
- ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
- &ad714x->adc_reg[i]);
- ad714x->read(ad714x->dev,
- STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
- &ad714x->amb_reg[i]);
+ ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i], 1);
if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
- ad714x->sensor_val[i] = ad714x->adc_reg[i] -
- ad714x->amb_reg[i];
+ ad714x->sensor_val[i] =
+ ad714x->adc_reg[i] - ad714x->amb_reg[i];
else
ad714x->sensor_val[i] = 0;
}
@@ -597,15 +577,16 @@ static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
int i;
+ ad714x->read(ad714x, CDC_RESULT_S0 + hw->x_start_stage,
+ &ad714x->adc_reg[hw->x_start_stage],
+ hw->x_end_stage - hw->x_start_stage + 1);
+
for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) {
- ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
- &ad714x->adc_reg[i]);
- ad714x->read(ad714x->dev,
- STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
- &ad714x->amb_reg[i]);
+ ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i], 1);
if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
- ad714x->sensor_val[i] = ad714x->adc_reg[i] -
- ad714x->amb_reg[i];
+ ad714x->sensor_val[i] =
+ ad714x->adc_reg[i] - ad714x->amb_reg[i];
else
ad714x->sensor_val[i] = 0;
}
@@ -891,7 +872,7 @@ static int ad714x_hw_detect(struct ad714x_chip *ad714x)
{
unsigned short data;
- ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data);
+ ad714x->read(ad714x, AD714X_PARTID_REG, &data, 1);
switch (data & 0xFFF0) {
case AD7142_PARTID:
ad714x->product = 0x7142;
@@ -940,23 +921,20 @@ static void ad714x_hw_init(struct ad714x_chip *ad714x)
for (i = 0; i < STAGE_NUM; i++) {
reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM;
for (j = 0; j < STAGE_CFGREG_NUM; j++)
- ad714x->write(ad714x->dev, reg_base + j,
+ ad714x->write(ad714x, reg_base + j,
ad714x->hw->stage_cfg_reg[i][j]);
}
for (i = 0; i < SYS_CFGREG_NUM; i++)
- ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i,
+ ad714x->write(ad714x, AD714X_SYSCFG_REG + i,
ad714x->hw->sys_cfg_reg[i]);
for (i = 0; i < SYS_CFGREG_NUM; i++)
- ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i,
- &data);
+ ad714x->read(ad714x, AD714X_SYSCFG_REG + i, &data, 1);
- ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF);
+ ad714x->write(ad714x, AD714X_STG_CAL_EN_REG, 0xFFF);
/* clear all interrupts */
- ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data);
- ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
- ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
+ ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
}
static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
@@ -966,9 +944,7 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
mutex_lock(&ad714x->mutex);
- ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state);
- ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state);
- ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state);
+ ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
for (i = 0; i < ad714x->hw->button_num; i++)
ad714x_button_state_machine(ad714x, i);
@@ -1245,7 +1221,7 @@ int ad714x_disable(struct ad714x_chip *ad714x)
mutex_lock(&ad714x->mutex);
data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3;
- ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data);
+ ad714x->write(ad714x, AD714X_PWR_CTRL, data);
mutex_unlock(&ad714x->mutex);
@@ -1255,24 +1231,20 @@ EXPORT_SYMBOL(ad714x_disable);
int ad714x_enable(struct ad714x_chip *ad714x)
{
- unsigned short data;
-
dev_dbg(ad714x->dev, "%s enter\n", __func__);
mutex_lock(&ad714x->mutex);
/* resume to non-shutdown mode */
- ad714x->write(ad714x->dev, AD714X_PWR_CTRL,
+ ad714x->write(ad714x, AD714X_PWR_CTRL,
ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]);
/* make sure the interrupt output line is not low level after resume,
* otherwise we will get no chance to enter falling-edge irq again
*/
- ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data);
- ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
- ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
+ ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
mutex_unlock(&ad714x->mutex);
diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h
index 45c54fb..3c85455 100644
--- a/drivers/input/misc/ad714x.h
+++ b/drivers/input/misc/ad714x.h
@@ -1,7 +1,7 @@
/*
* AD714X CapTouch Programmable Controller driver (bus interfaces)
*
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -11,11 +11,40 @@
#include <linux/types.h>
+#define STAGE_NUM 12
+
struct device;
+struct ad714x_platform_data;
+struct ad714x_driver_data;
struct ad714x_chip;
-typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *);
-typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short);
+typedef int (*ad714x_read_t)(struct ad714x_chip *, unsigned short, unsigned short *, size_t);
+typedef int (*ad714x_write_t)(struct ad714x_chip *, unsigned short, unsigned short);
+
+struct ad714x_chip {
+ unsigned short l_state;
+ unsigned short h_state;
+ unsigned short c_state;
+ unsigned short adc_reg[STAGE_NUM];
+ unsigned short amb_reg[STAGE_NUM];
+ unsigned short sensor_val[STAGE_NUM];
+
+ struct ad714x_platform_data *hw;
+ struct ad714x_driver_data *sw;
+
+ int irq;
+ struct device *dev;
+ ad714x_read_t read;
+ ad714x_write_t write;
+
+ struct mutex mutex;
+
+ unsigned product;
+ unsigned version;
+
+ __be16 xfer_buf[16] ____cacheline_aligned;
+
+};
int ad714x_disable(struct ad714x_chip *ad714x);
int ad714x_enable(struct ad714x_chip *ad714x);
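Because the read callback declared above now takes a length, callers in ad714x.c can fetch a run of consecutive registers in one bus transaction, as the reworked slider/wheel/touchpad paths do for the CDC results. A minimal sketch of such a caller, using names from this patch (CDC_RESULT_S0 is defined in ad714x.c and its value is not shown in these hunks):

#include <linux/device.h>

#include "ad714x.h"

/* Fetch all STAGE_NUM CDC conversion results with a single transfer. */
static int example_read_all_cdc(struct ad714x_chip *ad714x,
				unsigned short results[STAGE_NUM])
{
	int error;

	error = ad714x->read(ad714x, CDC_RESULT_S0, results, STAGE_NUM);
	if (error)
		dev_err(ad714x->dev, "CDC block read failed: %d\n", error);

	return error;
}

Both bus implementations stage the transfer through the new chip->xfer_buf[] and convert each word with be16_to_cpu(), so the caller always sees CPU-endian values.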
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 4f72bdd..d00edc9 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -6,7 +6,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
new file mode 100644
index 0000000..783597a
--- /dev/null
+++ b/drivers/input/misc/kxtj9.c
@@ -0,0 +1,672 @@
+/*
+ * Copyright (C) 2011 Kionix, Inc.
+ * Written by Chris Hudson <chudson@kionix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/input/kxtj9.h>
+#include <linux/input-polldev.h>
+
+#define NAME "kxtj9"
+#define G_MAX 8000
+/* OUTPUT REGISTERS */
+#define XOUT_L 0x06
+#define WHO_AM_I 0x0F
+/* CONTROL REGISTERS */
+#define INT_REL 0x1A
+#define CTRL_REG1 0x1B
+#define INT_CTRL1 0x1E
+#define DATA_CTRL 0x21
+/* CONTROL REGISTER 1 BITS */
+#define PC1_OFF 0x7F
+#define PC1_ON (1 << 7)
+/* Data ready function enable bit: set during probe if using irq mode */
+#define DRDYE (1 << 5)
+/* INTERRUPT CONTROL REGISTER 1 BITS */
+/* Set these during probe if using irq mode */
+#define KXTJ9_IEL (1 << 3)
+#define KXTJ9_IEA (1 << 4)
+#define KXTJ9_IEN (1 << 5)
+/* INPUT_ABS CONSTANTS */
+#define FUZZ 3
+#define FLAT 3
+/* RESUME STATE INDICES */
+#define RES_DATA_CTRL 0
+#define RES_CTRL_REG1 1
+#define RES_INT_CTRL1 2
+#define RESUME_ENTRIES 3
+
+/*
+ * The following table lists the maximum appropriate poll interval for each
+ * available output data rate.
+ */
+static const struct {
+ unsigned int cutoff;
+ u8 mask;
+} kxtj9_odr_table[] = {
+ { 3, ODR800F },
+ { 5, ODR400F },
+ { 10, ODR200F },
+ { 20, ODR100F },
+ { 40, ODR50F },
+ { 80, ODR25F },
+ { 0, ODR12_5F},
+};
+
+struct kxtj9_data {
+ struct i2c_client *client;
+ struct kxtj9_platform_data pdata;
+ struct input_dev *input_dev;
+#ifdef CONFIG_INPUT_KXTJ9_POLLED_MODE
+ struct input_polled_dev *poll_dev;
+#endif
+ unsigned int last_poll_interval;
+ u8 shift;
+ u8 ctrl_reg1;
+ u8 data_ctrl;
+ u8 int_ctrl;
+};
+
+static int kxtj9_i2c_read(struct kxtj9_data *tj9, u8 addr, u8 *data, int len)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = tj9->client->addr,
+ .flags = tj9->client->flags,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = tj9->client->addr,
+ .flags = tj9->client->flags | I2C_M_RD,
+ .len = len,
+ .buf = data,
+ },
+ };
+
+ return i2c_transfer(tj9->client->adapter, msgs, 2);
+}
+
+static void kxtj9_report_acceleration_data(struct kxtj9_data *tj9)
+{
+ s16 acc_data[3]; /* Data bytes from hardware xL, xH, yL, yH, zL, zH */
+ s16 x, y, z;
+ int err;
+
+ err = kxtj9_i2c_read(tj9, XOUT_L, (u8 *)acc_data, 6);
+ if (err < 0)
+ dev_err(&tj9->client->dev, "accelerometer data read failed\n");
+
+ x = le16_to_cpu(acc_data[tj9->pdata.axis_map_x]) >> tj9->shift;
+ y = le16_to_cpu(acc_data[tj9->pdata.axis_map_y]) >> tj9->shift;
+ z = le16_to_cpu(acc_data[tj9->pdata.axis_map_z]) >> tj9->shift;
+
+ input_report_abs(tj9->input_dev, ABS_X, tj9->pdata.negate_x ? -x : x);
+ input_report_abs(tj9->input_dev, ABS_Y, tj9->pdata.negate_y ? -y : y);
+ input_report_abs(tj9->input_dev, ABS_Z, tj9->pdata.negate_z ? -z : z);
+ input_sync(tj9->input_dev);
+}
+
+static irqreturn_t kxtj9_isr(int irq, void *dev)
+{
+ struct kxtj9_data *tj9 = dev;
+ int err;
+
+ /* data ready is the only possible interrupt type */
+ kxtj9_report_acceleration_data(tj9);
+
+ err = i2c_smbus_read_byte_data(tj9->client, INT_REL);
+ if (err < 0)
+ dev_err(&tj9->client->dev,
+ "error clearing interrupt status: %d\n", err);
+
+ return IRQ_HANDLED;
+}
+
+static int kxtj9_update_g_range(struct kxtj9_data *tj9, u8 new_g_range)
+{
+ switch (new_g_range) {
+ case KXTJ9_G_2G:
+ tj9->shift = 4;
+ break;
+ case KXTJ9_G_4G:
+ tj9->shift = 3;
+ break;
+ case KXTJ9_G_8G:
+ tj9->shift = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tj9->ctrl_reg1 &= 0xe7;
+ tj9->ctrl_reg1 |= new_g_range;
+
+ return 0;
+}
+
+static int kxtj9_update_odr(struct kxtj9_data *tj9, unsigned int poll_interval)
+{
+ int err;
+ int i;
+
+ /* Use the lowest ODR that can support the requested poll interval */
+ for (i = 0; i < ARRAY_SIZE(kxtj9_odr_table); i++) {
+ tj9->data_ctrl = kxtj9_odr_table[i].mask;
+ if (poll_interval < kxtj9_odr_table[i].cutoff)
+ break;
+ }
+
+ err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, 0);
+ if (err < 0)
+ return err;
+
+ err = i2c_smbus_write_byte_data(tj9->client, DATA_CTRL, tj9->data_ctrl);
+ if (err < 0)
+ return err;
+
+ err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int kxtj9_device_power_on(struct kxtj9_data *tj9)
+{
+ if (tj9->pdata.power_on)
+ return tj9->pdata.power_on();
+
+ return 0;
+}
+
+static void kxtj9_device_power_off(struct kxtj9_data *tj9)
+{
+ int err;
+
+ tj9->ctrl_reg1 &= PC1_OFF;
+ err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
+ if (err < 0)
+ dev_err(&tj9->client->dev, "soft power off failed\n");
+
+ if (tj9->pdata.power_off)
+ tj9->pdata.power_off();
+}
+
+static int kxtj9_enable(struct kxtj9_data *tj9)
+{
+ int err;
+
+ err = kxtj9_device_power_on(tj9);
+ if (err < 0)
+ return err;
+
+ /* ensure that PC1 is cleared before updating control registers */
+ err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, 0);
+ if (err < 0)
+ return err;
+
+ /* only write INT_CTRL_REG1 if in irq mode */
+ if (tj9->client->irq) {
+ err = i2c_smbus_write_byte_data(tj9->client,
+ INT_CTRL1, tj9->int_ctrl);
+ if (err < 0)
+ return err;
+ }
+
+ err = kxtj9_update_g_range(tj9, tj9->pdata.g_range);
+ if (err < 0)
+ return err;
+
+ /* turn on outputs */
+ tj9->ctrl_reg1 |= PC1_ON;
+ err = i2c_smbus_write_byte_data(tj9->client, CTRL_REG1, tj9->ctrl_reg1);
+ if (err < 0)
+ return err;
+
+ err = kxtj9_update_odr(tj9, tj9->last_poll_interval);
+ if (err < 0)
+ return err;
+
+ /* clear initial interrupt if in irq mode */
+ if (tj9->client->irq) {
+ err = i2c_smbus_read_byte_data(tj9->client, INT_REL);
+ if (err < 0) {
+ dev_err(&tj9->client->dev,
+ "error clearing interrupt: %d\n", err);
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ kxtj9_device_power_off(tj9);
+ return err;
+}
+
+static void kxtj9_disable(struct kxtj9_data *tj9)
+{
+ kxtj9_device_power_off(tj9);
+}
+
+static int kxtj9_input_open(struct input_dev *input)
+{
+ struct kxtj9_data *tj9 = input_get_drvdata(input);
+
+ return kxtj9_enable(tj9);
+}
+
+static void kxtj9_input_close(struct input_dev *dev)
+{
+ struct kxtj9_data *tj9 = input_get_drvdata(dev);
+
+ kxtj9_disable(tj9);
+}
+
+static void __devinit kxtj9_init_input_device(struct kxtj9_data *tj9,
+ struct input_dev *input_dev)
+{
+ __set_bit(EV_ABS, input_dev->evbit);
+ input_set_abs_params(input_dev, ABS_X, -G_MAX, G_MAX, FUZZ, FLAT);
+ input_set_abs_params(input_dev, ABS_Y, -G_MAX, G_MAX, FUZZ, FLAT);
+ input_set_abs_params(input_dev, ABS_Z, -G_MAX, G_MAX, FUZZ, FLAT);
+
+ input_dev->name = "kxtj9_accel";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &tj9->client->dev;
+}
+
+static int __devinit kxtj9_setup_input_device(struct kxtj9_data *tj9)
+{
+ struct input_dev *input_dev;
+ int err;
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&tj9->client->dev, "input device allocate failed\n");
+ return -ENOMEM;
+ }
+
+ tj9->input_dev = input_dev;
+
+ input_dev->open = kxtj9_input_open;
+ input_dev->close = kxtj9_input_close;
+ input_set_drvdata(input_dev, tj9);
+
+ kxtj9_init_input_device(tj9, input_dev);
+
+ err = input_register_device(tj9->input_dev);
+ if (err) {
+ dev_err(&tj9->client->dev,
+ "unable to register input device %s: %d\n",
+ tj9->input_dev->name, err);
+ input_free_device(tj9->input_dev);
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * When IRQ mode is selected, we need to provide an interface to allow the user
+ * to change the output data rate of the part. For consistency, we are using
+ * the set_poll method, which accepts a poll interval in milliseconds, and then
+ * calls update_odr() with that value as an argument. In IRQ mode the data
+ * outputs are not read at the requested poll interval; instead, the part is
+ * programmed with the lowest ODR that can support the requested interval.
+ * The client application is responsible for retrieving data from the input
+ * node at the desired interval.
+ */
+
+/* Returns currently selected poll interval (in ms) */
+static ssize_t kxtj9_get_poll(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct kxtj9_data *tj9 = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d\n", tj9->last_poll_interval);
+}
+
+/* Allow users to select a new poll interval (in ms) */
+static ssize_t kxtj9_set_poll(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct kxtj9_data *tj9 = i2c_get_clientdata(client);
+ struct input_dev *input_dev = tj9->input_dev;
+ unsigned int interval;
+ int error;
+
+ error = kstrtouint(buf, 10, &interval);
+ if (error < 0)
+ return error;
+
+ /* Lock the device to prevent races with open/close (and itself) */
+ mutex_lock(&input_dev->mutex);
+
+ disable_irq(client->irq);
+
+ /*
+ * Set current interval to the greater of the minimum interval or
+ * the requested interval
+ */
+ tj9->last_poll_interval = max(interval, tj9->pdata.min_interval);
+
+ kxtj9_update_odr(tj9, tj9->last_poll_interval);
+
+ enable_irq(client->irq);
+ mutex_unlock(&input_dev->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(poll, S_IRUGO|S_IWUSR, kxtj9_get_poll, kxtj9_set_poll);
+
+static struct attribute *kxtj9_attributes[] = {
+ &dev_attr_poll.attr,
+ NULL
+};
+
+static struct attribute_group kxtj9_attribute_group = {
+ .attrs = kxtj9_attributes
+};
+
+
+#ifdef CONFIG_INPUT_KXTJ9_POLLED_MODE
+static void kxtj9_poll(struct input_polled_dev *dev)
+{
+ struct kxtj9_data *tj9 = dev->private;
+ unsigned int poll_interval = dev->poll_interval;
+
+ kxtj9_report_acceleration_data(tj9);
+
+ if (poll_interval != tj9->last_poll_interval) {
+ kxtj9_update_odr(tj9, poll_interval);
+ tj9->last_poll_interval = poll_interval;
+ }
+}
+
+static void kxtj9_polled_input_open(struct input_polled_dev *dev)
+{
+ struct kxtj9_data *tj9 = dev->private;
+
+ kxtj9_enable(tj9);
+}
+
+static void kxtj9_polled_input_close(struct input_polled_dev *dev)
+{
+ struct kxtj9_data *tj9 = dev->private;
+
+ kxtj9_disable(tj9);
+}
+
+static int __devinit kxtj9_setup_polled_device(struct kxtj9_data *tj9)
+{
+ int err;
+ struct input_polled_dev *poll_dev;
+ poll_dev = input_allocate_polled_device();
+
+ if (!poll_dev) {
+ dev_err(&tj9->client->dev,
+ "Failed to allocate polled device\n");
+ return -ENOMEM;
+ }
+
+ tj9->poll_dev = poll_dev;
+ tj9->input_dev = poll_dev->input;
+
+ poll_dev->private = tj9;
+ poll_dev->poll = kxtj9_poll;
+ poll_dev->open = kxtj9_polled_input_open;
+ poll_dev->close = kxtj9_polled_input_close;
+
+ kxtj9_init_input_device(tj9, poll_dev->input);
+
+ err = input_register_polled_device(poll_dev);
+ if (err) {
+ dev_err(&tj9->client->dev,
+ "Unable to register polled device, err=%d\n", err);
+ input_free_polled_device(poll_dev);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __devexit kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
+{
+ input_unregister_polled_device(tj9->poll_dev);
+ input_free_polled_device(tj9->poll_dev);
+}
+
+#else
+
+static inline int kxtj9_setup_polled_device(struct kxtj9_data *tj9)
+{
+ return -ENOSYS;
+}
+
+static inline void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
+{
+}
+
+#endif
+
+static int __devinit kxtj9_verify(struct kxtj9_data *tj9)
+{
+ int retval;
+
+ retval = kxtj9_device_power_on(tj9);
+ if (retval < 0)
+ return retval;
+
+ retval = i2c_smbus_read_byte_data(tj9->client, WHO_AM_I);
+ if (retval < 0) {
+ dev_err(&tj9->client->dev, "error reading WHO_AM_I register\n");
+ goto out;
+ }
+
+ retval = retval != 0x06 ? -EIO : 0;
+
+out:
+ kxtj9_device_power_off(tj9);
+ return retval;
+}
+
+static int __devinit kxtj9_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct kxtj9_platform_data *pdata = client->dev.platform_data;
+ struct kxtj9_data *tj9;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "client is not i2c capable\n");
+ return -ENXIO;
+ }
+
+ if (!pdata) {
+ dev_err(&client->dev, "platform data is NULL; exiting\n");
+ return -EINVAL;
+ }
+
+ tj9 = kzalloc(sizeof(*tj9), GFP_KERNEL);
+ if (!tj9) {
+ dev_err(&client->dev,
+ "failed to allocate memory for module data\n");
+ return -ENOMEM;
+ }
+
+ tj9->client = client;
+ tj9->pdata = *pdata;
+
+ if (pdata->init) {
+ err = pdata->init();
+ if (err < 0)
+ goto err_free_mem;
+ }
+
+ err = kxtj9_verify(tj9);
+ if (err < 0) {
+ dev_err(&client->dev, "device not recognized\n");
+ goto err_pdata_exit;
+ }
+
+ i2c_set_clientdata(client, tj9);
+
+ tj9->ctrl_reg1 = tj9->pdata.res_12bit | tj9->pdata.g_range;
+ tj9->data_ctrl = tj9->pdata.data_odr_init;
+
+ if (client->irq) {
+ /* If in irq mode, populate INT_CTRL_REG1 and enable DRDY. */
+ tj9->int_ctrl |= KXTJ9_IEN | KXTJ9_IEA | KXTJ9_IEL;
+ tj9->ctrl_reg1 |= DRDYE;
+
+ err = kxtj9_setup_input_device(tj9);
+ if (err)
+ goto err_pdata_exit;
+
+ err = request_threaded_irq(client->irq, NULL, kxtj9_isr,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "kxtj9-irq", tj9);
+ if (err) {
+ dev_err(&client->dev, "request irq failed: %d\n", err);
+ goto err_destroy_input;
+ }
+
+ err = sysfs_create_group(&client->dev.kobj, &kxtj9_attribute_group);
+ if (err) {
+ dev_err(&client->dev, "sysfs create failed: %d\n", err);
+ goto err_free_irq;
+ }
+
+ } else {
+ err = kxtj9_setup_polled_device(tj9);
+ if (err)
+ goto err_pdata_exit;
+ }
+
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, tj9);
+err_destroy_input:
+ input_unregister_device(tj9->input_dev);
+err_pdata_exit:
+ if (tj9->pdata.exit)
+ tj9->pdata.exit();
+err_free_mem:
+ kfree(tj9);
+ return err;
+}
+
+static int __devexit kxtj9_remove(struct i2c_client *client)
+{
+ struct kxtj9_data *tj9 = i2c_get_clientdata(client);
+
+ if (client->irq) {
+ sysfs_remove_group(&client->dev.kobj, &kxtj9_attribute_group);
+ free_irq(client->irq, tj9);
+ input_unregister_device(tj9->input_dev);
+ } else {
+ kxtj9_teardown_polled_device(tj9);
+ }
+
+ if (tj9->pdata.exit)
+ tj9->pdata.exit();
+
+ kfree(tj9);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int kxtj9_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct kxtj9_data *tj9 = i2c_get_clientdata(client);
+ struct input_dev *input_dev = tj9->input_dev;
+
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users)
+ kxtj9_disable(tj9);
+
+ mutex_unlock(&input_dev->mutex);
+ return 0;
+}
+
+static int kxtj9_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct kxtj9_data *tj9 = i2c_get_clientdata(client);
+ struct input_dev *input_dev = tj9->input_dev;
+ int retval = 0;
+
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users)
+ kxtj9_enable(tj9);
+
+ mutex_unlock(&input_dev->mutex);
+ return retval;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(kxtj9_pm_ops, kxtj9_suspend, kxtj9_resume);
+
+static const struct i2c_device_id kxtj9_id[] = {
+ { NAME, 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, kxtj9_id);
+
+static struct i2c_driver kxtj9_driver = {
+ .driver = {
+ .name = NAME,
+ .owner = THIS_MODULE,
+ .pm = &kxtj9_pm_ops,
+ },
+ .probe = kxtj9_probe,
+ .remove = __devexit_p(kxtj9_remove),
+ .id_table = kxtj9_id,
+};
+
+static int __init kxtj9_init(void)
+{
+ return i2c_add_driver(&kxtj9_driver);
+}
+module_init(kxtj9_init);
+
+static void __exit kxtj9_exit(void)
+{
+ i2c_del_driver(&kxtj9_driver);
+}
+module_exit(kxtj9_exit);
+
+MODULE_DESCRIPTION("KXTJ9 accelerometer driver");
+MODULE_AUTHOR("Chris Hudson <chudson@kionix.com>");
+MODULE_LICENSE("GPL");
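Because kxtj9_probe() above bails out when platform data is missing, a board has to supply a struct kxtj9_platform_data. A minimal sketch using only fields the driver actually reads; KXTJ9_G_2G and ODR12_5F come from <linux/input/kxtj9.h>, while the I2C address and the decision to omit the optional hooks are assumptions:

#include <linux/i2c.h>
#include <linux/input/kxtj9.h>

static struct kxtj9_platform_data example_kxtj9_pdata = {
	.min_interval	= 5,		/* ms, lower bound enforced by kxtj9_set_poll() */
	.axis_map_x	= 0,
	.axis_map_y	= 1,
	.axis_map_z	= 2,
	.g_range	= KXTJ9_G_2G,
	.data_odr_init	= ODR12_5F,
	/* .res_12bit, .init/.exit and .power_on/.power_off are optional */
};

static struct i2c_board_info example_kxtj9_info __initdata = {
	I2C_BOARD_INFO("kxtj9", 0x0f),	/* hypothetical 7-bit address */
	.platform_data	= &example_kxtj9_pdata,
	/* leaving .irq at 0 selects the polled path */
};

Registered with i2c_register_board_info(), an irq of 0 lands in kxtj9_setup_polled_device(), which requires CONFIG_INPUT_KXTJ9_POLLED_MODE; a valid IRQ selects the threaded-interrupt path instead.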
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
new file mode 100644
index 0000000..0794778
--- /dev/null
+++ b/drivers/input/misc/mma8450.c
@@ -0,0 +1,264 @@
+/*
+ * Driver for Freescale's 3-Axis Accelerometer MMA8450
+ *
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input-polldev.h>
+#include <linux/of_device.h>
+
+#define MMA8450_DRV_NAME "mma8450"
+
+#define MODE_CHANGE_DELAY_MS 100
+#define POLL_INTERVAL 100
+#define POLL_INTERVAL_MAX 500
+
+/* register definitions */
+#define MMA8450_STATUS 0x00
+#define MMA8450_STATUS_ZXYDR 0x08
+
+#define MMA8450_OUT_X8 0x01
+#define MMA8450_OUT_Y8 0x02
+#define MMA8450_OUT_Z8 0x03
+
+#define MMA8450_OUT_X_LSB 0x05
+#define MMA8450_OUT_X_MSB 0x06
+#define MMA8450_OUT_Y_LSB 0x07
+#define MMA8450_OUT_Y_MSB 0x08
+#define MMA8450_OUT_Z_LSB 0x09
+#define MMA8450_OUT_Z_MSB 0x0a
+
+#define MMA8450_XYZ_DATA_CFG 0x16
+
+#define MMA8450_CTRL_REG1 0x38
+#define MMA8450_CTRL_REG2 0x39
+
+/* mma8450 status */
+struct mma8450 {
+ struct i2c_client *client;
+ struct input_polled_dev *idev;
+};
+
+static int mma8450_read(struct mma8450 *m, unsigned off)
+{
+ struct i2c_client *c = m->client;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(c, off);
+ if (ret < 0)
+ dev_err(&c->dev,
+ "failed to read register 0x%02x, error %d\n",
+ off, ret);
+
+ return ret;
+}
+
+static int mma8450_write(struct mma8450 *m, unsigned off, u8 v)
+{
+ struct i2c_client *c = m->client;
+ int error;
+
+ error = i2c_smbus_write_byte_data(c, off, v);
+ if (error < 0) {
+ dev_err(&c->dev,
+ "failed to write to register 0x%02x, error %d\n",
+ off, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int mma8450_read_xyz(struct mma8450 *m, int *x, int *y, int *z)
+{
+ struct i2c_client *c = m->client;
+ u8 buff[6];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(c, MMA8450_OUT_X_LSB, 6, buff);
+ if (err < 0) {
+ dev_err(&c->dev,
+ "failed to read block data at 0x%02x, error %d\n",
+ MMA8450_OUT_X_LSB, err);
+ return err;
+ }
+
+ *x = ((buff[1] << 4) & 0xff0) | (buff[0] & 0xf);
+ *y = ((buff[3] << 4) & 0xff0) | (buff[2] & 0xf);
+ *z = ((buff[5] << 4) & 0xff0) | (buff[4] & 0xf);
+
+ return 0;
+}
+
+static void mma8450_poll(struct input_polled_dev *dev)
+{
+ struct mma8450 *m = dev->private;
+ int x, y, z;
+ int ret;
+ int err;
+
+ ret = mma8450_read(m, MMA8450_STATUS);
+ if (ret < 0)
+ return;
+
+ if (!(ret & MMA8450_STATUS_ZXYDR))
+ return;
+
+ err = mma8450_read_xyz(m, &x, &y, &z);
+ if (err)
+ return;
+
+ input_report_abs(dev->input, ABS_X, x);
+ input_report_abs(dev->input, ABS_Y, y);
+ input_report_abs(dev->input, ABS_Z, z);
+ input_sync(dev->input);
+}
+
+/* Initialize the MMA8450 chip */
+static void mma8450_open(struct input_polled_dev *dev)
+{
+ struct mma8450 *m = dev->private;
+ int err;
+
+ /* enable all events from X/Y/Z, no FIFO */
+ err = mma8450_write(m, MMA8450_XYZ_DATA_CFG, 0x07);
+ if (err)
+ return;
+
+ /*
+ * Sleep mode poll rate - 50Hz
+ * System output data rate - 400Hz
+ * Full scale selection - Active, +/- 2G
+ */
+ err = mma8450_write(m, MMA8450_CTRL_REG1, 0x01);
+ if (err < 0)
+ return;
+
+ msleep(MODE_CHANGE_DELAY_MS);
+}
+
+static void mma8450_close(struct input_polled_dev *dev)
+{
+ struct mma8450 *m = dev->private;
+
+ mma8450_write(m, MMA8450_CTRL_REG1, 0x00);
+ mma8450_write(m, MMA8450_CTRL_REG2, 0x01);
+}
+
+/*
+ * I2C init/probing/exit functions
+ */
+static int __devinit mma8450_probe(struct i2c_client *c,
+ const struct i2c_device_id *id)
+{
+ struct input_polled_dev *idev;
+ struct mma8450 *m;
+ int err;
+
+ m = kzalloc(sizeof(struct mma8450), GFP_KERNEL);
+ idev = input_allocate_polled_device();
+ if (!m || !idev) {
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ m->client = c;
+ m->idev = idev;
+
+ idev->private = m;
+ idev->input->name = MMA8450_DRV_NAME;
+ idev->input->id.bustype = BUS_I2C;
+ idev->poll = mma8450_poll;
+ idev->poll_interval = POLL_INTERVAL;
+ idev->poll_interval_max = POLL_INTERVAL_MAX;
+ idev->open = mma8450_open;
+ idev->close = mma8450_close;
+
+ __set_bit(EV_ABS, idev->input->evbit);
+ input_set_abs_params(idev->input, ABS_X, -2048, 2047, 32, 32);
+ input_set_abs_params(idev->input, ABS_Y, -2048, 2047, 32, 32);
+ input_set_abs_params(idev->input, ABS_Z, -2048, 2047, 32, 32);
+
+ err = input_register_polled_device(idev);
+ if (err) {
+ dev_err(&c->dev, "failed to register polled input device\n");
+ goto err_free_mem;
+ }
+
+ return 0;
+
+err_free_mem:
+ input_free_polled_device(idev);
+ kfree(m);
+ return err;
+}
+
+static int __devexit mma8450_remove(struct i2c_client *c)
+{
+ struct mma8450 *m = i2c_get_clientdata(c);
+ struct input_polled_dev *idev = m->idev;
+
+ input_unregister_polled_device(idev);
+ input_free_polled_device(idev);
+ kfree(m);
+
+ return 0;
+}
+
+static const struct i2c_device_id mma8450_id[] = {
+ { MMA8450_DRV_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mma8450_id);
+
+static const struct of_device_id mma8450_dt_ids[] = {
+ { .compatible = "fsl,mma8450", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mma8450_dt_ids);
+
+static struct i2c_driver mma8450_driver = {
+ .driver = {
+ .name = MMA8450_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mma8450_dt_ids,
+ },
+ .probe = mma8450_probe,
+ .remove = __devexit_p(mma8450_remove),
+ .id_table = mma8450_id,
+};
+
+static int __init mma8450_init(void)
+{
+ return i2c_add_driver(&mma8450_driver);
+}
+module_init(mma8450_init);
+
+static void __exit mma8450_exit(void)
+{
+ i2c_del_driver(&mma8450_driver);
+}
+module_exit(mma8450_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MMA8450 3-Axis Accelerometer Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
new file mode 100644
index 0000000..f71dc72
--- /dev/null
+++ b/drivers/input/misc/mpu3050.c
@@ -0,0 +1,376 @@
+/*
+ * MPU3050 Tri-axis gyroscope driver
+ *
+ * Copyright (C) 2011 Wistron Co.Ltd
+ * Joseph Lai <joseph_lai@wistron.com>
+ *
+ * Trimmed down by Alan Cox <alan@linux.intel.com> to produce this version
+ *
+ * This is a 'lite' version of the driver, while we consider the right way
+ * to present the other features to user space. In particular, it requires
+ * that the device have an IRQ, and it only provides an input interface, so
+ * it is not much use for device orientation. A fuller version is available
+ * from the Meego
+ * tree.
+ *
+ * This program is based on bma023.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#define MPU3050_CHIP_ID_REG 0x00
+#define MPU3050_CHIP_ID 0x69
+#define MPU3050_XOUT_H 0x1D
+#define MPU3050_PWR_MGM 0x3E
+#define MPU3050_PWR_MGM_POS 6
+#define MPU3050_PWR_MGM_MASK 0x40
+
+#define MPU3050_AUTO_DELAY 1000
+
+#define MPU3050_MIN_VALUE -32768
+#define MPU3050_MAX_VALUE 32767
+
+struct axis_data {
+ s16 x;
+ s16 y;
+ s16 z;
+};
+
+struct mpu3050_sensor {
+ struct i2c_client *client;
+ struct device *dev;
+ struct input_dev *idev;
+};
+
+/**
+ * mpu3050_xyz_read_reg - read the axes values
+ * @client: the i2c client to read from
+ * @buffer: buffer that receives the register contents
+ * @length: number of bytes to read
+ *
+ * Reads the register values in one transaction or returns a negative
+ * error code on failure.
+ */
+static int mpu3050_xyz_read_reg(struct i2c_client *client,
+ u8 *buffer, int length)
+{
+ /*
+ * Annoyingly, we can't make this const because the i2c layer doesn't
+ * declare input buffers const.
+ */
+ char cmd = MPU3050_XOUT_H;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &cmd,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = buffer,
+ },
+ };
+
+ return i2c_transfer(client->adapter, msg, 2);
+}
+
+/**
+ * mpu3050_read_xyz - get co-ordinates from device
+ * @client: i2c client of the sensor
+ * @coords: co-ordinates to update
+ *
+ * Return the converted X, Y and Z co-ordinates from the sensor device.
+ */
+static void mpu3050_read_xyz(struct i2c_client *client,
+ struct axis_data *coords)
+{
+ u16 buffer[3];
+
+ mpu3050_xyz_read_reg(client, (u8 *)buffer, 6);
+ coords->x = be16_to_cpu(buffer[0]);
+ coords->y = be16_to_cpu(buffer[1]);
+ coords->z = be16_to_cpu(buffer[2]);
+ dev_dbg(&client->dev, "%s: x %d, y %d, z %d\n", __func__,
+ coords->x, coords->y, coords->z);
+}
+
+/**
+ * mpu3050_set_power_mode - set the power mode
+ * @client: i2c client for the sensor
+ * @val: power state to select: 1 for normal power, 0 for low power
+ *
+ * Put the device into normal-power or low-power mode.
+ */
+static void mpu3050_set_power_mode(struct i2c_client *client, u8 val)
+{
+ u8 value;
+
+ value = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM);
+ value = (value & ~MPU3050_PWR_MGM_MASK) |
+ (((val << MPU3050_PWR_MGM_POS) & MPU3050_PWR_MGM_MASK) ^
+ MPU3050_PWR_MGM_MASK);
+ i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, value);
+}
+
+/**
+ * mpu3050_input_open - called on input event open
+ * @input: input dev of opened device
+ *
+ * The input layer calls this function when the input device is opened.
+ * It takes a runtime-PM reference so the device resumes and is ready
+ * to provide data.
+ */
+static int mpu3050_input_open(struct input_dev *input)
+{
+ struct mpu3050_sensor *sensor = input_get_drvdata(input);
+
+ pm_runtime_get(sensor->dev);
+
+ return 0;
+}
+
+/**
+ * mpu3050_input_close - called on input event close
+ * @input: input dev of closed device
+ *
+ * The input layer calls this function when the input device is closed.
+ * It drops the runtime-PM reference so the device can be suspended.
+ */
+static void mpu3050_input_close(struct input_dev *input)
+{
+ struct mpu3050_sensor *sensor = input_get_drvdata(input);
+
+ pm_runtime_put(sensor->dev);
+}
+
+/**
+ * mpu3050_interrupt_thread - handle an IRQ
+ * @irq: interrupt number
+ * @data: the sensor
+ *
+ * Called by the kernel as a threaded interrupt handler after an interrupt
+ * occurs. Reads the sensor data and generates an input event for it.
+ */
+static irqreturn_t mpu3050_interrupt_thread(int irq, void *data)
+{
+ struct mpu3050_sensor *sensor = data;
+ struct axis_data axis;
+
+ mpu3050_read_xyz(sensor->client, &axis);
+
+ input_report_abs(sensor->idev, ABS_X, axis.x);
+ input_report_abs(sensor->idev, ABS_Y, axis.y);
+ input_report_abs(sensor->idev, ABS_Z, axis.z);
+ input_sync(sensor->idev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpu3050_probe - device detection callback
+ * @client: i2c client of found device
+ * @id: id match information
+ *
+ * The I2C layer calls us when it believes a sensor is present at this
+ * address. Probe to see if this is correct and to validate the device.
+ *
+ * If present, install the relevant sysfs interfaces and input device.
+ */
+static int __devinit mpu3050_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mpu3050_sensor *sensor;
+ struct input_dev *idev;
+ int ret;
+ int error;
+
+ sensor = kzalloc(sizeof(struct mpu3050_sensor), GFP_KERNEL);
+ idev = input_allocate_device();
+ if (!sensor || !idev) {
+ dev_err(&client->dev, "failed to allocate driver data\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ sensor->client = client;
+ sensor->dev = &client->dev;
+ sensor->idev = idev;
+
+ mpu3050_set_power_mode(client, 1);
+ msleep(10);
+
+ ret = i2c_smbus_read_byte_data(client, MPU3050_CHIP_ID_REG);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to detect device\n");
+ error = -ENXIO;
+ goto err_free_mem;
+ }
+
+ if (ret != MPU3050_CHIP_ID) {
+ dev_err(&client->dev, "unsupported chip id\n");
+ error = -ENXIO;
+ goto err_free_mem;
+ }
+
+ idev->name = "MPU3050";
+ idev->id.bustype = BUS_I2C;
+ idev->dev.parent = &client->dev;
+
+ idev->open = mpu3050_input_open;
+ idev->close = mpu3050_input_close;
+
+ __set_bit(EV_ABS, idev->evbit);
+ input_set_abs_params(idev, ABS_X,
+ MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0);
+ input_set_abs_params(idev, ABS_Y,
+ MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0);
+ input_set_abs_params(idev, ABS_Z,
+ MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0);
+
+ input_set_drvdata(idev, sensor);
+
+ pm_runtime_set_active(&client->dev);
+
+ error = request_threaded_irq(client->irq,
+ NULL, mpu3050_interrupt_thread,
+ IRQF_TRIGGER_RISING,
+ "mpu_int", sensor);
+ if (error) {
+ dev_err(&client->dev,
+ "can't get IRQ %d, error %d\n", client->irq, error);
+ goto err_pm_set_suspended;
+ }
+
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(&client->dev, "failed to register input device\n");
+ goto err_free_irq;
+ }
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY);
+
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, sensor);
+err_pm_set_suspended:
+ pm_runtime_set_suspended(&client->dev);
+err_free_mem:
+ input_free_device(idev);
+ kfree(sensor);
+ return error;
+}
+
+/**
+ * mpu3050_remove - remove a sensor
+ * @client: i2c client of sensor being removed
+ *
+ * Our sensor is going away, clean up the resources.
+ */
+static int __devexit mpu3050_remove(struct i2c_client *client)
+{
+ struct mpu3050_sensor *sensor = i2c_get_clientdata(client);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ free_irq(client->irq, sensor);
+ input_unregister_device(sensor->idev);
+ kfree(sensor);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * mpu3050_suspend - called on device suspend
+ * @dev: device being suspended
+ *
+ * Put the device into sleep mode before we suspend the machine.
+ */
+static int mpu3050_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ mpu3050_set_power_mode(client, 0);
+
+ return 0;
+}
+
+/**
+ * mpu3050_resume - called on device resume
+ * @dev: device being resumed
+ *
+ * Put the device into powered mode on resume.
+ */
+static int mpu3050_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ mpu3050_set_power_mode(client, 1);
+ msleep(100); /* wait for gyro chip resume */
+
+ return 0;
+}
+#endif
+
+static UNIVERSAL_DEV_PM_OPS(mpu3050_pm, mpu3050_suspend, mpu3050_resume, NULL);
+
+static const struct i2c_device_id mpu3050_ids[] = {
+ { "mpu3050", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mpu3050_ids);
+
+static struct i2c_driver mpu3050_i2c_driver = {
+ .driver = {
+ .name = "mpu3050",
+ .owner = THIS_MODULE,
+ .pm = &mpu3050_pm,
+ },
+ .probe = mpu3050_probe,
+ .remove = __devexit_p(mpu3050_remove),
+ .id_table = mpu3050_ids,
+};
+
+static int __init mpu3050_init(void)
+{
+ return i2c_add_driver(&mpu3050_i2c_driver);
+}
+module_init(mpu3050_init);
+
+static void __exit mpu3050_exit(void)
+{
+ i2c_del_driver(&mpu3050_i2c_driver);
+}
+module_exit(mpu3050_exit);
+
+MODULE_AUTHOR("Wistron Corp.");
+MODULE_DESCRIPTION("MPU3050 Tri-axis gyroscope driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 014dd4a..3c1a432 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/i2c/twl.h>
-#include <linux/mfd/twl4030-codec.h>
+#include <linux/mfd/twl4030-audio.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -67,7 +67,7 @@ static void vibra_enable(struct vibra_info *info)
{
u8 reg;
- twl4030_codec_enable_resource(TWL4030_CODEC_RES_POWER);
+ twl4030_audio_enable_resource(TWL4030_AUDIO_RES_POWER);
/* turn H-Bridge on */
twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
@@ -75,7 +75,7 @@ static void vibra_enable(struct vibra_info *info)
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
(reg | TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
- twl4030_codec_enable_resource(TWL4030_CODEC_RES_APLL);
+ twl4030_audio_enable_resource(TWL4030_AUDIO_RES_APLL);
info->enabled = true;
}
@@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info)
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
(reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
- twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
- twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
+ twl4030_audio_disable_resource(TWL4030_AUDIO_RES_APLL);
+ twl4030_audio_disable_resource(TWL4030_AUDIO_RES_POWER);
info->enabled = false;
}
@@ -196,7 +196,7 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
{
- struct twl4030_codec_vibra_data *pdata = pdev->dev.platform_data;
+ struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
struct vibra_info *info;
int ret;
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
new file mode 100644
index 0000000..c43002e
--- /dev/null
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -0,0 +1,423 @@
+/*
+ * twl6040-vibra.c - TWL6040 Vibrator driver
+ *
+ * Author: Jorge Eduardo Candelaria <jorge.candelaria@ti.com>
+ * Author: Misael Lopez Cruz <misael.lopez@ti.com>
+ *
+ * Copyright: (C) 2011 Texas Instruments, Inc.
+ *
+ * Based on twl4030-vibra.c by Henrik Saari <henrik.saari@nokia.com>
+ * Felipe Balbi <felipe.balbi@nokia.com>
+ * Jari Vanhala <ext-javi.vanhala@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/i2c/twl.h>
+#include <linux/mfd/twl6040.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+
+#define EFFECT_DIR_180_DEG 0x8000
+
+/* Recommended modulation index 85% */
+#define TWL6040_VIBRA_MOD 85
+
+#define TWL6040_NUM_SUPPLIES 2
+
+struct vibra_info {
+ struct device *dev;
+ struct input_dev *input_dev;
+ struct workqueue_struct *workqueue;
+ struct work_struct play_work;
+ struct mutex mutex;
+ int irq;
+
+ bool enabled;
+ int weak_speed;
+ int strong_speed;
+ int direction;
+
+ unsigned int vibldrv_res;
+ unsigned int vibrdrv_res;
+ unsigned int viblmotor_res;
+ unsigned int vibrmotor_res;
+
+ struct regulator_bulk_data supplies[TWL6040_NUM_SUPPLIES];
+
+ struct twl6040 *twl6040;
+};
+
+static irqreturn_t twl6040_vib_irq_handler(int irq, void *data)
+{
+ struct vibra_info *info = data;
+ struct twl6040 *twl6040 = info->twl6040;
+ u8 status;
+
+ status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
+ if (status & TWL6040_VIBLOCDET) {
+ dev_warn(info->dev, "Left Vibrator overcurrent detected\n");
+ twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLL,
+ TWL6040_VIBENAL);
+ }
+ if (status & TWL6040_VIBROCDET) {
+ dev_warn(info->dev, "Right Vibrator overcurrent detected\n");
+ twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLR,
+ TWL6040_VIBENAR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void twl6040_vibra_enable(struct vibra_info *info)
+{
+ struct twl6040 *twl6040 = info->twl6040;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(info->supplies), info->supplies);
+ if (ret) {
+ dev_err(info->dev, "failed to enable regulators %d\n", ret);
+ return;
+ }
+
+ twl6040_power(info->twl6040, 1);
+ if (twl6040->rev <= TWL6040_REV_ES1_1) {
+ /*
+ * ERRATA: Disable overcurrent protection for at least
+ * 3ms when enabling vibrator drivers to avoid false
+ * overcurrent detection
+ */
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
+ TWL6040_VIBENAL | TWL6040_VIBCTRLL);
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
+ TWL6040_VIBENAR | TWL6040_VIBCTRLR);
+ usleep_range(3000, 3500);
+ }
+
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
+ TWL6040_VIBENAL);
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
+ TWL6040_VIBENAR);
+
+ info->enabled = true;
+}
+
+static void twl6040_vibra_disable(struct vibra_info *info)
+{
+ struct twl6040 *twl6040 = info->twl6040;
+
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL, 0x00);
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR, 0x00);
+ twl6040_power(info->twl6040, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(info->supplies), info->supplies);
+
+ info->enabled = false;
+}
+
+static u8 twl6040_vibra_code(int vddvib, int vibdrv_res, int motor_res,
+ int speed, int direction)
+{
+ int vpk, max_code;
+ u8 vibdat;
+
+ /* output swing */
+ vpk = (vddvib * motor_res * TWL6040_VIBRA_MOD) /
+ (100 * (vibdrv_res + motor_res));
+
+ /* 50mV per VIBDAT code step */
+ max_code = vpk / 50;
+ if (max_code > TWL6040_VIBDAT_MAX)
+ max_code = TWL6040_VIBDAT_MAX;
+
+ /* scale speed to max allowed code */
+ vibdat = (u8)((speed * max_code) / USHRT_MAX);
+
+ /* negate (2's complement) for directions of 180 degrees or more */
+ vibdat *= direction;
+
+ return vibdat;
+}
+
+static void twl6040_vibra_set_effect(struct vibra_info *info)
+{
+ struct twl6040 *twl6040 = info->twl6040;
+ u8 vibdatl, vibdatr;
+ int volt;
+
+ /* weak motor */
+ volt = regulator_get_voltage(info->supplies[0].consumer) / 1000;
+ vibdatl = twl6040_vibra_code(volt, info->vibldrv_res,
+ info->viblmotor_res,
+ info->weak_speed, info->direction);
+
+ /* strong motor */
+ volt = regulator_get_voltage(info->supplies[1].consumer) / 1000;
+ vibdatr = twl6040_vibra_code(volt, info->vibrdrv_res,
+ info->vibrmotor_res,
+ info->strong_speed, info->direction);
+
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBDATL, vibdatl);
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBDATR, vibdatr);
+}
+
+static void vibra_play_work(struct work_struct *work)
+{
+ struct vibra_info *info = container_of(work,
+ struct vibra_info, play_work);
+
+ mutex_lock(&info->mutex);
+
+ if (info->weak_speed || info->strong_speed) {
+ if (!info->enabled)
+ twl6040_vibra_enable(info);
+
+ twl6040_vibra_set_effect(info);
+ } else if (info->enabled)
+ twl6040_vibra_disable(info);
+
+ mutex_unlock(&info->mutex);
+}
+
+static int vibra_play(struct input_dev *input, void *data,
+ struct ff_effect *effect)
+{
+ struct vibra_info *info = input_get_drvdata(input);
+ int ret;
+
+ info->weak_speed = effect->u.rumble.weak_magnitude;
+ info->strong_speed = effect->u.rumble.strong_magnitude;
+ info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
+
+ ret = queue_work(info->workqueue, &info->play_work);
+ if (!ret) {
+ dev_info(&input->dev, "work is already on queue\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void twl6040_vibra_close(struct input_dev *input)
+{
+ struct vibra_info *info = input_get_drvdata(input);
+
+ cancel_work_sync(&info->play_work);
+
+ mutex_lock(&info->mutex);
+
+ if (info->enabled)
+ twl6040_vibra_disable(info);
+
+ mutex_unlock(&info->mutex);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int twl6040_vibra_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vibra_info *info = platform_get_drvdata(pdev);
+
+ mutex_lock(&info->mutex);
+
+ if (info->enabled)
+ twl6040_vibra_disable(info);
+
+ mutex_unlock(&info->mutex);
+
+ return 0;
+}
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
+
+static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
+{
+ struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
+ struct vibra_info *info;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_data not available\n");
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "couldn't allocate memory\n");
+ return -ENOMEM;
+ }
+
+ info->dev = &pdev->dev;
+ info->twl6040 = dev_get_drvdata(pdev->dev.parent);
+ info->vibldrv_res = pdata->vibldrv_res;
+ info->vibrdrv_res = pdata->vibrdrv_res;
+ info->viblmotor_res = pdata->viblmotor_res;
+ info->vibrmotor_res = pdata->vibrmotor_res;
+ if ((!info->vibldrv_res && !info->viblmotor_res) ||
+ (!info->vibrdrv_res && !info->vibrmotor_res)) {
+ dev_err(info->dev, "invalid vibra driver/motor resistance\n");
+ ret = -EINVAL;
+ goto err_kzalloc;
+ }
+
+ info->irq = platform_get_irq(pdev, 0);
+ if (info->irq < 0) {
+ dev_err(info->dev, "invalid irq\n");
+ ret = -EINVAL;
+ goto err_kzalloc;
+ }
+
+ mutex_init(&info->mutex);
+
+ info->input_dev = input_allocate_device();
+ if (info->input_dev == NULL) {
+ dev_err(info->dev, "couldn't allocate input device\n");
+ ret = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ input_set_drvdata(info->input_dev, info);
+
+ info->input_dev->name = "twl6040:vibrator";
+ info->input_dev->id.version = 1;
+ info->input_dev->dev.parent = pdev->dev.parent;
+ info->input_dev->close = twl6040_vibra_close;
+ __set_bit(FF_RUMBLE, info->input_dev->ffbit);
+
+ ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
+ if (ret < 0) {
+ dev_err(info->dev, "couldn't register vibrator to FF\n");
+ goto err_ialloc;
+ }
+
+ ret = input_register_device(info->input_dev);
+ if (ret < 0) {
+ dev_err(info->dev, "couldn't register input device\n");
+ goto err_iff;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ ret = request_threaded_irq(info->irq, NULL, twl6040_vib_irq_handler, 0,
+ "twl6040_irq_vib", info);
+ if (ret) {
+ dev_err(info->dev, "VIB IRQ request failed: %d\n", ret);
+ goto err_irq;
+ }
+
+ info->supplies[0].supply = "vddvibl";
+ info->supplies[1].supply = "vddvibr";
+ ret = regulator_bulk_get(info->dev, ARRAY_SIZE(info->supplies),
+ info->supplies);
+ if (ret) {
+ dev_err(info->dev, "couldn't get regulators %d\n", ret);
+ goto err_regulator;
+ }
+
+ if (pdata->vddvibl_uV) {
+ ret = regulator_set_voltage(info->supplies[0].consumer,
+ pdata->vddvibl_uV,
+ pdata->vddvibl_uV);
+ if (ret) {
+ dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
+ ret);
+ goto err_voltage;
+ }
+ }
+
+ if (pdata->vddvibr_uV) {
+ ret = regulator_set_voltage(info->supplies[1].consumer,
+ pdata->vddvibr_uV,
+ pdata->vddvibr_uV);
+ if (ret) {
+ dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
+ ret);
+ goto err_voltage;
+ }
+ }
+
+ info->workqueue = alloc_workqueue("twl6040-vibra", 0, 0);
+ if (info->workqueue == NULL) {
+ dev_err(info->dev, "couldn't create workqueue\n");
+ ret = -ENOMEM;
+ goto err_voltage;
+ }
+ INIT_WORK(&info->play_work, vibra_play_work);
+
+ return 0;
+
+err_voltage:
+ regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
+err_regulator:
+ free_irq(info->irq, info);
+err_irq:
+ input_unregister_device(info->input_dev);
+ info->input_dev = NULL;
+err_iff:
+ if (info->input_dev)
+ input_ff_destroy(info->input_dev);
+err_ialloc:
+ input_free_device(info->input_dev);
+err_kzalloc:
+ kfree(info);
+ return ret;
+}
+
+static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
+{
+ struct vibra_info *info = platform_get_drvdata(pdev);
+
+ input_unregister_device(info->input_dev);
+ free_irq(info->irq, info);
+ regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
+ destroy_workqueue(info->workqueue);
+ kfree(info);
+
+ return 0;
+}
+
+static struct platform_driver twl6040_vibra_driver = {
+ .probe = twl6040_vibra_probe,
+ .remove = __devexit_p(twl6040_vibra_remove),
+ .driver = {
+ .name = "twl6040-vibra",
+ .owner = THIS_MODULE,
+ .pm = &twl6040_vibra_pm_ops,
+ },
+};
+
+static int __init twl6040_vibra_init(void)
+{
+ return platform_driver_register(&twl6040_vibra_driver);
+}
+module_init(twl6040_vibra_init);
+
+static void __exit twl6040_vibra_exit(void)
+{
+ platform_driver_unregister(&twl6040_vibra_driver);
+}
+module_exit(twl6040_vibra_exit);
+
+MODULE_ALIAS("platform:twl6040-vibra");
+MODULE_DESCRIPTION("TWL6040 Vibra driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>");
+MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 62bae99..ad2e51c 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -373,7 +373,7 @@ static struct xenbus_driver xenkbd_driver = {
static int __init xenkbd_init(void)
{
- if (!xen_pv_domain())
+ if (!xen_domain())
return -ENODEV;
/* Nothing to do if running in dom0. */
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 3126983..da28018 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -67,6 +67,14 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
+/* MacbookAir4,2 (unibody, July 2011) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
+/* MacbookPro8,2 (unibody) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
+#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
+#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -104,6 +112,14 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
+ /* MacbookAir4,2 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
+ /* MacbookPro8,2 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
/* Terminating entry */
{}
};
@@ -294,6 +310,30 @@ static const struct bcm5974_config bcm5974_config_table[] = {
{ DIM_X, DIM_X / SN_COORD, -4415, 5050 },
{ DIM_Y, DIM_Y / SN_COORD, -55, 6680 }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING6_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+ },
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
+ },
{}
};
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 7b6ce17..58902fb 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -191,7 +191,7 @@ static void __exit gpio_mouse_exit(void)
}
module_exit(gpio_mouse_exit);
-MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("GPIO mouse driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 95577c1..4d17d9f 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -32,6 +32,7 @@
#define DEBUG
#include <linux/slab.h>
#include <linux/input.h>
+#include <linux/module.h>
#include <linux/serio.h>
#include <linux/libps2.h>
#include <linux/delay.h>
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index c31ad11..83bcaba 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -33,7 +33,7 @@ static const char *desired_serio_phys;
static int lifebook_limit_serio3(const struct dmi_system_id *d)
{
desired_serio_phys = "isa0060/serio3";
- return 0;
+ return 1;
}
static bool lifebook_use_6byte_proto;
@@ -41,7 +41,7 @@ static bool lifebook_use_6byte_proto;
static int lifebook_set_6byte_proto(const struct dmi_system_id *d)
{
lifebook_use_6byte_proto = true;
- return 0;
+ return 1;
}
static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index 943cfec..6c5d84f 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/input.h>
-#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 1242775..2fc887a 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -20,7 +20,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/input.h>
#include <linux/ctype.h>
#include <linux/libps2.h>
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index e06e045..5538fc6 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -207,27 +207,37 @@ static int synaptics_identify(struct psmouse *psmouse)
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
- unsigned char res[3];
- unsigned char max[3];
+ unsigned char resp[3];
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
- if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res) == 0) {
- if (res[0] != 0 && (res[1] & 0x80) && res[2] != 0) {
- priv->x_res = res[0]; /* x resolution in units/mm */
- priv->y_res = res[2]; /* y resolution in units/mm */
+ if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, resp) == 0) {
+ if (resp[0] != 0 && (resp[1] & 0x80) && resp[2] != 0) {
+ priv->x_res = resp[0]; /* x resolution in units/mm */
+ priv->y_res = resp[2]; /* y resolution in units/mm */
}
}
if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
- if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_DIMENSIONS, max)) {
- printk(KERN_ERR "Synaptics claims to have dimensions query,"
- " but I'm not able to read it.\n");
+ if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
+ printk(KERN_ERR "Synaptics claims to have max coordinates"
+ " query, but I'm not able to read it.\n");
+ } else {
+ priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
+ priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
+ }
+ }
+
+ if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 &&
+ SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) {
+ if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
+ printk(KERN_ERR "Synaptics claims to have min coordinates"
+ " query, but I'm not able to read it.\n");
} else {
- priv->x_max = (max[0] << 5) | ((max[1] & 0x0f) << 1);
- priv->y_max = (max[2] << 5) | ((max[1] & 0xf0) >> 3);
+ priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
+ priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
}
}
@@ -406,26 +416,10 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
memset(hw, 0, sizeof(struct synaptics_hw_state));
if (SYN_MODEL_NEWABS(priv->model_id)) {
- hw->x = (((buf[3] & 0x10) << 8) |
- ((buf[1] & 0x0f) << 8) |
- buf[4]);
- hw->y = (((buf[3] & 0x20) << 7) |
- ((buf[1] & 0xf0) << 4) |
- buf[5]);
-
- hw->z = buf[2];
hw->w = (((buf[0] & 0x30) >> 2) |
((buf[0] & 0x04) >> 1) |
((buf[3] & 0x04) >> 2));
- if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) && hw->w == 2) {
- /* Gesture packet: (x, y, z) at half resolution */
- priv->mt.x = (((buf[4] & 0x0f) << 8) | buf[1]) << 1;
- priv->mt.y = (((buf[4] & 0xf0) << 4) | buf[2]) << 1;
- priv->mt.z = ((buf[3] & 0x30) | (buf[5] & 0x0f)) << 1;
- return 1;
- }
-
hw->left = (buf[0] & 0x01) ? 1 : 0;
hw->right = (buf[0] & 0x02) ? 1 : 0;
@@ -448,6 +442,22 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
}
+ if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) && hw->w == 2) {
+ /* Gesture packet: (x, y, z) at half resolution */
+ priv->mt.x = (((buf[4] & 0x0f) << 8) | buf[1]) << 1;
+ priv->mt.y = (((buf[4] & 0xf0) << 4) | buf[2]) << 1;
+ priv->mt.z = ((buf[3] & 0x30) | (buf[5] & 0x0f)) << 1;
+ return 1;
+ }
+
+ hw->x = (((buf[3] & 0x10) << 8) |
+ ((buf[1] & 0x0f) << 8) |
+ buf[4]);
+ hw->y = (((buf[3] & 0x20) << 7) |
+ ((buf[1] & 0xf0) << 4) |
+ buf[5]);
+ hw->z = buf[2];
+
if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
((buf[0] ^ buf[3]) & 0x02)) {
switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
@@ -485,7 +495,8 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
return 0;
}
-static void set_slot(struct input_dev *dev, int slot, bool active, int x, int y)
+static void synaptics_report_semi_mt_slot(struct input_dev *dev, int slot,
+ bool active, int x, int y)
{
input_mt_slot(dev, slot);
input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
@@ -502,14 +513,16 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev,
int num_fingers)
{
if (num_fingers >= 2) {
- set_slot(dev, 0, true, min(a->x, b->x), min(a->y, b->y));
- set_slot(dev, 1, true, max(a->x, b->x), max(a->y, b->y));
+ synaptics_report_semi_mt_slot(dev, 0, true, min(a->x, b->x),
+ min(a->y, b->y));
+ synaptics_report_semi_mt_slot(dev, 1, true, max(a->x, b->x),
+ max(a->y, b->y));
} else if (num_fingers == 1) {
- set_slot(dev, 0, true, a->x, a->y);
- set_slot(dev, 1, false, 0, 0);
+ synaptics_report_semi_mt_slot(dev, 0, true, a->x, a->y);
+ synaptics_report_semi_mt_slot(dev, 1, false, 0, 0);
} else {
- set_slot(dev, 0, false, 0, 0);
- set_slot(dev, 1, false, 0, 0);
+ synaptics_report_semi_mt_slot(dev, 0, false, 0, 0);
+ synaptics_report_semi_mt_slot(dev, 1, false, 0, 0);
}
}
@@ -684,23 +697,36 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
{
int i;
+ int fuzz = SYN_CAP_REDUCED_FILTERING(priv->ext_cap_0c) ?
+ SYN_REDUCED_FILTER_FUZZ : 0;
__set_bit(INPUT_PROP_POINTER, dev->propbit);
__set_bit(EV_ABS, dev->evbit);
input_set_abs_params(dev, ABS_X,
- XMIN_NOMINAL, priv->x_max ?: XMAX_NOMINAL, 0, 0);
+ priv->x_min ?: XMIN_NOMINAL,
+ priv->x_max ?: XMAX_NOMINAL,
+ fuzz, 0);
input_set_abs_params(dev, ABS_Y,
- YMIN_NOMINAL, priv->y_max ?: YMAX_NOMINAL, 0, 0);
+ priv->y_min ?: YMIN_NOMINAL,
+ priv->y_max ?: YMAX_NOMINAL,
+ fuzz, 0);
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) {
__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
input_mt_init_slots(dev, 2);
- input_set_abs_params(dev, ABS_MT_POSITION_X, XMIN_NOMINAL,
- priv->x_max ?: XMAX_NOMINAL, 0, 0);
- input_set_abs_params(dev, ABS_MT_POSITION_Y, YMIN_NOMINAL,
- priv->y_max ?: YMAX_NOMINAL, 0, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_X,
+ priv->x_min ?: XMIN_NOMINAL,
+ priv->x_max ?: XMAX_NOMINAL,
+ fuzz, 0);
+ input_set_abs_params(dev, ABS_MT_POSITION_Y,
+ priv->y_min ?: YMIN_NOMINAL,
+ priv->y_max ?: YMAX_NOMINAL,
+ fuzz, 0);
+
+ input_abs_set_res(dev, ABS_MT_POSITION_X, priv->x_res);
+ input_abs_set_res(dev, ABS_MT_POSITION_Y, priv->y_res);
}
if (SYN_CAP_PALMDETECT(priv->capabilities))
@@ -971,4 +997,3 @@ bool synaptics_supported(void)
}
#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
-
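The max- and min-coordinate replies handled above use the same 3-byte packing, which is why the decode appears twice in the hunk. Pulled out as a stand-alone sketch (the helper name is invented; the bit layout is exactly the one in the patch):

    /* Decode a SYN_QUE_EXT_MAX_COORDS / SYN_QUE_EXT_MIN_COORDS reply:
     * resp[0] and resp[2] carry the high bits of X and Y, while resp[1]
     * holds the remaining low bits of both axes. */
    static void synaptics_decode_coords(const unsigned char resp[3],
                                        unsigned int *x, unsigned int *y)
    {
            *x = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
            *y = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
    }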
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 7453938..ca040aa 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -19,7 +19,8 @@
#define SYN_QUE_RESOLUTION 0x08
#define SYN_QUE_EXT_CAPAB 0x09
#define SYN_QUE_EXT_CAPAB_0C 0x0c
-#define SYN_QUE_EXT_DIMENSIONS 0x0d
+#define SYN_QUE_EXT_MAX_COORDS 0x0d
+#define SYN_QUE_EXT_MIN_COORDS 0x0f
/* synaptics modes */
#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
@@ -66,18 +67,21 @@
* 1 0x60 multifinger mode identifies firmware finger counting
* (not reporting!) algorithm.
* Not particularly meaningful
- * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered
- * 2 0x01 clickpad bit 1 2-button ClickPad
- * 2 0x02 deluxe LED controls touchpad support LED commands
+ * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered
+ * 2 0x01 clickpad bit 1 2-button ClickPad
+ * 2 0x02 deluxe LED controls touchpad support LED commands
* ala multimedia control bar
* 2 0x04 reduced filtering firmware does less filtering on
* position data, driver should watch
* for noise.
+ * 2 0x20 report min query 0x0f gives min coord reported
*/
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
+#define SYN_CAP_MIN_DIMENSIONS(ex0c) ((ex0c) & 0x002000)
#define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
+#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
/* synaptics modes query bits */
#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
@@ -104,6 +108,9 @@
#define SYN_NEWABS_RELAXED 2
#define SYN_OLDABS 3
+/* amount to fuzz position data when touchpad reports reduced filtering */
+#define SYN_REDUCED_FILTER_FUZZ 8
+
/*
* A structure to describe the state of the touchpad hardware (buttons and pad)
*/
@@ -130,7 +137,8 @@ struct synaptics_data {
unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
unsigned long int identity; /* Identification */
unsigned int x_res, y_res; /* X/Y resolution in units/mm */
- unsigned int x_max, y_max; /* Max dimensions (from FW) */
+ unsigned int x_max, y_max; /* Max coordinates (from FW) */
+ unsigned int x_min, y_min; /* Min coordinates (from FW) */
unsigned char pkt_type; /* packet type - old, new, etc */
unsigned char mode; /* current mode byte */
diff --git a/drivers/input/serio/at32psif.c b/drivers/input/serio/at32psif.c
index 6ee8f0d..95280f9 100644
--- a/drivers/input/serio/at32psif.c
+++ b/drivers/input/serio/at32psif.c
@@ -372,6 +372,6 @@ static void __exit psif_exit(void)
module_init(psif_init);
module_exit(psif_exit);
-MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("Atmel AVR32 PSIF PS/2 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 4220620..979c443 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -795,7 +795,7 @@ int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback)
/************************* Keepalive timer task *********************/
-void hp_sdc_kicker (unsigned long data)
+static void hp_sdc_kicker(unsigned long data)
{
tasklet_schedule(&hp_sdc.task);
/* Re-insert the periodic task. */
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 80baa53..d64c5a4 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -23,7 +23,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
-
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 0a619c5..6d89fd1 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -225,7 +225,6 @@
/* toolMode codes
*/
#define AIPTEK_TOOL_BUTTON_PEN_MODE BTN_TOOL_PEN
-#define AIPTEK_TOOL_BUTTON_PEN_MODE BTN_TOOL_PEN
#define AIPTEK_TOOL_BUTTON_PENCIL_MODE BTN_TOOL_PENCIL
#define AIPTEK_TOOL_BUTTON_BRUSH_MODE BTN_TOOL_BRUSH
#define AIPTEK_TOOL_BUTTON_AIRBRUSH_MODE BTN_TOOL_AIRBRUSH
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 449c0a4..d27c9d9 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -49,6 +49,7 @@ struct hid_descriptor {
#define USB_REQ_GET_REPORT 0x01
#define USB_REQ_SET_REPORT 0x09
#define WAC_HID_FEATURE_REPORT 0x03
+#define WAC_MSG_RETRIES 5
static int usb_get_report(struct usb_interface *intf, unsigned char type,
unsigned char id, void *buf, int size)
@@ -165,7 +166,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
report,
hid_desc->wDescriptorLength,
5000); /* 5 secs */
- } while (result < 0 && limit++ < 5);
+ } while (result < 0 && limit++ < WAC_MSG_RETRIES);
/* No need to parse the Descriptor. It isn't an error though */
if (result < 0)
@@ -319,24 +320,26 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
int limit = 0, report_id = 2;
int error = -ENOMEM;
- rep_data = kmalloc(2, GFP_KERNEL);
+ rep_data = kmalloc(4, GFP_KERNEL);
if (!rep_data)
return error;
- /* ask to report tablet data if it is 2FGT Tablet PC or
+ /* ask to report tablet data if it is MT Tablet PC or
* not a Tablet PC */
if (features->type == TABLETPC2FG) {
do {
rep_data[0] = 3;
rep_data[1] = 4;
+ rep_data[2] = 0;
+ rep_data[3] = 0;
report_id = 3;
error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
- report_id, rep_data, 2);
+ report_id, rep_data, 4);
if (error >= 0)
error = usb_get_report(intf,
WAC_HID_FEATURE_REPORT, report_id,
- rep_data, 3);
- } while ((error < 0 || rep_data[1] != 4) && limit++ < 5);
+ rep_data, 4);
+ } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
} else if (features->type != TABLETPC) {
do {
rep_data[0] = 2;
@@ -347,7 +350,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
error = usb_get_report(intf,
WAC_HID_FEATURE_REPORT, report_id,
rep_data, 2);
- } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
+ } while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES);
}
kfree(rep_data);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 08ba5ad..c1c2f7b 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -15,6 +15,7 @@
#include "wacom_wac.h"
#include "wacom.h"
#include <linux/input/mt.h>
+#include <linux/hid.h>
/* resolution for penabled devices */
#define WACOM_PL_RES 20
@@ -264,6 +265,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
wacom->id[0] = 0;
input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
input_report_key(input, wacom->tool[0], prox);
+ input_event(input, EV_MSC, MSC_SERIAL, 1);
input_sync(input); /* sync last event */
}
@@ -273,11 +275,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
prox = data[7] & 0xf8;
if (prox || wacom->id[1]) {
wacom->id[1] = PAD_DEVICE_ID;
- input_report_key(input, BTN_0, (data[7] & 0x40));
- input_report_key(input, BTN_4, (data[7] & 0x80));
+ input_report_key(input, BTN_BACK, (data[7] & 0x40));
+ input_report_key(input, BTN_FORWARD, (data[7] & 0x80));
rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3);
input_report_rel(input, REL_WHEEL, rw);
- input_report_key(input, BTN_TOOL_FINGER, 0xf0);
if (!prox)
wacom->id[1] = 0;
input_report_abs(input, ABS_MISC, wacom->id[1]);
@@ -290,18 +291,17 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
prox = (data[7] & 0xf8) || data[8];
if (prox || wacom->id[1]) {
wacom->id[1] = PAD_DEVICE_ID;
- input_report_key(input, BTN_0, (data[7] & 0x08));
- input_report_key(input, BTN_1, (data[7] & 0x20));
- input_report_key(input, BTN_4, (data[7] & 0x10));
- input_report_key(input, BTN_5, (data[7] & 0x40));
+ input_report_key(input, BTN_BACK, (data[7] & 0x08));
+ input_report_key(input, BTN_LEFT, (data[7] & 0x20));
+ input_report_key(input, BTN_FORWARD, (data[7] & 0x10));
+ input_report_key(input, BTN_RIGHT, (data[7] & 0x40));
input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f));
- input_report_key(input, BTN_TOOL_FINGER, 0xf0);
if (!prox)
wacom->id[1] = 0;
input_report_abs(input, ABS_MISC, wacom->id[1]);
input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
+ retval = 1;
}
- retval = 1;
break;
}
exit:
@@ -494,10 +494,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
/* pad packets. Works as a second tool and is always in prox */
if (data[0] == WACOM_REPORT_INTUOSPAD) {
- /* initiate the pad as a device */
- if (wacom->tool[1] != BTN_TOOL_FINGER)
- wacom->tool[1] = BTN_TOOL_FINGER;
-
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
input_report_key(input, BTN_0, (data[2] & 0x01));
input_report_key(input, BTN_1, (data[3] & 0x01));
@@ -1080,18 +1076,14 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
switch (wacom_wac->features.type) {
case WACOM_MO:
- __set_bit(BTN_1, input_dev->keybit);
- __set_bit(BTN_5, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
/* fall through */
case WACOM_G4:
input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_0, input_dev->keybit);
- __set_bit(BTN_4, input_dev->keybit);
+ __set_bit(BTN_BACK, input_dev->keybit);
+ __set_bit(BTN_FORWARD, input_dev->keybit);
/* fall through */
case GRAPHIRE:
@@ -1127,10 +1119,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
case CINTIQ:
for (i = 0; i < 8; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
+ if (wacom_wac->features.type != WACOM_21UX2) {
+ input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
+ }
+
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
wacom_setup_cintiq(wacom_wac);
break;
@@ -1151,8 +1145,6 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_2, input_dev->keybit);
__set_bit(BTN_3, input_dev->keybit);
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
/* fall through */
@@ -1170,7 +1162,6 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
case INTUOS4S:
for (i = 0; i < 7; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
wacom_setup_intuos(wacom_wac);
@@ -1295,6 +1286,12 @@ static const struct wacom_features wacom_features_0x65 =
static const struct wacom_features wacom_features_0x69 =
{ "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511,
63, GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES };
+static const struct wacom_features wacom_features_0x6A =
+ { "Wacom Bamboo1 4x6", WACOM_PKGLEN_GRAPHIRE, 14760, 9225, 1023,
+ 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x6B =
+ { "Wacom Bamboo1 5x8", WACOM_PKGLEN_GRAPHIRE, 21648, 13530, 1023,
+ 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x20 =
{ "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1427,6 +1424,9 @@ static const struct wacom_features wacom_features_0x90 =
static const struct wacom_features wacom_features_0x93 =
{ "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x97 =
+ { "Wacom ISDv4 97", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 511,
+ 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x9A =
{ "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1458,7 +1458,10 @@ static const struct wacom_features wacom_features_0xD3 =
{ "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD4 =
- { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 255,
+ { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
+ 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xD5 =
+ { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD6 =
{ "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
@@ -1483,6 +1486,11 @@ static const struct wacom_features wacom_features_0x6004 =
USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \
.driver_info = (kernel_ulong_t)&wacom_features_##prod
+#define USB_DEVICE_DETAILED(prod, class, sub, proto) \
+ USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_WACOM, prod, class, \
+ sub, proto), \
+ .driver_info = (kernel_ulong_t)&wacom_features_##prod
+
#define USB_DEVICE_LENOVO(prod) \
USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \
.driver_info = (kernel_ulong_t)&wacom_features_##prod
@@ -1506,6 +1514,8 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x64) },
{ USB_DEVICE_WACOM(0x65) },
{ USB_DEVICE_WACOM(0x69) },
+ { USB_DEVICE_WACOM(0x6A) },
+ { USB_DEVICE_WACOM(0x6B) },
{ USB_DEVICE_WACOM(0x20) },
{ USB_DEVICE_WACOM(0x21) },
{ USB_DEVICE_WACOM(0x22) },
@@ -1545,12 +1555,19 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xC5) },
{ USB_DEVICE_WACOM(0xC6) },
{ USB_DEVICE_WACOM(0xC7) },
- { USB_DEVICE_WACOM(0xCE) },
+ /*
+ * The DTU-2231 has two interfaces in the same configuration;
+ * only one of them is used.
+ */
+ { USB_DEVICE_DETAILED(0xCE, USB_CLASS_HID,
+ USB_INTERFACE_SUBCLASS_BOOT,
+ USB_INTERFACE_PROTOCOL_MOUSE) },
{ USB_DEVICE_WACOM(0xD0) },
{ USB_DEVICE_WACOM(0xD1) },
{ USB_DEVICE_WACOM(0xD2) },
{ USB_DEVICE_WACOM(0xD3) },
{ USB_DEVICE_WACOM(0xD4) },
+ { USB_DEVICE_WACOM(0xD5) },
{ USB_DEVICE_WACOM(0xD6) },
{ USB_DEVICE_WACOM(0xD7) },
{ USB_DEVICE_WACOM(0xD8) },
@@ -1560,6 +1577,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xCC) },
{ USB_DEVICE_WACOM(0x90) },
{ USB_DEVICE_WACOM(0x93) },
+ { USB_DEVICE_WACOM(0x97) },
{ USB_DEVICE_WACOM(0x9A) },
{ USB_DEVICE_WACOM(0x9F) },
{ USB_DEVICE_WACOM(0xE2) },
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index bc3b518..131f9d1 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -249,12 +249,14 @@ static void __ad7879_enable(struct ad7879 *ts)
static void __ad7879_disable(struct ad7879 *ts)
{
+ u16 reg = (ts->cmd_crtl2 & ~AD7879_PM(-1)) |
+ AD7879_PM(AD7879_PM_SHUTDOWN);
disable_irq(ts->irq);
if (del_timer_sync(&ts->timer))
ad7879_ts_event_release(ts);
- ad7879_write(ts, AD7879_REG_CTRL2, AD7879_PM(AD7879_PM_SHUTDOWN));
+ ad7879_write(ts, AD7879_REG_CTRL2, reg);
}
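The fix above turns __ad7879_disable() into a proper read-modify-write of the cached CTRL2 value, so only the power-management field changes and the remaining control bits survive the shutdown. A generic sketch of the clear-then-set idiom it relies on (field width, position and shutdown code below are invented for illustration, not the real AD7879 layout):

    /* FIELD(-1) yields an all-ones mask for the field, so ANDing with
     * ~FIELD(-1) clears only that field before the new value is ORed in. */
    #define FIELD(x)        (((x) & 0x3) << 14)     /* assumed 2-bit field */

    static unsigned short ctrl2_shutdown(unsigned short cached_ctrl2)
    {
            return (cached_ctrl2 & ~FIELD(-1)) | FIELD(2 /* assumed code */);
    }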
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 5196861..d507b9b 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -967,17 +967,12 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
ts->get_pendown_state = pdata->get_pendown_state;
} else if (gpio_is_valid(pdata->gpio_pendown)) {
- err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
+ err = gpio_request_one(pdata->gpio_pendown, GPIOF_IN,
+ "ads7846_pendown");
if (err) {
- dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
- pdata->gpio_pendown);
- return err;
- }
- err = gpio_direction_input(pdata->gpio_pendown);
- if (err) {
- dev_err(&spi->dev, "failed to setup pendown GPIO%d\n",
- pdata->gpio_pendown);
- gpio_free(pdata->gpio_pendown);
+ dev_err(&spi->dev,
+ "failed to request/setup pendown GPIO%d: %d\n",
+ pdata->gpio_pendown, err);
return err;
}
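The hunk above folds the gpio_request() + gpio_direction_input() pair into a single gpio_request_one() call with GPIOF_IN, which also collapses the two error paths into one message. In kernel context the minimal equivalent looks like this (sketch only, error handling left to the caller):

    #include <linux/gpio.h>

    /* Request the pendown GPIO and configure it as an input in one step. */
    static int request_pendown_gpio(unsigned int gpio)
    {
            return gpio_request_one(gpio, GPIOF_IN, "ads7846_pendown");
    }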
diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c
index fa8e56b..8034cbb 100644
--- a/drivers/input/touchscreen/atmel-wm97xx.c
+++ b/drivers/input/touchscreen/atmel-wm97xx.c
@@ -164,7 +164,7 @@ static irqreturn_t atmel_wm97xx_channel_b_interrupt(int irq, void *dev_id)
data = ac97c_readl(atmel_wm97xx, CBRHR);
value = data & 0x0fff;
- source = data & WM97XX_ADCSRC_MASK;
+ source = data & WM97XX_ADCSEL_MASK;
pen_down = (data & WM97XX_PEN_DOWN) >> 8;
if (source == WM97XX_ADCSEL_X)
@@ -442,6 +442,6 @@ static void __exit atmel_wm97xx_exit(void)
}
module_exit(atmel_wm97xx_exit);
-MODULE_AUTHOR("Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("wm97xx continuous touch driver for Atmel AT91 and AVR32");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 1e61387..f5d6685 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -48,41 +48,47 @@
#define MXT_OBJECT_SIZE 6
/* Object types */
-#define MXT_DEBUG_DIAGNOSTIC 37
-#define MXT_GEN_MESSAGE 5
-#define MXT_GEN_COMMAND 6
-#define MXT_GEN_POWER 7
-#define MXT_GEN_ACQUIRE 8
-#define MXT_TOUCH_MULTI 9
-#define MXT_TOUCH_KEYARRAY 15
-#define MXT_TOUCH_PROXIMITY 23
-#define MXT_PROCI_GRIPFACE 20
-#define MXT_PROCG_NOISE 22
-#define MXT_PROCI_ONETOUCH 24
-#define MXT_PROCI_TWOTOUCH 27
-#define MXT_PROCI_GRIP 40
-#define MXT_PROCI_PALM 41
-#define MXT_SPT_COMMSCONFIG 18
-#define MXT_SPT_GPIOPWM 19
-#define MXT_SPT_SELFTEST 25
-#define MXT_SPT_CTECONFIG 28
-#define MXT_SPT_USERDATA 38
-#define MXT_SPT_DIGITIZER 43
-#define MXT_SPT_MESSAGECOUNT 44
-
-/* MXT_GEN_COMMAND field */
+#define MXT_DEBUG_DIAGNOSTIC_T37 37
+#define MXT_GEN_MESSAGE_T5 5
+#define MXT_GEN_COMMAND_T6 6
+#define MXT_GEN_POWER_T7 7
+#define MXT_GEN_ACQUIRE_T8 8
+#define MXT_GEN_DATASOURCE_T53 53
+#define MXT_TOUCH_MULTI_T9 9
+#define MXT_TOUCH_KEYARRAY_T15 15
+#define MXT_TOUCH_PROXIMITY_T23 23
+#define MXT_TOUCH_PROXKEY_T52 52
+#define MXT_PROCI_GRIPFACE_T20 20
+#define MXT_PROCG_NOISE_T22 22
+#define MXT_PROCI_ONETOUCH_T24 24
+#define MXT_PROCI_TWOTOUCH_T27 27
+#define MXT_PROCI_GRIP_T40 40
+#define MXT_PROCI_PALM_T41 41
+#define MXT_PROCI_TOUCHSUPPRESSION_T42 42
+#define MXT_PROCI_STYLUS_T47 47
+#define MXT_PROCG_NOISESUPPRESSION_T48 48
+#define MXT_SPT_COMMSCONFIG_T18 18
+#define MXT_SPT_GPIOPWM_T19 19
+#define MXT_SPT_SELFTEST_T25 25
+#define MXT_SPT_CTECONFIG_T28 28
+#define MXT_SPT_USERDATA_T38 38
+#define MXT_SPT_DIGITIZER_T43 43
+#define MXT_SPT_MESSAGECOUNT_T44 44
+#define MXT_SPT_CTECONFIG_T46 46
+
+/* MXT_GEN_COMMAND_T6 field */
#define MXT_COMMAND_RESET 0
#define MXT_COMMAND_BACKUPNV 1
#define MXT_COMMAND_CALIBRATE 2
#define MXT_COMMAND_REPORTALL 3
#define MXT_COMMAND_DIAGNOSTIC 5
-/* MXT_GEN_POWER field */
+/* MXT_GEN_POWER_T7 field */
#define MXT_POWER_IDLEACQINT 0
#define MXT_POWER_ACTVACQINT 1
#define MXT_POWER_ACTV2IDLETO 2
-/* MXT_GEN_ACQUIRE field */
+/* MXT_GEN_ACQUIRE_T8 field */
#define MXT_ACQUIRE_CHRGTIME 0
#define MXT_ACQUIRE_TCHDRIFT 2
#define MXT_ACQUIRE_DRIFTST 3
@@ -91,7 +97,7 @@
#define MXT_ACQUIRE_ATCHCALST 6
#define MXT_ACQUIRE_ATCHCALSTHR 7
-/* MXT_TOUCH_MULTI field */
+/* MXT_TOUCH_MULTI_T9 field */
#define MXT_TOUCH_CTRL 0
#define MXT_TOUCH_XORIGIN 1
#define MXT_TOUCH_YORIGIN 2
@@ -121,7 +127,7 @@
#define MXT_TOUCH_YEDGEDIST 29
#define MXT_TOUCH_JUMPLIMIT 30
-/* MXT_PROCI_GRIPFACE field */
+/* MXT_PROCI_GRIPFACE_T20 field */
#define MXT_GRIPFACE_CTRL 0
#define MXT_GRIPFACE_XLOGRIP 1
#define MXT_GRIPFACE_XHIGRIP 2
@@ -151,11 +157,11 @@
#define MXT_NOISE_FREQ4 15
#define MXT_NOISE_IDLEGCAFVALID 16
-/* MXT_SPT_COMMSCONFIG */
+/* MXT_SPT_COMMSCONFIG_T18 */
#define MXT_COMMS_CTRL 0
#define MXT_COMMS_CMD 1
-/* MXT_SPT_CTECONFIG field */
+/* MXT_SPT_CTECONFIG_T28 field */
#define MXT_CTE_CTRL 0
#define MXT_CTE_CMD 1
#define MXT_CTE_MODE 2
@@ -166,7 +172,7 @@
#define MXT_VOLTAGE_DEFAULT 2700000
#define MXT_VOLTAGE_STEP 10000
-/* Define for MXT_GEN_COMMAND */
+/* Define for MXT_GEN_COMMAND_T6 */
#define MXT_BOOT_VALUE 0xa5
#define MXT_BACKUP_VALUE 0x55
#define MXT_BACKUP_TIME 25 /* msec */
@@ -238,6 +244,7 @@ struct mxt_finger {
int x;
int y;
int area;
+ int pressure;
};
/* Each client has this additional data */
@@ -256,24 +263,31 @@ struct mxt_data {
static bool mxt_object_readable(unsigned int type)
{
switch (type) {
- case MXT_GEN_MESSAGE:
- case MXT_GEN_COMMAND:
- case MXT_GEN_POWER:
- case MXT_GEN_ACQUIRE:
- case MXT_TOUCH_MULTI:
- case MXT_TOUCH_KEYARRAY:
- case MXT_TOUCH_PROXIMITY:
- case MXT_PROCI_GRIPFACE:
- case MXT_PROCG_NOISE:
- case MXT_PROCI_ONETOUCH:
- case MXT_PROCI_TWOTOUCH:
- case MXT_PROCI_GRIP:
- case MXT_PROCI_PALM:
- case MXT_SPT_COMMSCONFIG:
- case MXT_SPT_GPIOPWM:
- case MXT_SPT_SELFTEST:
- case MXT_SPT_CTECONFIG:
- case MXT_SPT_USERDATA:
+ case MXT_GEN_MESSAGE_T5:
+ case MXT_GEN_COMMAND_T6:
+ case MXT_GEN_POWER_T7:
+ case MXT_GEN_ACQUIRE_T8:
+ case MXT_GEN_DATASOURCE_T53:
+ case MXT_TOUCH_MULTI_T9:
+ case MXT_TOUCH_KEYARRAY_T15:
+ case MXT_TOUCH_PROXIMITY_T23:
+ case MXT_TOUCH_PROXKEY_T52:
+ case MXT_PROCI_GRIPFACE_T20:
+ case MXT_PROCG_NOISE_T22:
+ case MXT_PROCI_ONETOUCH_T24:
+ case MXT_PROCI_TWOTOUCH_T27:
+ case MXT_PROCI_GRIP_T40:
+ case MXT_PROCI_PALM_T41:
+ case MXT_PROCI_TOUCHSUPPRESSION_T42:
+ case MXT_PROCI_STYLUS_T47:
+ case MXT_PROCG_NOISESUPPRESSION_T48:
+ case MXT_SPT_COMMSCONFIG_T18:
+ case MXT_SPT_GPIOPWM_T19:
+ case MXT_SPT_SELFTEST_T25:
+ case MXT_SPT_CTECONFIG_T28:
+ case MXT_SPT_USERDATA_T38:
+ case MXT_SPT_DIGITIZER_T43:
+ case MXT_SPT_CTECONFIG_T46:
return true;
default:
return false;
@@ -283,21 +297,28 @@ static bool mxt_object_readable(unsigned int type)
static bool mxt_object_writable(unsigned int type)
{
switch (type) {
- case MXT_GEN_COMMAND:
- case MXT_GEN_POWER:
- case MXT_GEN_ACQUIRE:
- case MXT_TOUCH_MULTI:
- case MXT_TOUCH_KEYARRAY:
- case MXT_TOUCH_PROXIMITY:
- case MXT_PROCI_GRIPFACE:
- case MXT_PROCG_NOISE:
- case MXT_PROCI_ONETOUCH:
- case MXT_PROCI_TWOTOUCH:
- case MXT_PROCI_GRIP:
- case MXT_PROCI_PALM:
- case MXT_SPT_GPIOPWM:
- case MXT_SPT_SELFTEST:
- case MXT_SPT_CTECONFIG:
+ case MXT_GEN_COMMAND_T6:
+ case MXT_GEN_POWER_T7:
+ case MXT_GEN_ACQUIRE_T8:
+ case MXT_TOUCH_MULTI_T9:
+ case MXT_TOUCH_KEYARRAY_T15:
+ case MXT_TOUCH_PROXIMITY_T23:
+ case MXT_TOUCH_PROXKEY_T52:
+ case MXT_PROCI_GRIPFACE_T20:
+ case MXT_PROCG_NOISE_T22:
+ case MXT_PROCI_ONETOUCH_T24:
+ case MXT_PROCI_TWOTOUCH_T27:
+ case MXT_PROCI_GRIP_T40:
+ case MXT_PROCI_PALM_T41:
+ case MXT_PROCI_TOUCHSUPPRESSION_T42:
+ case MXT_PROCI_STYLUS_T47:
+ case MXT_PROCG_NOISESUPPRESSION_T48:
+ case MXT_SPT_COMMSCONFIG_T18:
+ case MXT_SPT_GPIOPWM_T19:
+ case MXT_SPT_SELFTEST_T25:
+ case MXT_SPT_CTECONFIG_T28:
+ case MXT_SPT_DIGITIZER_T43:
+ case MXT_SPT_CTECONFIG_T46:
return true;
default:
return false;
@@ -455,7 +476,7 @@ static int mxt_read_message(struct mxt_data *data,
struct mxt_object *object;
u16 reg;
- object = mxt_get_object(data, MXT_GEN_MESSAGE);
+ object = mxt_get_object(data, MXT_GEN_MESSAGE_T5);
if (!object)
return -EINVAL;
@@ -516,6 +537,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
finger[id].x);
input_report_abs(input_dev, ABS_MT_POSITION_Y,
finger[id].y);
+ input_report_abs(input_dev, ABS_MT_PRESSURE,
+ finger[id].pressure);
} else {
finger[id].status = 0;
}
@@ -526,6 +549,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
if (status != MXT_RELEASE) {
input_report_abs(input_dev, ABS_X, finger[single_id].x);
input_report_abs(input_dev, ABS_Y, finger[single_id].y);
+ input_report_abs(input_dev,
+ ABS_PRESSURE, finger[single_id].pressure);
}
input_sync(input_dev);
@@ -540,6 +565,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
int x;
int y;
int area;
+ int pressure;
/* Check the touch is present on the screen */
if (!(status & MXT_DETECT)) {
@@ -564,6 +590,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
y = y >> 2;
area = message->message[4];
+ pressure = message->message[5];
dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,
status & MXT_MOVE ? "moved" : "pressed",
@@ -574,6 +601,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
finger[id].x = x;
finger[id].y = y;
finger[id].area = area;
+ finger[id].pressure = pressure;
mxt_input_report(data, id);
}
@@ -597,8 +625,8 @@ static irqreturn_t mxt_interrupt(int irq, void *dev_id)
reportid = message.reportid;
- /* whether reportid is thing of MXT_TOUCH_MULTI */
- object = mxt_get_object(data, MXT_TOUCH_MULTI);
+ /* check whether this reportid belongs to MXT_TOUCH_MULTI_T9 */
+ object = mxt_get_object(data, MXT_TOUCH_MULTI_T9);
if (!object)
goto end;
@@ -635,7 +663,9 @@ static int mxt_check_reg_init(struct mxt_data *data)
if (!mxt_object_writable(object->type))
continue;
- for (j = 0; j < object->size + 1; j++) {
+ for (j = 0;
+ j < (object->size + 1) * (object->instances + 1);
+ j++) {
config_offset = index + j;
if (config_offset > pdata->config_length) {
dev_err(dev, "Not enough config data!\n");
@@ -644,7 +674,7 @@ static int mxt_check_reg_init(struct mxt_data *data)
mxt_write_object(data, object->type, j,
pdata->config[config_offset]);
}
- index += object->size + 1;
+ index += (object->size + 1) * (object->instances + 1);
}
return 0;
@@ -678,31 +708,31 @@ static void mxt_handle_pdata(struct mxt_data *data)
u8 voltage;
/* Set touchscreen lines */
- mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_XSIZE,
+ mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_XSIZE,
pdata->x_line);
- mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_YSIZE,
+ mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_YSIZE,
pdata->y_line);
/* Set touchscreen orient */
- mxt_write_object(data, MXT_TOUCH_MULTI, MXT_TOUCH_ORIENT,
+ mxt_write_object(data, MXT_TOUCH_MULTI_T9, MXT_TOUCH_ORIENT,
pdata->orient);
/* Set touchscreen burst length */
- mxt_write_object(data, MXT_TOUCH_MULTI,
+ mxt_write_object(data, MXT_TOUCH_MULTI_T9,
MXT_TOUCH_BLEN, pdata->blen);
/* Set touchscreen threshold */
- mxt_write_object(data, MXT_TOUCH_MULTI,
+ mxt_write_object(data, MXT_TOUCH_MULTI_T9,
MXT_TOUCH_TCHTHR, pdata->threshold);
/* Set touchscreen resolution */
- mxt_write_object(data, MXT_TOUCH_MULTI,
+ mxt_write_object(data, MXT_TOUCH_MULTI_T9,
MXT_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff);
- mxt_write_object(data, MXT_TOUCH_MULTI,
+ mxt_write_object(data, MXT_TOUCH_MULTI_T9,
MXT_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8);
- mxt_write_object(data, MXT_TOUCH_MULTI,
+ mxt_write_object(data, MXT_TOUCH_MULTI_T9,
MXT_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff);
- mxt_write_object(data, MXT_TOUCH_MULTI,
+ mxt_write_object(data, MXT_TOUCH_MULTI_T9,
MXT_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8);
/* Set touchscreen voltage */
@@ -715,7 +745,7 @@ static void mxt_handle_pdata(struct mxt_data *data)
voltage = (pdata->voltage - MXT_VOLTAGE_DEFAULT) /
MXT_VOLTAGE_STEP;
- mxt_write_object(data, MXT_SPT_CTECONFIG,
+ mxt_write_object(data, MXT_SPT_CTECONFIG_T28,
MXT_CTE_VOLTAGE, voltage);
}
}
@@ -819,13 +849,13 @@ static int mxt_initialize(struct mxt_data *data)
mxt_handle_pdata(data);
/* Backup to memory */
- mxt_write_object(data, MXT_GEN_COMMAND,
+ mxt_write_object(data, MXT_GEN_COMMAND_T6,
MXT_COMMAND_BACKUPNV,
MXT_BACKUP_VALUE);
msleep(MXT_BACKUP_TIME);
/* Soft reset */
- mxt_write_object(data, MXT_GEN_COMMAND,
+ mxt_write_object(data, MXT_GEN_COMMAND_T6,
MXT_COMMAND_RESET, 1);
msleep(MXT_RESET_TIME);
@@ -921,7 +951,7 @@ static int mxt_load_fw(struct device *dev, const char *fn)
}
/* Change to the bootloader mode */
- mxt_write_object(data, MXT_GEN_COMMAND,
+ mxt_write_object(data, MXT_GEN_COMMAND_T6,
MXT_COMMAND_RESET, MXT_BOOT_VALUE);
msleep(MXT_RESET_TIME);
@@ -1027,14 +1057,14 @@ static void mxt_start(struct mxt_data *data)
{
/* Touch enable */
mxt_write_object(data,
- MXT_TOUCH_MULTI, MXT_TOUCH_CTRL, 0x83);
+ MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
}
static void mxt_stop(struct mxt_data *data)
{
/* Touch disable */
mxt_write_object(data,
- MXT_TOUCH_MULTI, MXT_TOUCH_CTRL, 0);
+ MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
}
static int mxt_input_open(struct input_dev *dev)
@@ -1094,6 +1124,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
0, data->max_x, 0, 0);
input_set_abs_params(input_dev, ABS_Y,
0, data->max_y, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, 255, 0, 0);
/* For multi touch */
input_mt_init_slots(input_dev, MXT_MAX_FINGER);
@@ -1103,6 +1135,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
0, data->max_x, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
0, data->max_y, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_PRESSURE,
+ 0, 255, 0, 0);
input_set_drvdata(input_dev, data);
i2c_set_clientdata(client, data);
@@ -1182,7 +1216,7 @@ static int mxt_resume(struct device *dev)
struct input_dev *input_dev = data->input_dev;
/* Soft reset */
- mxt_write_object(data, MXT_GEN_COMMAND,
+ mxt_write_object(data, MXT_GEN_COMMAND_T6,
MXT_COMMAND_RESET, 1);
msleep(MXT_RESET_TIME);
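
Note on the atmel_mxt_ts config change above: the per-object download now scales with the number of object instances, and the + 1 terms imply that the driver stores both size and instances as the hardware-reported value minus one. A minimal sketch of that calculation, using an illustrative descriptor struct rather than the driver's own:

/* Illustrative only (u8 from <linux/types.h>): mirrors the
 * (size + 1) * (instances + 1) walk in mxt_check_reg_init() above. */
struct mxt_object_desc {
	u8 type;
	u8 size;	/* stored as real size minus one */
	u8 instances;	/* stored as real instance count minus one */
};

static unsigned int mxt_object_config_bytes(const struct mxt_object_desc *obj)
{
	/* each instance carries a full copy of the object's registers */
	return (obj->size + 1) * (obj->instances + 1);
}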
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index a93c5c2..d8815c5 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -84,9 +84,9 @@ static int cy8ctmg110_write_regs(struct cy8ctmg110 *tsc, unsigned char reg,
memcpy(i2c_data + 1, value, len);
ret = i2c_master_send(client, i2c_data, len + 1);
- if (ret != 1) {
+ if (ret != len + 1) {
dev_err(&client->dev, "i2c write data cmd failed\n");
- return ret ? ret : -EIO;
+ return ret < 0 ? ret : -EIO;
}
return 0;
@@ -193,6 +193,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
ts->client = client;
ts->input = input_dev;
+ ts->reset_pin = pdata->reset_pin;
+ ts->irq_pin = pdata->irq_pin;
snprintf(ts->phys, sizeof(ts->phys),
"%s/input0", dev_name(&client->dev));
@@ -328,7 +330,7 @@ static int __devexit cy8ctmg110_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id cy8ctmg110_idtable[] = {
+static const struct i2c_device_id cy8ctmg110_idtable[] = {
{ CY8CTMG110_DRIVER_NAME, 1 },
{ }
};
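
Note on the cy8ctmg110 write fix above: i2c_master_send() returns the number of bytes transferred on success (not 1) and a negative errno on failure, so the corrected check compares against len + 1. A hedged sketch of the same check in isolation (the helper name is hypothetical):

/* Sketch only: treat anything short of a full transfer as an error. */
static int i2c_send_all(struct i2c_client *client, const char *buf, int len)
{
	int ret = i2c_master_send(client, buf, len);

	if (ret != len)
		return ret < 0 ? ret : -EIO;	/* keep errno, else -EIO */

	return 0;
}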
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index 66c96bf..3276952 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -448,15 +448,11 @@ static int __devinit mrstouch_read_pmic_id(uint *vendor, uint *rev)
*/
static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
{
- int err, i, found;
+ int found = 0;
+ int err, i;
u8 r8;
- found = -1;
-
for (i = 0; i < MRSTOUCH_MAX_CHANNELS; i++) {
- if (found >= 0)
- break;
-
err = intel_scu_ipc_ioread8(PMICADDR0 + i, &r8);
if (err)
return err;
@@ -466,16 +462,15 @@ static int __devinit mrstouch_chan_parse(struct mrstouch_dev *tsdev)
break;
}
}
- if (found < 0)
- return 0;
if (tsdev->vendor == PMIC_VENDOR_FS) {
- if (found && found > (MRSTOUCH_MAX_CHANNELS - 18))
+ if (found > MRSTOUCH_MAX_CHANNELS - 18)
return -ENOSPC;
} else {
- if (found && found > (MRSTOUCH_MAX_CHANNELS - 4))
+ if (found > MRSTOUCH_MAX_CHANNELS - 4)
return -ENOSPC;
}
+
return found;
}
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index 3242e70..e966c29 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -157,9 +157,9 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
x, y, p);
/* are samples valid */
- if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X ||
- (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y ||
- (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES)
+ if ((x & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_X ||
+ (y & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_Y ||
+ (p & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_PRES)
goto up;
/* coordinate is good */
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 4f2713d..4627fe5 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -9,7 +9,8 @@
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*/
/*
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 22a3411..0e8f63e 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -393,5 +394,5 @@ module_exit(tsc_exit);
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
-MODULE_ALIAS("platform: tnetv107x-ts");
+MODULE_ALIAS("platform:tnetv107x-ts");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/wm9705.c b/drivers/input/touchscreen/wm9705.c
index 98e6117..adc13a5 100644
--- a/drivers/input/touchscreen/wm9705.c
+++ b/drivers/input/touchscreen/wm9705.c
@@ -215,8 +215,9 @@ static inline int is_pden(struct wm97xx *wm)
static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
{
int timeout = 5 * delay;
+ bool wants_pen = adcsel & WM97XX_PEN_DOWN;
- if (!wm->pen_probably_down) {
+ if (wants_pen && !wm->pen_probably_down) {
u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(data & WM97XX_PEN_DOWN))
return RC_PENUP;
@@ -224,13 +225,10 @@ static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
}
/* set up digitiser */
- if (adcsel & 0x8000)
- adcsel = ((adcsel & 0x7fff) + 3) << 12;
-
if (wm->mach_ops && wm->mach_ops->pre_sample)
wm->mach_ops->pre_sample(adcsel);
- wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1,
- adcsel | WM97XX_POLL | WM97XX_DELAY(delay));
+ wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, (adcsel & WM97XX_ADCSEL_MASK)
+ | WM97XX_POLL | WM97XX_DELAY(delay));
/* wait 3 AC97 time slots + delay for conversion */
poll_delay(delay);
@@ -256,13 +254,14 @@ static int wm9705_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
wm->mach_ops->post_sample(adcsel);
/* check we have correct sample */
- if ((*sample & WM97XX_ADCSEL_MASK) != adcsel) {
- dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel,
- *sample & WM97XX_ADCSEL_MASK);
+ if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
+ dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
+ adcsel & WM97XX_ADCSEL_MASK,
+ *sample & WM97XX_ADCSEL_MASK);
return RC_PENUP;
}
- if (!(*sample & WM97XX_PEN_DOWN)) {
+ if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
wm->pen_probably_down = 0;
return RC_PENUP;
}
@@ -277,14 +276,14 @@ static int wm9705_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
{
int rc;
- rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_X, &data->x);
+ rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN, &data->x);
if (rc != RC_VALID)
return rc;
- rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_Y, &data->y);
+ rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y);
if (rc != RC_VALID)
return rc;
if (pil) {
- rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_PRES, &data->p);
+ rc = wm9705_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN, &data->p);
if (rc != RC_VALID)
return rc;
} else
diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c
index 2bc2fb8..6e743e3 100644
--- a/drivers/input/touchscreen/wm9712.c
+++ b/drivers/input/touchscreen/wm9712.c
@@ -255,8 +255,9 @@ static inline int is_pden(struct wm97xx *wm)
static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
{
int timeout = 5 * delay;
+ bool wants_pen = adcsel & WM97XX_PEN_DOWN;
- if (!wm->pen_probably_down) {
+ if (wants_pen && !wm->pen_probably_down) {
u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(data & WM97XX_PEN_DOWN))
return RC_PENUP;
@@ -264,13 +265,10 @@ static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
}
/* set up digitiser */
- if (adcsel & 0x8000)
- adcsel = ((adcsel & 0x7fff) + 3) << 12;
-
if (wm->mach_ops && wm->mach_ops->pre_sample)
wm->mach_ops->pre_sample(adcsel);
- wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1,
- adcsel | WM97XX_POLL | WM97XX_DELAY(delay));
+ wm97xx_reg_write(wm, AC97_WM97XX_DIGITISER1, (adcsel & WM97XX_ADCSEL_MASK)
+ | WM97XX_POLL | WM97XX_DELAY(delay));
/* wait 3 AC97 time slots + delay for conversion */
poll_delay(delay);
@@ -296,13 +294,14 @@ static int wm9712_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
wm->mach_ops->post_sample(adcsel);
/* check we have correct sample */
- if ((*sample & WM97XX_ADCSEL_MASK) != adcsel) {
- dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel,
- *sample & WM97XX_ADCSEL_MASK);
+ if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
+ dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
+ adcsel & WM97XX_ADCSEL_MASK,
+ *sample & WM97XX_ADCSEL_MASK);
return RC_PENUP;
}
- if (!(*sample & WM97XX_PEN_DOWN)) {
+ if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
wm->pen_probably_down = 0;
return RC_PENUP;
}
@@ -387,16 +386,18 @@ static int wm9712_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
if (rc != RC_VALID)
return rc;
} else {
- rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_X, &data->x);
+ rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN,
+ &data->x);
if (rc != RC_VALID)
return rc;
- rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_Y, &data->y);
+ rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN,
+ &data->y);
if (rc != RC_VALID)
return rc;
if (pil && !five_wire) {
- rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_PRES,
+ rc = wm9712_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
&data->p);
if (rc != RC_VALID)
return rc;
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c
index 73ec995..7405353 100644
--- a/drivers/input/touchscreen/wm9713.c
+++ b/drivers/input/touchscreen/wm9713.c
@@ -261,8 +261,9 @@ static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
{
u16 dig1;
int timeout = 5 * delay;
+ bool wants_pen = adcsel & WM97XX_PEN_DOWN;
- if (!wm->pen_probably_down) {
+ if (wants_pen && !wm->pen_probably_down) {
u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD);
if (!(data & WM97XX_PEN_DOWN))
return RC_PENUP;
@@ -270,15 +271,14 @@ static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
}
/* set up digitiser */
- if (adcsel & 0x8000)
- adcsel = 1 << ((adcsel & 0x7fff) + 3);
-
dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1);
dig1 &= ~WM9713_ADCSEL_MASK;
+ /* WM97XX_ADCSEL_* channels need to be converted to WM9713 format */
+ dig1 |= 1 << ((adcsel & WM97XX_ADCSEL_MASK) >> 12);
if (wm->mach_ops && wm->mach_ops->pre_sample)
wm->mach_ops->pre_sample(adcsel);
- wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | adcsel | WM9713_POLL);
+ wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | WM9713_POLL);
/* wait 3 AC97 time slots + delay for conversion */
poll_delay(delay);
@@ -304,13 +304,14 @@ static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample)
wm->mach_ops->post_sample(adcsel);
/* check we have correct sample */
- if ((*sample & WM97XX_ADCSRC_MASK) != ffs(adcsel >> 1) << 12) {
- dev_dbg(wm->dev, "adc wrong sample, read %x got %x", adcsel,
- *sample & WM97XX_ADCSRC_MASK);
+ if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) {
+ dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x",
+ adcsel & WM97XX_ADCSEL_MASK,
+ *sample & WM97XX_ADCSEL_MASK);
return RC_PENUP;
}
- if (!(*sample & WM97XX_PEN_DOWN)) {
+ if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) {
wm->pen_probably_down = 0;
return RC_PENUP;
}
@@ -400,14 +401,14 @@ static int wm9713_poll_touch(struct wm97xx *wm, struct wm97xx_data *data)
if (rc != RC_VALID)
return rc;
} else {
- rc = wm9713_poll_sample(wm, WM9713_ADCSEL_X, &data->x);
+ rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_X | WM97XX_PEN_DOWN, &data->x);
if (rc != RC_VALID)
return rc;
- rc = wm9713_poll_sample(wm, WM9713_ADCSEL_Y, &data->y);
+ rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y);
if (rc != RC_VALID)
return rc;
if (pil) {
- rc = wm9713_poll_sample(wm, WM9713_ADCSEL_PRES,
+ rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN,
&data->p);
if (rc != RC_VALID)
return rc;
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index 5b0f15e..f6328c0 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -122,9 +122,9 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm)
x, y, p);
/* are samples valid */
- if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X ||
- (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y ||
- (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES)
+ if ((x & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_X ||
+ (y & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_Y ||
+ (p & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_PRES)
goto up;
/* coordinate is good */
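
Note on the wm97xx family changes above: callers now OR WM97XX_PEN_DOWN into the requested source, and the poll/validation paths compare only the ADC source field of the returned sample via WM97XX_ADCSEL_MASK. A minimal sketch of that comparison, assuming the WM97XX_ADCSEL_* definitions from wm97xx.h (the helper name is hypothetical):

/* Sketch: true if the sample came from the requested ADC source;
 * PEN_DOWN and other status bits in either value are ignored. */
static inline bool wm97xx_sample_from(int adcsel, int sample)
{
	return ((sample ^ adcsel) & WM97XX_ADCSEL_MASK) == 0;
}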
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a14f8dc..0e4227f 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
* Writes the command to the IOMMUs command buffer and informs the
* hardware about the new command.
*/
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+ struct iommu_cmd *cmd,
+ bool sync)
{
u32 left, tail, head, next_tail;
unsigned long flags;
@@ -639,13 +641,18 @@ again:
copy_cmd_to_buffer(iommu, cmd, tail);
/* We need to sync now to make sure all commands are processed */
- iommu->need_sync = true;
+ iommu->need_sync = sync;
spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+{
+ return iommu_queue_command_sync(iommu, cmd, true);
+}
+
/*
* This function queues a completion wait command into the command
* buffer of an IOMMU
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
build_completion_wait(&cmd, (u64)&sem);
- ret = iommu_queue_command(iommu, &cmd);
+ ret = iommu_queue_command_sync(iommu, &cmd, false);
if (ret)
return ret;
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
static void domain_flush_devices(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
- unsigned long flags;
-
- spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(dev_data, &domain->dev_list, list)
device_flush_dte(dev_data);
-
- spin_unlock_irqrestore(&domain->lock, flags);
}
/****************************************************************************
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 6dd3607..212efaf 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -34,7 +34,7 @@
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/list.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define GIG_VERSION {0, 5, 0, 0}
#define GIG_COMPAT {0, 4, 0, 0}
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 48e9cc0..1f73d7f 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2532,6 +2532,9 @@ static void _isdn_setup(struct net_device *dev)
/* Setup the generic properties */
dev->flags = IFF_NOARP|IFF_POINTOPOINT;
+
+ /* isdn prepends a header in the tx path, can't share skbs */
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->header_ops = NULL;
dev->netdev_ops = &isdn_netdev_ops;
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index b982603..8c00937 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 3ebe382..ea21855 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -662,6 +662,11 @@ failed_unregister_led1_R:
static void bd2802_unregister_led_classdev(struct bd2802_led *led)
{
cancel_work_sync(&led->work);
+ led_classdev_unregister(&led->cdev_led2b);
+ led_classdev_unregister(&led->cdev_led2g);
+ led_classdev_unregister(&led->cdev_led2r);
+ led_classdev_unregister(&led->cdev_led1b);
+ led_classdev_unregister(&led->cdev_led1g);
led_classdev_unregister(&led->cdev_led1r);
}
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index e4ce1fd..bcfbd3a 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index d87c9d0..328c64c 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev,
if (count == size) {
led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
+ led_cdev->blink_delay_on = state;
ret = count;
}
@@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev,
if (count == size) {
led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
+ led_cdev->blink_delay_off = state;
ret = count;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 8420129..f75a66e 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -241,12 +241,13 @@ config DM_MIRROR
needed for live data migration tools such as 'pvmove'.
config DM_RAID
- tristate "RAID 4/5/6 target (EXPERIMENTAL)"
+ tristate "RAID 1/4/5/6 target (EXPERIMENTAL)"
depends on BLK_DEV_DM && EXPERIMENTAL
+ select MD_RAID1
select MD_RAID456
select BLK_DEV_MD
---help---
- A dm target that supports RAID4, RAID5 and RAID6 mappings
+ A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings
A RAID-5 set of N drives with a capacity of C MB per drive provides
the capacity of C * (N - 1) MB, and protects against a failure
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 574b09a..0dc6546 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -29,7 +29,6 @@
#include "md.h"
#include "bitmap.h"
-#include <linux/dm-dirty-log.h>
/* debug macros */
#define DEBUG 0
@@ -775,10 +774,8 @@ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned lon
* 0 or page 1
*/
static inline struct page *filemap_get_page(struct bitmap *bitmap,
- unsigned long chunk)
+ unsigned long chunk)
{
- if (bitmap->filemap == NULL)
- return NULL;
if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
return NULL;
return bitmap->filemap[file_page_index(bitmap, chunk)
@@ -878,28 +875,19 @@ enum bitmap_page_attr {
static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- if (page)
- __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
- else
- __set_bit(attr, &bitmap->logattrs);
+ __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
}
static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- if (page)
- __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
- else
- __clear_bit(attr, &bitmap->logattrs);
+ __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
}
static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- if (page)
- return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
- else
- return test_bit(attr, &bitmap->logattrs);
+ return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
}
/*
@@ -912,30 +900,26 @@ static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *p
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
unsigned long bit;
- struct page *page = NULL;
+ struct page *page;
void *kaddr;
unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);
- if (!bitmap->filemap) {
- struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
- if (log)
- log->type->mark_region(log, chunk);
- } else {
+ if (!bitmap->filemap)
+ return;
- page = filemap_get_page(bitmap, chunk);
- if (!page)
- return;
- bit = file_page_offset(bitmap, chunk);
+ page = filemap_get_page(bitmap, chunk);
+ if (!page)
+ return;
+ bit = file_page_offset(bitmap, chunk);
- /* set the bit */
- kaddr = kmap_atomic(page, KM_USER0);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
- set_bit(bit, kaddr);
- else
- __test_and_set_bit_le(bit, kaddr);
- kunmap_atomic(kaddr, KM_USER0);
- PRINTK("set file bit %lu page %lu\n", bit, page->index);
- }
+ /* set the bit */
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ set_bit(bit, kaddr);
+ else
+ __set_bit_le(bit, kaddr);
+ kunmap_atomic(kaddr, KM_USER0);
+ PRINTK("set file bit %lu page %lu\n", bit, page->index);
/* record page number so it gets flushed to disk when unplug occurs */
set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
}
@@ -952,16 +936,6 @@ void bitmap_unplug(struct bitmap *bitmap)
if (!bitmap)
return;
- if (!bitmap->filemap) {
- /* Must be using a dirty_log */
- struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
- dirty = test_and_clear_bit(BITMAP_PAGE_DIRTY, &bitmap->logattrs);
- need_write = test_and_clear_bit(BITMAP_PAGE_NEEDWRITE, &bitmap->logattrs);
- if (dirty || need_write)
- if (log->type->flush(log))
- bitmap->flags |= BITMAP_WRITE_ERROR;
- goto out;
- }
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
@@ -990,7 +964,6 @@ void bitmap_unplug(struct bitmap *bitmap)
else
md_super_wait(bitmap->mddev);
}
-out:
if (bitmap->flags & BITMAP_WRITE_ERROR)
bitmap_file_kick(bitmap);
}
@@ -1199,7 +1172,6 @@ void bitmap_daemon_work(mddev_t *mddev)
struct page *page = NULL, *lastpage = NULL;
sector_t blocks;
void *paddr;
- struct dm_dirty_log *log = mddev->bitmap_info.log;
/* Use a mutex to guard daemon_work against
* bitmap_destroy.
@@ -1224,12 +1196,11 @@ void bitmap_daemon_work(mddev_t *mddev)
spin_lock_irqsave(&bitmap->lock, flags);
for (j = 0; j < bitmap->chunks; j++) {
bitmap_counter_t *bmc;
- if (!bitmap->filemap) {
- if (!log)
- /* error or shutdown */
- break;
- } else
- page = filemap_get_page(bitmap, j);
+ if (!bitmap->filemap)
+ /* error or shutdown */
+ break;
+
+ page = filemap_get_page(bitmap, j);
if (page != lastpage) {
/* skip this page unless it's marked as needing cleaning */
@@ -1298,17 +1269,16 @@ void bitmap_daemon_work(mddev_t *mddev)
-1);
/* clear the bit */
- if (page) {
- paddr = kmap_atomic(page, KM_USER0);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
- clear_bit(file_page_offset(bitmap, j),
- paddr);
- else
- __test_and_clear_bit_le(file_page_offset(bitmap, j),
- paddr);
- kunmap_atomic(paddr, KM_USER0);
- } else
- log->type->clear_region(log, j);
+ paddr = kmap_atomic(page, KM_USER0);
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ clear_bit(file_page_offset(bitmap, j),
+ paddr);
+ else
+ __clear_bit_le(file_page_offset(bitmap, j), paddr);
+ kunmap_atomic(paddr, KM_USER0);
}
} else
j |= PAGE_COUNTER_MASK;
@@ -1316,16 +1286,12 @@ void bitmap_daemon_work(mddev_t *mddev)
spin_unlock_irqrestore(&bitmap->lock, flags);
/* now sync the final page */
- if (lastpage != NULL || log != NULL) {
+ if (lastpage != NULL) {
spin_lock_irqsave(&bitmap->lock, flags);
if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
- if (lastpage)
- write_page(bitmap, lastpage, 0);
- else
- if (log->type->flush(log))
- bitmap->flags |= BITMAP_WRITE_ERROR;
+ write_page(bitmap, lastpage, 0);
} else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1767,12 +1733,10 @@ int bitmap_create(mddev_t *mddev)
BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
if (!file
- && !mddev->bitmap_info.offset
- && !mddev->bitmap_info.log) /* bitmap disabled, nothing to do */
+ && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
return 0;
BUG_ON(file && mddev->bitmap_info.offset);
- BUG_ON(mddev->bitmap_info.offset && mddev->bitmap_info.log);
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
@@ -1863,6 +1827,7 @@ int bitmap_create(mddev_t *mddev)
int bitmap_load(mddev_t *mddev)
{
int err = 0;
+ sector_t start = 0;
sector_t sector = 0;
struct bitmap *bitmap = mddev->bitmap;
@@ -1881,24 +1846,14 @@ int bitmap_load(mddev_t *mddev)
}
bitmap_close_sync(bitmap);
- if (mddev->bitmap_info.log) {
- unsigned long i;
- struct dm_dirty_log *log = mddev->bitmap_info.log;
- for (i = 0; i < bitmap->chunks; i++)
- if (!log->type->in_sync(log, i, 1))
- bitmap_set_memory_bits(bitmap,
- (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
- 1);
- } else {
- sector_t start = 0;
- if (mddev->degraded == 0
- || bitmap->events_cleared == mddev->events)
- /* no need to keep dirty bits to optimise a
- * re-add of a missing device */
- start = mddev->recovery_cp;
-
- err = bitmap_init_from_disk(bitmap, start);
- }
+ if (mddev->degraded == 0
+ || bitmap->events_cleared == mddev->events)
+ /* no need to keep dirty bits to optimise a
+ * re-add of a missing device */
+ start = mddev->recovery_cp;
+
+ err = bitmap_init_from_disk(bitmap, start);
+
if (err)
goto out;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index b2a127e..a28f2e5 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -212,10 +212,6 @@ struct bitmap {
unsigned long file_pages; /* number of pages in the file */
int last_page_size; /* bytes in the last page */
- unsigned long logattrs; /* used when filemap_attr doesn't exist
- * because we are working with a dirty_log
- */
-
unsigned long flags;
int allclean;
@@ -237,7 +233,6 @@ struct bitmap {
wait_queue_head_t behind_wait;
struct sysfs_dirent *sysfs_can_clear;
-
};
/* the bitmap API */
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index c8827ff..49da55c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -19,7 +19,7 @@
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
@@ -30,7 +30,6 @@
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "crypt"
-#define MESG_STR(x) x, sizeof(x)
/*
* context holding the current state of a multi-part conversion
@@ -239,7 +238,7 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
memset(iv, 0, cc->iv_size);
- *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
+ *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
return 0;
}
@@ -248,7 +247,7 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
memset(iv, 0, cc->iv_size);
- *(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
+ *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
return 0;
}
@@ -415,7 +414,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
memset(iv, 0, cc->iv_size);
- *(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
+ *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
return 0;
@@ -1575,11 +1574,17 @@ bad_mem:
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
- unsigned int key_size;
+ unsigned int key_size, opt_params;
unsigned long long tmpll;
int ret;
+ struct dm_arg_set as;
+ const char *opt_string;
+
+ static struct dm_arg _args[] = {
+ {0, 1, "Invalid number of feature args"},
+ };
- if (argc != 5) {
+ if (argc < 5) {
ti->error = "Not enough arguments";
return -EINVAL;
}
@@ -1648,6 +1653,30 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
cc->start = tmpll;
+ argv += 5;
+ argc -= 5;
+
+ /* Optional parameters */
+ if (argc) {
+ as.argc = argc;
+ as.argv = argv;
+
+ ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+ if (ret)
+ goto bad;
+
+ opt_string = dm_shift_arg(&as);
+
+ if (opt_params == 1 && opt_string &&
+ !strcasecmp(opt_string, "allow_discards"))
+ ti->num_discard_requests = 1;
+ else if (opt_params) {
+ ret = -EINVAL;
+ ti->error = "Invalid feature arguments";
+ goto bad;
+ }
+ }
+
ret = -ENOMEM;
cc->io_queue = alloc_workqueue("kcryptd_io",
WQ_NON_REENTRANT|
@@ -1682,9 +1711,16 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
struct dm_crypt_io *io;
struct crypt_config *cc;
- if (bio->bi_rw & REQ_FLUSH) {
+ /*
+ * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
+ * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
+ * - for REQ_DISCARD caller must use flush if IO ordering matters
+ */
+ if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
cc = ti->private;
bio->bi_bdev = cc->dev->bdev;
+ if (bio_sectors(bio))
+ bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
return DM_MAPIO_REMAPPED;
}
@@ -1727,6 +1763,10 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
cc->dev->name, (unsigned long long)cc->start);
+
+ if (ti->num_discard_requests)
+ DMEMIT(" 1 allow_discards");
+
break;
}
return 0;
@@ -1770,12 +1810,12 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
if (argc < 2)
goto error;
- if (!strnicmp(argv[0], MESG_STR("key"))) {
+ if (!strcasecmp(argv[0], "key")) {
if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
DMWARN("not suspended during key manipulation.");
return -EINVAL;
}
- if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
+ if (argc == 3 && !strcasecmp(argv[1], "set")) {
ret = crypt_set_key(cc, argv[2]);
if (ret)
return ret;
@@ -1783,7 +1823,7 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
ret = cc->iv_gen_ops->init(cc);
return ret;
}
- if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
+ if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
ret = cc->iv_gen_ops->wipe(cc);
if (ret)
@@ -1823,7 +1863,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
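
Note on the dm-crypt constructor change above: a crypt table line keeps its five positional arguments and may now append an optional feature group, whose only accepted feature here is allow_discards. A hedged example table line (key, device and sizes are placeholders):

0 409600 crypt aes-cbc-essiv:sha256 <key> 0 /dev/sdb1 0 1 allow_discards

The trailing "1 allow_discards" is the optional group: a count of one feature argument followed by the argument itself. Setting it makes the target accept REQ_DISCARD bios, which crypt_map() above then remaps straight to the backing device (offset-adjusted) without going through the crypt queues, and crypt_status() reports the feature back in the table output.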
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index ea79062..89f73ca 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2003 Sistina Software (UK) Limited.
- * Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -15,6 +15,9 @@
#define DM_MSG_PREFIX "flakey"
+#define all_corrupt_bio_flags_match(bio, fc) \
+ (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
+
/*
* Flakey: Used for testing only, simulates intermittent,
* catastrophic device failure.
@@ -25,60 +28,189 @@ struct flakey_c {
sector_t start;
unsigned up_interval;
unsigned down_interval;
+ unsigned long flags;
+ unsigned corrupt_bio_byte;
+ unsigned corrupt_bio_rw;
+ unsigned corrupt_bio_value;
+ unsigned corrupt_bio_flags;
+};
+
+enum feature_flag_bits {
+ DROP_WRITES
};
+static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ struct dm_target *ti)
+{
+ int r;
+ unsigned argc;
+ const char *arg_name;
+
+ static struct dm_arg _args[] = {
+ {0, 6, "Invalid number of feature args"},
+ {1, UINT_MAX, "Invalid corrupt bio byte"},
+ {0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
+ {0, UINT_MAX, "Invalid corrupt bio flags mask"},
+ };
+
+ /* No feature arguments supplied. */
+ if (!as->argc)
+ return 0;
+
+ r = dm_read_arg_group(_args, as, &argc, &ti->error);
+ if (r)
+ return r;
+
+ while (argc) {
+ arg_name = dm_shift_arg(as);
+ argc--;
+
+ /*
+ * drop_writes
+ */
+ if (!strcasecmp(arg_name, "drop_writes")) {
+ if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
+ ti->error = "Feature drop_writes duplicated";
+ return -EINVAL;
+ }
+
+ continue;
+ }
+
+ /*
+ * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
+ */
+ if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
+ if (!argc)
+ ti->error = "Feature corrupt_bio_byte requires parameters";
+
+ r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
+ if (r)
+ return r;
+ argc--;
+
+ /*
+ * Direction r or w?
+ */
+ arg_name = dm_shift_arg(as);
+ if (!strcasecmp(arg_name, "w"))
+ fc->corrupt_bio_rw = WRITE;
+ else if (!strcasecmp(arg_name, "r"))
+ fc->corrupt_bio_rw = READ;
+ else {
+ ti->error = "Invalid corrupt bio direction (r or w)";
+ return -EINVAL;
+ }
+ argc--;
+
+ /*
+ * Value of byte (0-255) to write in place of correct one.
+ */
+ r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
+ if (r)
+ return r;
+ argc--;
+
+ /*
+ * Only corrupt bios with these flags set.
+ */
+ r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
+ if (r)
+ return r;
+ argc--;
+
+ continue;
+ }
+
+ ti->error = "Unrecognised flakey feature requested";
+ return -EINVAL;
+ }
+
+ if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
+ ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
- * Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval>
+ * Construct a flakey mapping:
+ * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
+ *
+ * Feature args:
+ * [drop_writes]
+ * [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
+ *
+ * Nth_byte starts from 1 for the first byte.
+ * Direction is r for READ or w for WRITE.
+ * bio_flags is ignored if 0.
*/
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
+ static struct dm_arg _args[] = {
+ {0, UINT_MAX, "Invalid up interval"},
+ {0, UINT_MAX, "Invalid down interval"},
+ };
+
+ int r;
struct flakey_c *fc;
- unsigned long long tmp;
+ unsigned long long tmpll;
+ struct dm_arg_set as;
+ const char *devname;
- if (argc != 4) {
- ti->error = "dm-flakey: Invalid argument count";
+ as.argc = argc;
+ as.argv = argv;
+
+ if (argc < 4) {
+ ti->error = "Invalid argument count";
return -EINVAL;
}
- fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+ fc = kzalloc(sizeof(*fc), GFP_KERNEL);
if (!fc) {
- ti->error = "dm-flakey: Cannot allocate linear context";
+ ti->error = "Cannot allocate linear context";
return -ENOMEM;
}
fc->start_time = jiffies;
- if (sscanf(argv[1], "%llu", &tmp) != 1) {
- ti->error = "dm-flakey: Invalid device sector";
+ devname = dm_shift_arg(&as);
+
+ if (sscanf(dm_shift_arg(&as), "%llu", &tmpll) != 1) {
+ ti->error = "Invalid device sector";
goto bad;
}
- fc->start = tmp;
+ fc->start = tmpll;
- if (sscanf(argv[2], "%u", &fc->up_interval) != 1) {
- ti->error = "dm-flakey: Invalid up interval";
+ r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
+ if (r)
goto bad;
- }
- if (sscanf(argv[3], "%u", &fc->down_interval) != 1) {
- ti->error = "dm-flakey: Invalid down interval";
+ r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
+ if (r)
goto bad;
- }
if (!(fc->up_interval + fc->down_interval)) {
- ti->error = "dm-flakey: Total (up + down) interval is zero";
+ ti->error = "Total (up + down) interval is zero";
goto bad;
}
if (fc->up_interval + fc->down_interval < fc->up_interval) {
- ti->error = "dm-flakey: Interval overflow";
+ ti->error = "Interval overflow";
goto bad;
}
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) {
- ti->error = "dm-flakey: Device lookup failed";
+ r = parse_features(&as, fc, ti);
+ if (r)
+ goto bad;
+
+ if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
+ ti->error = "Device lookup failed";
goto bad;
}
ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
ti->private = fc;
return 0;
@@ -99,7 +231,7 @@ static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
struct flakey_c *fc = ti->private;
- return fc->start + (bi_sector - ti->begin);
+ return fc->start + dm_target_offset(ti, bi_sector);
}
static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
@@ -111,6 +243,25 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
}
+static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
+{
+ unsigned bio_bytes = bio_cur_bytes(bio);
+ char *data = bio_data(bio);
+
+ /*
+ * Overwrite the Nth byte of the data returned.
+ */
+ if (data && bio_bytes >= fc->corrupt_bio_byte) {
+ data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
+
+ DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
+ "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
+ bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
+ bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+ }
+}
+
static int flakey_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
@@ -119,18 +270,71 @@ static int flakey_map(struct dm_target *ti, struct bio *bio,
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
- if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval)
+ if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
+ /*
+ * Flag this bio as submitted while down.
+ */
+ map_context->ll = 1;
+
+ /*
+ * Map reads as normal.
+ */
+ if (bio_data_dir(bio) == READ)
+ goto map_bio;
+
+ /*
+ * Drop writes?
+ */
+ if (test_bit(DROP_WRITES, &fc->flags)) {
+ bio_endio(bio, 0);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /*
+ * Corrupt matching writes.
+ */
+ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
+ if (all_corrupt_bio_flags_match(bio, fc))
+ corrupt_bio_data(bio, fc);
+ goto map_bio;
+ }
+
+ /*
+ * By default, error all I/O.
+ */
return -EIO;
+ }
+map_bio:
flakey_map_bio(ti, bio);
return DM_MAPIO_REMAPPED;
}
+static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ int error, union map_info *map_context)
+{
+ struct flakey_c *fc = ti->private;
+ unsigned bio_submitted_while_down = map_context->ll;
+
+ /*
+ * Corrupt successful READs while in down state.
+ * If flags were specified, only corrupt those that match.
+ */
+ if (!error && bio_submitted_while_down &&
+ (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc))
+ corrupt_bio_data(bio, fc);
+
+ return error;
+}
+
static int flakey_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
+ unsigned sz = 0;
struct flakey_c *fc = ti->private;
+ unsigned drop_writes;
switch (type) {
case STATUSTYPE_INFO:
@@ -138,9 +342,22 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
- snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name,
- (unsigned long long)fc->start, fc->up_interval,
- fc->down_interval);
+ DMEMIT("%s %llu %u %u ", fc->dev->name,
+ (unsigned long long)fc->start, fc->up_interval,
+ fc->down_interval);
+
+ drop_writes = test_bit(DROP_WRITES, &fc->flags);
+ DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
+
+ if (drop_writes)
+ DMEMIT("drop_writes ");
+
+ if (fc->corrupt_bio_byte)
+ DMEMIT("corrupt_bio_byte %u %c %u %u ",
+ fc->corrupt_bio_byte,
+ (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
+ fc->corrupt_bio_value, fc->corrupt_bio_flags);
+
break;
}
return 0;
@@ -177,11 +394,12 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
.map = flakey_map,
+ .end_io = flakey_end_io,
.status = flakey_status,
.ioctl = flakey_ioctl,
.merge = flakey_merge,
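
Note on the dm-flakey constructor above: a table line still starts with <dev_path> <offset> <up interval> <down interval> and may now append a feature group. Two hedged example table lines (device and sizes are placeholders):

0 409600 flakey /dev/sdb1 0 59 1 1 drop_writes
0 409600 flakey /dev/sdb1 0 59 1 5 corrupt_bio_byte 32 r 1 0

The first silently acknowledges and drops writes during each one-second down interval while reads pass through. The second errors writes during the down interval (the default behaviour) and maps reads normally, but flakey_end_io() above then overwrites byte 32 of the returned READ data with the value 1; the trailing 0 means no bi_rw flag filtering. The leading 1 and 5 are the feature-argument counts, matching what flakey_status() reports back.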
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2067288..ad2eba4 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -38,6 +38,8 @@ struct io {
struct dm_io_client *client;
io_notify_fn callback;
void *context;
+ void *vma_invalidate_address;
+ unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
static struct kmem_cache *_dm_io_cache;
@@ -116,6 +118,10 @@ static void dec_count(struct io *io, unsigned int region, int error)
set_bit(region, &io->error_bits);
if (atomic_dec_and_test(&io->count)) {
+ if (io->vma_invalidate_size)
+ invalidate_kernel_vmap_range(io->vma_invalidate_address,
+ io->vma_invalidate_size);
+
if (io->sleeper)
wake_up_process(io->sleeper);
@@ -159,6 +165,9 @@ struct dpages {
unsigned context_u;
void *context_ptr;
+
+ void *vma_invalidate_address;
+ unsigned long vma_invalidate_size;
};
/*
@@ -377,6 +386,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->sleeper = current;
io->client = client;
+ io->vma_invalidate_address = dp->vma_invalidate_address;
+ io->vma_invalidate_size = dp->vma_invalidate_size;
+
dispatch_io(rw, num_regions, where, dp, io, 1);
while (1) {
@@ -415,13 +427,21 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io->callback = fn;
io->context = context;
+ io->vma_invalidate_address = dp->vma_invalidate_address;
+ io->vma_invalidate_size = dp->vma_invalidate_size;
+
dispatch_io(rw, num_regions, where, dp, io, 0);
return 0;
}
-static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+ unsigned long size)
{
/* Set up dpages based on memory type */
+
+ dp->vma_invalidate_address = NULL;
+ dp->vma_invalidate_size = 0;
+
switch (io_req->mem.type) {
case DM_IO_PAGE_LIST:
list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
@@ -432,6 +452,11 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
break;
case DM_IO_VMA:
+ flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
+ if ((io_req->bi_rw & RW_MASK) == READ) {
+ dp->vma_invalidate_address = io_req->mem.ptr.vma;
+ dp->vma_invalidate_size = size;
+ }
vm_dp_init(dp, io_req->mem.ptr.vma);
break;
@@ -460,7 +485,7 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
int r;
struct dpages dp;
- r = dp_init(io_req, &dp);
+ r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
if (r)
return r;
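
Note on the dm-io change above: DM_IO_VMA requests now get flush_kernel_vmap_range() before the I/O is issued and, for reads, invalidate_kernel_vmap_range() once the last region completes, which matters when the buffer is a vmalloc() alias on architectures with aliasing caches. A hedged sketch of a synchronous caller (function name and values are illustrative):

#include <linux/dm-io.h>

/* Sketch: synchronous READ of 'count' sectors into a vmalloc()ed buffer. */
static int read_into_vmalloc(struct dm_io_client *client,
			     struct block_device *bdev, void *vbuf,
			     sector_t sector, sector_t count)
{
	unsigned long error_bits;
	struct dm_io_region where = {
		.bdev	= bdev,
		.sector	= sector,
		.count	= count,
	};
	struct dm_io_request io_req = {
		.bi_rw		= READ,
		.mem.type	= DM_IO_VMA,
		.mem.ptr.vma	= vbuf,
		.client		= client,
		/* notify.fn left NULL, so dm_io() runs synchronously */
	};

	/* dp_init() flushes the vmap alias up front and, because this is a
	 * READ, invalidates it again when the last region completes */
	return dm_io(&io_req, 1, &where, &error_bits);
}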
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4cacdad..2e9a3ca 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -128,6 +128,24 @@ static struct hash_cell *__get_uuid_cell(const char *str)
return NULL;
}
+static struct hash_cell *__get_dev_cell(uint64_t dev)
+{
+ struct mapped_device *md;
+ struct hash_cell *hc;
+
+ md = dm_get_md(huge_decode_dev(dev));
+ if (!md)
+ return NULL;
+
+ hc = dm_get_mdptr(md);
+ if (!hc) {
+ dm_put(md);
+ return NULL;
+ }
+
+ return hc;
+}
+
/*-----------------------------------------------------------------
* Inserting, removing and renaming a device.
*---------------------------------------------------------------*/
@@ -718,25 +736,45 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
*/
static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
{
- struct mapped_device *md;
- void *mdptr = NULL;
+ struct hash_cell *hc = NULL;
- if (*param->uuid)
- return __get_uuid_cell(param->uuid);
+ if (*param->uuid) {
+ if (*param->name || param->dev)
+ return NULL;
- if (*param->name)
- return __get_name_cell(param->name);
+ hc = __get_uuid_cell(param->uuid);
+ if (!hc)
+ return NULL;
+ } else if (*param->name) {
+ if (param->dev)
+ return NULL;
- md = dm_get_md(huge_decode_dev(param->dev));
- if (!md)
- goto out;
+ hc = __get_name_cell(param->name);
+ if (!hc)
+ return NULL;
+ } else if (param->dev) {
+ hc = __get_dev_cell(param->dev);
+ if (!hc)
+ return NULL;
+ } else
+ return NULL;
- mdptr = dm_get_mdptr(md);
- if (!mdptr)
- dm_put(md);
+ /*
+ * Sneakily write in both the name and the uuid
+ * while we have the cell.
+ */
+ strlcpy(param->name, hc->name, sizeof(param->name));
+ if (hc->uuid)
+ strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
+ else
+ param->uuid[0] = '\0';
-out:
- return mdptr;
+ if (hc->new_map)
+ param->flags |= DM_INACTIVE_PRESENT_FLAG;
+ else
+ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+
+ return hc;
}
static struct mapped_device *find_device(struct dm_ioctl *param)
@@ -746,24 +784,8 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
down_read(&_hash_lock);
hc = __find_device_hash_cell(param);
- if (hc) {
+ if (hc)
md = hc->md;
-
- /*
- * Sneakily write in both the name and the uuid
- * while we have the cell.
- */
- strlcpy(param->name, hc->name, sizeof(param->name));
- if (hc->uuid)
- strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
- else
- param->uuid[0] = '\0';
-
- if (hc->new_map)
- param->flags |= DM_INACTIVE_PRESENT_FLAG;
- else
- param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
- }
up_read(&_hash_lock);
return md;
@@ -1402,6 +1424,11 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
goto out;
}
+ if (!argc) {
+ DMWARN("Empty message received.");
+ goto out;
+ }
+
table = dm_get_live_table(md);
if (!table)
goto out_argv;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 819e37e..f821470 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -10,7 +10,7 @@
*/
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -224,8 +224,6 @@ struct kcopyd_job {
unsigned int num_dests;
struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
- sector_t offset;
- unsigned int nr_pages;
struct page_list *pages;
/*
@@ -380,7 +378,7 @@ static int run_io_job(struct kcopyd_job *job)
.bi_rw = job->rw,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
- .mem.offset = job->offset,
+ .mem.offset = 0,
.notify.fn = complete_io,
.notify.context = job,
.client = job->kc->io_client,
@@ -397,10 +395,9 @@ static int run_io_job(struct kcopyd_job *job)
static int run_pages_job(struct kcopyd_job *job)
{
int r;
+ unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
- job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
- PAGE_SIZE >> 9);
- r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
+ r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
if (!r) {
/* this job is ready for io */
push(&job->kc->io_jobs, job);
@@ -602,8 +599,6 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->num_dests = num_dests;
memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
- job->offset = 0;
- job->nr_pages = 0;
job->pages = NULL;
job->fn = fn;
@@ -622,6 +617,37 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
}
EXPORT_SYMBOL(dm_kcopyd_copy);
+void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
+ dm_kcopyd_notify_fn fn, void *context)
+{
+ struct kcopyd_job *job;
+
+ job = mempool_alloc(kc->job_pool, GFP_NOIO);
+
+ memset(job, 0, sizeof(struct kcopyd_job));
+ job->kc = kc;
+ job->fn = fn;
+ job->context = context;
+
+ atomic_inc(&kc->nr_jobs);
+
+ return job;
+}
+EXPORT_SYMBOL(dm_kcopyd_prepare_callback);
+
+void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
+{
+ struct kcopyd_job *job = j;
+ struct dm_kcopyd_client *kc = job->kc;
+
+ job->read_err = read_err;
+ job->write_err = write_err;
+
+ push(&kc->complete_jobs, job);
+ wake(kc);
+}
+EXPORT_SYMBOL(dm_kcopyd_do_callback);
+
/*
* Cancels a kcopyd job, eg. someone might be deactivating a
* mirror.
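
Note on the two dm-kcopyd exports above: dm_kcopyd_prepare_callback() reserves a job purely as a completion vehicle, and dm_kcopyd_do_callback() pushes it onto the complete list so the notify function runs from kcopyd's worker. A hedged usage sketch (callback and wrapper names are hypothetical):

/* Sketch: defer a completion into kcopyd's callback context. */
static void zero_done(int read_err, unsigned long write_err, void *context)
{
	/* runs from the kcopyd workqueue once do_callback() is called */
}

static void complete_later(struct dm_kcopyd_client *kc, void *ctx)
{
	void *job = dm_kcopyd_prepare_callback(kc, zero_done, ctx);

	/* ... do the real work elsewhere, then hand back the result ... */
	dm_kcopyd_do_callback(job, 0 /* read_err */, 0 /* write_err */);
}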
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index aa2e0c3..1021c89 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -394,8 +394,7 @@ static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
group[count] = fe->region;
count++;
- list_del(&fe->list);
- list_add(&fe->list, &tmp_list);
+ list_move(&fe->list, &tmp_list);
type = fe->type;
if (count >= MAX_FLUSH_GROUP_COUNT)
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 948e3f4..3b52bb7 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -197,15 +197,21 @@ EXPORT_SYMBOL(dm_dirty_log_destroy);
#define MIRROR_DISK_VERSION 2
#define LOG_OFFSET 2
-struct log_header {
- uint32_t magic;
+struct log_header_disk {
+ __le32 magic;
/*
* Simple, incrementing version. no backward
* compatibility.
*/
+ __le32 version;
+ __le64 nr_regions;
+} __packed;
+
+struct log_header_core {
+ uint32_t magic;
uint32_t version;
- sector_t nr_regions;
+ uint64_t nr_regions;
};
struct log_c {
@@ -239,10 +245,10 @@ struct log_c {
int log_dev_failed;
int log_dev_flush_failed;
struct dm_dev *log_dev;
- struct log_header header;
+ struct log_header_core header;
struct dm_io_region header_location;
- struct log_header *disk_header;
+ struct log_header_disk *disk_header;
};
/*
@@ -251,34 +257,34 @@ struct log_c {
*/
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
- return test_bit_le(bit, (unsigned long *) bs) ? 1 : 0;
+ return test_bit_le(bit, bs) ? 1 : 0;
}
static inline void log_set_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
- __test_and_set_bit_le(bit, (unsigned long *) bs);
+ __set_bit_le(bit, bs);
l->touched_cleaned = 1;
}
static inline void log_clear_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
- __test_and_clear_bit_le(bit, (unsigned long *) bs);
+ __clear_bit_le(bit, bs);
l->touched_dirtied = 1;
}
/*----------------------------------------------------------------
* Header IO
*--------------------------------------------------------------*/
-static void header_to_disk(struct log_header *core, struct log_header *disk)
+static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
{
disk->magic = cpu_to_le32(core->magic);
disk->version = cpu_to_le32(core->version);
disk->nr_regions = cpu_to_le64(core->nr_regions);
}
-static void header_from_disk(struct log_header *core, struct log_header *disk)
+static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk)
{
core->magic = le32_to_cpu(disk->magic);
core->version = le32_to_cpu(disk->version);
@@ -486,7 +492,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
lc->sync_count = (sync == NOSYNC) ? region_count : 0;
- lc->recovering_bits = vmalloc(bitset_size);
+ lc->recovering_bits = vzalloc(bitset_size);
if (!lc->recovering_bits) {
DMWARN("couldn't allocate sync bitset");
vfree(lc->sync_bits);
@@ -498,7 +504,6 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
kfree(lc);
return -ENOMEM;
}
- memset(lc->recovering_bits, 0, bitset_size);
lc->sync_search = 0;
log->context = lc;
@@ -739,8 +744,7 @@ static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
return 0;
do {
- *region = find_next_zero_bit_le(
- (unsigned long *) lc->sync_bits,
+ *region = find_next_zero_bit_le(lc->sync_bits,
lc->region_count,
lc->sync_search);
lc->sync_search = *region + 1;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aa4e570..5e0090e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -19,10 +19,9 @@
#include <linux/time.h>
#include <linux/workqueue.h>
#include <scsi/scsi_dh.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define DM_MSG_PREFIX "multipath"
-#define MESG_STR(x) x, sizeof(x)
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
@@ -505,80 +504,29 @@ static void trigger_event(struct work_struct *work)
* <#paths> <#per-path selector args>
* [<path> [<arg>]* ]+ ]+
*---------------------------------------------------------------*/
-struct param {
- unsigned min;
- unsigned max;
- char *error;
-};
-
-static int read_param(struct param *param, char *str, unsigned *v, char **error)
-{
- if (!str ||
- (sscanf(str, "%u", v) != 1) ||
- (*v < param->min) ||
- (*v > param->max)) {
- *error = param->error;
- return -EINVAL;
- }
-
- return 0;
-}
-
-struct arg_set {
- unsigned argc;
- char **argv;
-};
-
-static char *shift(struct arg_set *as)
-{
- char *r;
-
- if (as->argc) {
- as->argc--;
- r = *as->argv;
- as->argv++;
- return r;
- }
-
- return NULL;
-}
-
-static void consume(struct arg_set *as, unsigned n)
-{
- BUG_ON (as->argc < n);
- as->argc -= n;
- as->argv += n;
-}
-
-static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
+static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
struct dm_target *ti)
{
int r;
struct path_selector_type *pst;
unsigned ps_argc;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of path selector args"},
};
- pst = dm_get_path_selector(shift(as));
+ pst = dm_get_path_selector(dm_shift_arg(as));
if (!pst) {
ti->error = "unknown path selector type";
return -EINVAL;
}
- r = read_param(_params, shift(as), &ps_argc, &ti->error);
+ r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
if (r) {
dm_put_path_selector(pst);
return -EINVAL;
}
- if (ps_argc > as->argc) {
- dm_put_path_selector(pst);
- ti->error = "not enough arguments for path selector";
- return -EINVAL;
- }
-
r = pst->create(&pg->ps, ps_argc, as->argv);
if (r) {
dm_put_path_selector(pst);
@@ -587,12 +535,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
}
pg->ps.type = pst;
- consume(as, ps_argc);
+ dm_consume_args(as, ps_argc);
return 0;
}
-static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
+static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
struct dm_target *ti)
{
int r;
@@ -609,7 +557,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
if (!p)
return ERR_PTR(-ENOMEM);
- r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
+ r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
&p->path.dev);
if (r) {
ti->error = "error getting device";
@@ -660,16 +608,16 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
return ERR_PTR(r);
}
-static struct priority_group *parse_priority_group(struct arg_set *as,
+static struct priority_group *parse_priority_group(struct dm_arg_set *as,
struct multipath *m)
{
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{1, 1024, "invalid number of paths"},
{0, 1024, "invalid number of selector args"}
};
int r;
- unsigned i, nr_selector_args, nr_params;
+ unsigned i, nr_selector_args, nr_args;
struct priority_group *pg;
struct dm_target *ti = m->ti;
@@ -693,26 +641,26 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
/*
* read the paths
*/
- r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error);
+ r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
if (r)
goto bad;
- r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error);
+ r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
if (r)
goto bad;
- nr_params = 1 + nr_selector_args;
+ nr_args = 1 + nr_selector_args;
for (i = 0; i < pg->nr_pgpaths; i++) {
struct pgpath *pgpath;
- struct arg_set path_args;
+ struct dm_arg_set path_args;
- if (as->argc < nr_params) {
+ if (as->argc < nr_args) {
ti->error = "not enough path parameters";
r = -EINVAL;
goto bad;
}
- path_args.argc = nr_params;
+ path_args.argc = nr_args;
path_args.argv = as->argv;
pgpath = parse_path(&path_args, &pg->ps, ti);
@@ -723,7 +671,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
pgpath->pg = pg;
list_add_tail(&pgpath->list, &pg->pgpaths);
- consume(as, nr_params);
+ dm_consume_args(as, nr_args);
}
return pg;
@@ -733,28 +681,23 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
return ERR_PTR(r);
}
-static int parse_hw_handler(struct arg_set *as, struct multipath *m)
+static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
unsigned hw_argc;
int ret;
struct dm_target *ti = m->ti;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of hardware handler args"},
};
- if (read_param(_params, shift(as), &hw_argc, &ti->error))
+ if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
return -EINVAL;
if (!hw_argc)
return 0;
- if (hw_argc > as->argc) {
- ti->error = "not enough arguments for hardware handler";
- return -EINVAL;
- }
-
- m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
+ m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
request_module("scsi_dh_%s", m->hw_handler_name);
if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
ti->error = "unknown hardware handler type";
@@ -778,7 +721,7 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
j = sprintf(p, "%s", as->argv[i]);
}
- consume(as, hw_argc - 1);
+ dm_consume_args(as, hw_argc - 1);
return 0;
fail:
@@ -787,20 +730,20 @@ fail:
return ret;
}
-static int parse_features(struct arg_set *as, struct multipath *m)
+static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
int r;
unsigned argc;
struct dm_target *ti = m->ti;
- const char *param_name;
+ const char *arg_name;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 5, "invalid number of feature args"},
{1, 50, "pg_init_retries must be between 1 and 50"},
{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
};
- r = read_param(_params, shift(as), &argc, &ti->error);
+ r = dm_read_arg_group(_args, as, &argc, &ti->error);
if (r)
return -EINVAL;
@@ -808,26 +751,24 @@ static int parse_features(struct arg_set *as, struct multipath *m)
return 0;
do {
- param_name = shift(as);
+ arg_name = dm_shift_arg(as);
argc--;
- if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) {
+ if (!strcasecmp(arg_name, "queue_if_no_path")) {
r = queue_if_no_path(m, 1, 0);
continue;
}
- if (!strnicmp(param_name, MESG_STR("pg_init_retries")) &&
+ if (!strcasecmp(arg_name, "pg_init_retries") &&
(argc >= 1)) {
- r = read_param(_params + 1, shift(as),
- &m->pg_init_retries, &ti->error);
+ r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
argc--;
continue;
}
- if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) &&
+ if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
(argc >= 1)) {
- r = read_param(_params + 2, shift(as),
- &m->pg_init_delay_msecs, &ti->error);
+ r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
argc--;
continue;
}
@@ -842,15 +783,15 @@ static int parse_features(struct arg_set *as, struct multipath *m)
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
char **argv)
{
- /* target parameters */
- static struct param _params[] = {
+ /* target arguments */
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of priority groups"},
{0, 1024, "invalid initial priority group number"},
};
int r;
struct multipath *m;
- struct arg_set as;
+ struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
@@ -871,11 +812,11 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
if (r)
goto bad;
- r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error);
+ r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
if (r)
goto bad;
- r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error);
+ r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
if (r)
goto bad;
@@ -1505,10 +1446,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
}
if (argc == 1) {
- if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) {
+ if (!strcasecmp(argv[0], "queue_if_no_path")) {
r = queue_if_no_path(m, 1, 0);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) {
+ } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
r = queue_if_no_path(m, 0, 0);
goto out;
}
@@ -1519,18 +1460,18 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
goto out;
}
- if (!strnicmp(argv[0], MESG_STR("disable_group"))) {
+ if (!strcasecmp(argv[0], "disable_group")) {
r = bypass_pg_num(m, argv[1], 1);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) {
+ } else if (!strcasecmp(argv[0], "enable_group")) {
r = bypass_pg_num(m, argv[1], 0);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) {
+ } else if (!strcasecmp(argv[0], "switch_group")) {
r = switch_pg_num(m, argv[1]);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
+ } else if (!strcasecmp(argv[0], "reinstate_path"))
action = reinstate_path;
- else if (!strnicmp(argv[0], MESG_STR("fail_path")))
+ else if (!strcasecmp(argv[0], "fail_path"))
action = fail_path;
else {
DMWARN("Unrecognised multipath message received.");
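All of the mechanical churn in dm-mpath.c above comes from retiring its private param/arg_set helpers in favour of the generic dm_arg/dm_arg_set API added to dm-table.c later in this patch (dm_shift_arg, dm_read_arg, dm_read_arg_group, dm_consume_args). A hedged sketch of how another target constructor might drive the same helpers; the feature names, bounds and error strings here are hypothetical:

#include <linux/device-mapper.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Hedged sketch: a hypothetical target parsing
 *	<#feature args> [writeback] [retries <count>]
 * with the generic helpers this patch introduces.
 */
static int example_parse_features(struct dm_arg_set *as, struct dm_target *ti,
				  unsigned *retries, bool *writeback)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 3, "invalid number of feature args"},
		{1, 100, "retries must be between 1 and 100"},
	};

	/* the group form also checks that 'argc' arguments really follow */
	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc--) {
		arg_name = dm_shift_arg(as);

		if (!strcasecmp(arg_name, "writeback")) {
			*writeback = true;
			continue;
		}

		if (!strcasecmp(arg_name, "retries") && argc) {
			r = dm_read_arg(_args + 1, as, retries, &ti->error);
			if (r)
				return r;
			argc--;
			continue;
		}

		ti->error = "unrecognised feature argument";
		return -EINVAL;
	}

	return 0;
}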
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index f92b6ce..03a837a 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -20,7 +20,7 @@
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define DM_MSG_PREFIX "multipath queue-length"
#define QL_MIN_IO 128
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index e5d8904..a002dd8 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -8,19 +8,19 @@
#include <linux/slab.h>
#include "md.h"
+#include "raid1.h"
#include "raid5.h"
-#include "dm.h"
#include "bitmap.h"
+#include <linux/device-mapper.h>
+
#define DM_MSG_PREFIX "raid"
/*
- * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then
- * make it so the flag doesn't set anything.
+ * The following flags are used by dm-raid.c to set up the array state.
+ * They must be cleared before md_run is called.
*/
-#ifndef MD_SYNC_STATE_FORCED
-#define MD_SYNC_STATE_FORCED 0
-#endif
+#define FirstUse 10 /* rdev flag */
struct raid_dev {
/*
@@ -43,14 +43,15 @@ struct raid_dev {
/*
* Flags for rs->print_flags field.
*/
-#define DMPF_DAEMON_SLEEP 0x1
-#define DMPF_MAX_WRITE_BEHIND 0x2
-#define DMPF_SYNC 0x4
-#define DMPF_NOSYNC 0x8
-#define DMPF_STRIPE_CACHE 0x10
-#define DMPF_MIN_RECOVERY_RATE 0x20
-#define DMPF_MAX_RECOVERY_RATE 0x40
-
+#define DMPF_SYNC 0x1
+#define DMPF_NOSYNC 0x2
+#define DMPF_REBUILD 0x4
+#define DMPF_DAEMON_SLEEP 0x8
+#define DMPF_MIN_RECOVERY_RATE 0x10
+#define DMPF_MAX_RECOVERY_RATE 0x20
+#define DMPF_MAX_WRITE_BEHIND 0x40
+#define DMPF_STRIPE_CACHE 0x80
+#define DMPF_REGION_SIZE 0X100
struct raid_set {
struct dm_target *ti;
@@ -72,6 +73,7 @@ static struct raid_type {
const unsigned level; /* RAID level. */
const unsigned algorithm; /* RAID algorithm. */
} raid_types[] = {
+ {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */},
{"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
{"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
{"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
@@ -105,7 +107,8 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
}
sectors_per_dev = ti->len;
- if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
+ if ((raid_type->level > 1) &&
+ sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
ti->error = "Target length not divisible by number of data devices";
return ERR_PTR(-EINVAL);
}
@@ -147,9 +150,16 @@ static void context_free(struct raid_set *rs)
{
int i;
- for (i = 0; i < rs->md.raid_disks; i++)
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ if (rs->dev[i].meta_dev)
+ dm_put_device(rs->ti, rs->dev[i].meta_dev);
+ if (rs->dev[i].rdev.sb_page)
+ put_page(rs->dev[i].rdev.sb_page);
+ rs->dev[i].rdev.sb_page = NULL;
+ rs->dev[i].rdev.sb_loaded = 0;
if (rs->dev[i].data_dev)
dm_put_device(rs->ti, rs->dev[i].data_dev);
+ }
kfree(rs);
}
@@ -159,7 +169,16 @@ static void context_free(struct raid_set *rs)
* <meta_dev>: meta device name or '-' if missing
* <data_dev>: data device name or '-' if missing
*
- * This code parses those words.
+ * The following are permitted:
+ * - -
+ * - <data_dev>
+ * <meta_dev> <data_dev>
+ *
+ * The following is not allowed:
+ * <meta_dev> -
+ *
+ * This code parses those words. If there is a failure,
+ * the caller must use context_free to unwind the operations.
*/
static int dev_parms(struct raid_set *rs, char **argv)
{
@@ -182,8 +201,16 @@ static int dev_parms(struct raid_set *rs, char **argv)
rs->dev[i].rdev.mddev = &rs->md;
if (strcmp(argv[0], "-")) {
- rs->ti->error = "Metadata devices not supported";
- return -EINVAL;
+ ret = dm_get_device(rs->ti, argv[0],
+ dm_table_get_mode(rs->ti->table),
+ &rs->dev[i].meta_dev);
+ rs->ti->error = "RAID metadata device lookup failure";
+ if (ret)
+ return ret;
+
+ rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
+ if (!rs->dev[i].rdev.sb_page)
+ return -ENOMEM;
}
if (!strcmp(argv[1], "-")) {
@@ -193,6 +220,10 @@ static int dev_parms(struct raid_set *rs, char **argv)
return -EINVAL;
}
+ rs->ti->error = "No data device supplied with metadata device";
+ if (rs->dev[i].meta_dev)
+ return -EINVAL;
+
continue;
}
@@ -204,6 +235,10 @@ static int dev_parms(struct raid_set *rs, char **argv)
return ret;
}
+ if (rs->dev[i].meta_dev) {
+ metadata_available = 1;
+ rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
+ }
rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
@@ -235,33 +270,109 @@ static int dev_parms(struct raid_set *rs, char **argv)
}
/*
+ * validate_region_size
+ * @rs
+ * @region_size: region size in sectors. If 0, pick a size (4MiB default).
+ *
+ * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
+ * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
+ *
+ * Returns: 0 on success, -EINVAL on failure.
+ */
+static int validate_region_size(struct raid_set *rs, unsigned long region_size)
+{
+ unsigned long min_region_size = rs->ti->len / (1 << 21);
+
+ if (!region_size) {
+ /*
+ * Choose a reasonable default. All figures in sectors.
+ */
+ if (min_region_size > (1 << 13)) {
+ DMINFO("Choosing default region size of %lu sectors",
+ region_size);
+ region_size = min_region_size;
+ } else {
+ DMINFO("Choosing default region size of 4MiB");
+ region_size = 1 << 13; /* sectors */
+ }
+ } else {
+ /*
+ * Validate user-supplied value.
+ */
+ if (region_size > rs->ti->len) {
+ rs->ti->error = "Supplied region size is too large";
+ return -EINVAL;
+ }
+
+ if (region_size < min_region_size) {
+ DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
+ region_size, min_region_size);
+ rs->ti->error = "Supplied region size is too small";
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(region_size)) {
+ rs->ti->error = "Region size is not a power of 2";
+ return -EINVAL;
+ }
+
+ if (region_size < rs->md.chunk_sectors) {
+ rs->ti->error = "Region size is smaller than the chunk size";
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Convert sectors to bytes.
+ */
+ rs->md.bitmap_info.chunksize = (region_size << 9);
+
+ return 0;
+}
+
+/*
* Possible arguments are...
- * RAID456:
* <chunk_size> [optional_args]
*
- * Optional args:
- * [[no]sync] Force or prevent recovery of the entire array
+ * Argument definitions
+ * <chunk_size> The number of sectors per disk that
+ * will form the "stripe"
+ * [[no]sync] Force or prevent recovery of the
+ * entire array
* [rebuild <idx>] Rebuild the drive indicated by the index
- * [daemon_sleep <ms>] Time between bitmap daemon work to clear bits
+ * [daemon_sleep <ms>] Time between bitmap daemon work to
+ * clear bits
* [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
* [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
+ * [write_mostly <idx>] Indicate a write mostly drive via index
* [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
* [stripe_cache <sectors>] Stripe cache size for higher RAIDs
+ * [region_size <sectors>] Defines granularity of bitmap
*/
static int parse_raid_params(struct raid_set *rs, char **argv,
unsigned num_raid_params)
{
unsigned i, rebuild_cnt = 0;
- unsigned long value;
+ unsigned long value, region_size = 0;
char *key;
/*
* First, parse the in-order required arguments
+ * "chunk_size" is the only argument of this type.
*/
- if ((strict_strtoul(argv[0], 10, &value) < 0) ||
- !is_power_of_2(value) || (value < 8)) {
+ if ((strict_strtoul(argv[0], 10, &value) < 0)) {
rs->ti->error = "Bad chunk size";
return -EINVAL;
+ } else if (rs->raid_type->level == 1) {
+ if (value)
+ DMERR("Ignoring chunk size parameter for RAID 1");
+ value = 0;
+ } else if (!is_power_of_2(value)) {
+ rs->ti->error = "Chunk size must be a power of 2";
+ return -EINVAL;
+ } else if (value < 8) {
+ rs->ti->error = "Chunk size value is too small";
+ return -EINVAL;
}
rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
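validate_region_size() above reduces to a bound and a default: the MD bitmap can track at most 2^21 regions, so the minimum region size is ti->len >> 21 sectors; when no region_size is supplied, the larger of that minimum and 4 MiB (8192 sectors) is used, while a user-supplied value must additionally be a power of two, no larger than ti->len and no smaller than the chunk size. Either way the result is stored in bytes as region_size << 9. A hedged userspace sketch of the default arithmetic for a 1 TiB (2^31-sector) target, where the minimum comes out at 1024 sectors and the 4 MiB default therefore wins:

#include <stdio.h>

int main(void)
{
	unsigned long long ti_len = 1ULL << 31;		/* target length in sectors (1 TiB) */
	unsigned long long min_rs = ti_len / (1 << 21);	/* 1024 sectors: 2^21-region bitmap cap */
	unsigned long long region_size =
		(min_rs > (1 << 13)) ? min_rs : (1 << 13);	/* default is 8192 sectors (4 MiB) */

	/* prints: region_size = 8192 sectors (4194304 bytes) */
	printf("region_size = %llu sectors (%llu bytes)\n",
	       region_size, region_size << 9);
	return 0;
}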
@@ -269,22 +380,39 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
num_raid_params--;
/*
- * Second, parse the unordered optional arguments
+ * We set each individual device as In_sync with a completed
+ * 'recovery_offset'. If there has been a device failure or
+ * replacement then one of the following cases applies:
+ *
+ * 1) User specifies 'rebuild'.
+ * - Device is reset when param is read.
+ * 2) A new device is supplied.
+ * - No matching superblock found, resets device.
+ * 3) Device failure was transient and returns on reload.
+ * - Failure noticed, resets device for bitmap replay.
+ * 4) Device hadn't completed recovery after previous failure.
+ * - Superblock is read and overrides recovery_offset.
+ *
+ * What is found in the superblocks of the devices is always
+ * authoritative, unless 'rebuild' or '[no]sync' was specified.
*/
- for (i = 0; i < rs->md.raid_disks; i++)
+ for (i = 0; i < rs->md.raid_disks; i++) {
set_bit(In_sync, &rs->dev[i].rdev.flags);
+ rs->dev[i].rdev.recovery_offset = MaxSector;
+ }
+ /*
+ * Second, parse the unordered optional arguments
+ */
for (i = 0; i < num_raid_params; i++) {
- if (!strcmp(argv[i], "nosync")) {
+ if (!strcasecmp(argv[i], "nosync")) {
rs->md.recovery_cp = MaxSector;
rs->print_flags |= DMPF_NOSYNC;
- rs->md.flags |= MD_SYNC_STATE_FORCED;
continue;
}
- if (!strcmp(argv[i], "sync")) {
+ if (!strcasecmp(argv[i], "sync")) {
rs->md.recovery_cp = 0;
rs->print_flags |= DMPF_SYNC;
- rs->md.flags |= MD_SYNC_STATE_FORCED;
continue;
}
@@ -300,9 +428,13 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
- if (!strcmp(key, "rebuild")) {
- if (++rebuild_cnt > rs->raid_type->parity_devs) {
- rs->ti->error = "Too many rebuild drives given";
+ if (!strcasecmp(key, "rebuild")) {
+ rebuild_cnt++;
+ if (((rs->raid_type->level != 1) &&
+ (rebuild_cnt > rs->raid_type->parity_devs)) ||
+ ((rs->raid_type->level == 1) &&
+ (rebuild_cnt > (rs->md.raid_disks - 1)))) {
+ rs->ti->error = "Too many rebuild devices specified for given RAID type";
return -EINVAL;
}
if (value > rs->md.raid_disks) {
@@ -311,7 +443,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
clear_bit(In_sync, &rs->dev[value].rdev.flags);
rs->dev[value].rdev.recovery_offset = 0;
- } else if (!strcmp(key, "max_write_behind")) {
+ rs->print_flags |= DMPF_REBUILD;
+ } else if (!strcasecmp(key, "write_mostly")) {
+ if (rs->raid_type->level != 1) {
+ rs->ti->error = "write_mostly option is only valid for RAID1";
+ return -EINVAL;
+ }
+ if (value > rs->md.raid_disks) {
+ rs->ti->error = "Invalid write_mostly drive index given";
+ return -EINVAL;
+ }
+ set_bit(WriteMostly, &rs->dev[value].rdev.flags);
+ } else if (!strcasecmp(key, "max_write_behind")) {
+ if (rs->raid_type->level != 1) {
+ rs->ti->error = "max_write_behind option is only valid for RAID1";
+ return -EINVAL;
+ }
rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
/*
@@ -324,14 +471,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
rs->md.bitmap_info.max_write_behind = value;
- } else if (!strcmp(key, "daemon_sleep")) {
+ } else if (!strcasecmp(key, "daemon_sleep")) {
rs->print_flags |= DMPF_DAEMON_SLEEP;
if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
rs->ti->error = "daemon sleep period out of range";
return -EINVAL;
}
rs->md.bitmap_info.daemon_sleep = value;
- } else if (!strcmp(key, "stripe_cache")) {
+ } else if (!strcasecmp(key, "stripe_cache")) {
rs->print_flags |= DMPF_STRIPE_CACHE;
/*
@@ -348,20 +495,23 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "Bad stripe_cache size";
return -EINVAL;
}
- } else if (!strcmp(key, "min_recovery_rate")) {
+ } else if (!strcasecmp(key, "min_recovery_rate")) {
rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
if (value > INT_MAX) {
rs->ti->error = "min_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_min = (int)value;
- } else if (!strcmp(key, "max_recovery_rate")) {
+ } else if (!strcasecmp(key, "max_recovery_rate")) {
rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
if (value > INT_MAX) {
rs->ti->error = "max_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_max = (int)value;
+ } else if (!strcasecmp(key, "region_size")) {
+ rs->print_flags |= DMPF_REGION_SIZE;
+ region_size = value;
} else {
DMERR("Unable to parse RAID parameter: %s", key);
rs->ti->error = "Unable to parse RAID parameters";
@@ -369,6 +519,19 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
}
+ if (validate_region_size(rs, region_size))
+ return -EINVAL;
+
+ if (rs->md.chunk_sectors)
+ rs->ti->split_io = rs->md.chunk_sectors;
+ else
+ rs->ti->split_io = region_size;
+
+ if (rs->md.chunk_sectors)
+ rs->ti->split_io = rs->md.chunk_sectors;
+ else
+ rs->ti->split_io = region_size;
+
/* Assume there are no metadata devices until the drives are parsed */
rs->md.persistent = 0;
rs->md.external = 1;
@@ -387,17 +550,351 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
+ if (rs->raid_type->level == 1)
+ return md_raid1_congested(&rs->md, bits);
+
return md_raid5_congested(&rs->md, bits);
}
/*
+ * This structure is never routinely used by userspace, unlike md superblocks.
+ * Devices with this superblock should only ever be accessed via device-mapper.
+ */
+#define DM_RAID_MAGIC 0x64526D44
+struct dm_raid_superblock {
+ __le32 magic; /* "DmRd" */
+ __le32 features; /* Used to indicate possible future changes */
+
+ __le32 num_devices; /* Number of devices in this array. (Max 64) */
+ __le32 array_position; /* The position of this drive in the array */
+
+ __le64 events; /* Incremented by md when superblock updated */
+ __le64 failed_devices; /* Bit field of devices to indicate failures */
+
+ /*
+ * This offset tracks the progress of the repair or replacement of
+ * an individual drive.
+ */
+ __le64 disk_recovery_offset;
+
+ /*
+ * This offset tracks the progress of the initial array
+ * synchronisation/parity calculation.
+ */
+ __le64 array_resync_offset;
+
+ /*
+ * RAID characteristics
+ */
+ __le32 level;
+ __le32 layout;
+ __le32 stripe_sectors;
+
+ __u8 pad[452]; /* Round struct to 512 bytes. */
+ /* Always set to 0 when writing. */
+} __packed;
+
+static int read_disk_sb(mdk_rdev_t *rdev, int size)
+{
+ BUG_ON(!rdev->sb_page);
+
+ if (rdev->sb_loaded)
+ return 0;
+
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
+ DMERR("Failed to read device superblock");
+ return -EINVAL;
+ }
+
+ rdev->sb_loaded = 1;
+
+ return 0;
+}
+
+static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ mdk_rdev_t *r, *t;
+ uint64_t failed_devices;
+ struct dm_raid_superblock *sb;
+
+ sb = page_address(rdev->sb_page);
+ failed_devices = le64_to_cpu(sb->failed_devices);
+
+ rdev_for_each(r, t, mddev)
+ if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags))
+ failed_devices |= (1ULL << r->raid_disk);
+
+ memset(sb, 0, sizeof(*sb));
+
+ sb->magic = cpu_to_le32(DM_RAID_MAGIC);
+ sb->features = cpu_to_le32(0); /* No features yet */
+
+ sb->num_devices = cpu_to_le32(mddev->raid_disks);
+ sb->array_position = cpu_to_le32(rdev->raid_disk);
+
+ sb->events = cpu_to_le64(mddev->events);
+ sb->failed_devices = cpu_to_le64(failed_devices);
+
+ sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
+ sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
+
+ sb->level = cpu_to_le32(mddev->level);
+ sb->layout = cpu_to_le32(mddev->layout);
+ sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
+}
+
+/*
+ * super_load
+ *
+ * This function creates a superblock if one is not found on the device
+ * and will decide which superblock to use if there's a choice.
+ *
+ * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
+ */
+static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
+{
+ int ret;
+ struct dm_raid_superblock *sb;
+ struct dm_raid_superblock *refsb;
+ uint64_t events_sb, events_refsb;
+
+ rdev->sb_start = 0;
+ rdev->sb_size = sizeof(*sb);
+
+ ret = read_disk_sb(rdev, rdev->sb_size);
+ if (ret)
+ return ret;
+
+ sb = page_address(rdev->sb_page);
+ if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) {
+ super_sync(rdev->mddev, rdev);
+
+ set_bit(FirstUse, &rdev->flags);
+
+ /* Force writing of superblocks to disk */
+ set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+
+ /* Any superblock is better than none, choose that if given */
+ return refdev ? 0 : 1;
+ }
+
+ if (!refdev)
+ return 1;
+
+ events_sb = le64_to_cpu(sb->events);
+
+ refsb = page_address(refdev->sb_page);
+ events_refsb = le64_to_cpu(refsb->events);
+
+ return (events_sb > events_refsb) ? 1 : 0;
+}
+
+static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ int role;
+ struct raid_set *rs = container_of(mddev, struct raid_set, md);
+ uint64_t events_sb;
+ uint64_t failed_devices;
+ struct dm_raid_superblock *sb;
+ uint32_t new_devs = 0;
+ uint32_t rebuilds = 0;
+ mdk_rdev_t *r, *t;
+ struct dm_raid_superblock *sb2;
+
+ sb = page_address(rdev->sb_page);
+ events_sb = le64_to_cpu(sb->events);
+ failed_devices = le64_to_cpu(sb->failed_devices);
+
+ /*
+ * Initialise to 1 if this is a new superblock.
+ */
+ mddev->events = events_sb ? : 1;
+
+ /*
+ * Reshaping is not currently allowed
+ */
+ if ((le32_to_cpu(sb->level) != mddev->level) ||
+ (le32_to_cpu(sb->layout) != mddev->layout) ||
+ (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
+ DMERR("Reshaping arrays not yet supported.");
+ return -EINVAL;
+ }
+
+ /* We can only change the number of devices in RAID1 right now */
+ if ((rs->raid_type->level != 1) &&
+ (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
+ DMERR("Reshaping arrays not yet supported.");
+ return -EINVAL;
+ }
+
+ if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
+ mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
+
+ /*
+ * During load, we set FirstUse if a new superblock was written.
+ * There are two reasons we might not have a superblock:
+ * 1) The array is brand new - in which case, all of the
+ * devices must have their In_sync bit set. Also,
+ * recovery_cp must be 0, unless forced.
+ * 2) This is a new device being added to an old array
+ * and the new device needs to be rebuilt - in which
+ * case the In_sync bit will /not/ be set and
+ * recovery_cp must be MaxSector.
+ */
+ rdev_for_each(r, t, mddev) {
+ if (!test_bit(In_sync, &r->flags)) {
+ if (!test_bit(FirstUse, &r->flags))
+ DMERR("Superblock area of "
+ "rebuild device %d should have been "
+ "cleared.", r->raid_disk);
+ set_bit(FirstUse, &r->flags);
+ rebuilds++;
+ } else if (test_bit(FirstUse, &r->flags))
+ new_devs++;
+ }
+
+ if (!rebuilds) {
+ if (new_devs == mddev->raid_disks) {
+ DMINFO("Superblocks created for new array");
+ set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+ } else if (new_devs) {
+ DMERR("New device injected "
+ "into existing array without 'rebuild' "
+ "parameter specified");
+ return -EINVAL;
+ }
+ } else if (new_devs) {
+ DMERR("'rebuild' devices cannot be "
+ "injected into an array with other first-time devices");
+ return -EINVAL;
+ } else if (mddev->recovery_cp != MaxSector) {
+ DMERR("'rebuild' specified while array is not in-sync");
+ return -EINVAL;
+ }
+
+ /*
+ * Now we set the Faulty bit for those devices that are
+ * recorded in the superblock as failed.
+ */
+ rdev_for_each(r, t, mddev) {
+ if (!r->sb_page)
+ continue;
+ sb2 = page_address(r->sb_page);
+ sb2->failed_devices = 0;
+
+ /*
+ * Check for any device re-ordering.
+ */
+ if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
+ role = le32_to_cpu(sb2->array_position);
+ if (role != r->raid_disk) {
+ if (rs->raid_type->level != 1) {
+ rs->ti->error = "Cannot change device "
+ "positions in RAID array";
+ return -EINVAL;
+ }
+ DMINFO("RAID1 device #%d now at position #%d",
+ role, r->raid_disk);
+ }
+
+ /*
+ * Partial recovery is performed on
+ * returning failed devices.
+ */
+ if (failed_devices & (1 << role))
+ set_bit(Faulty, &r->flags);
+ }
+ }
+
+ return 0;
+}
+
+static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ struct dm_raid_superblock *sb = page_address(rdev->sb_page);
+
+ /*
+ * If mddev->events is not set, we know we have not yet initialized
+ * the array.
+ */
+ if (!mddev->events && super_init_validation(mddev, rdev))
+ return -EINVAL;
+
+ mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
+ rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
+ if (!test_bit(FirstUse, &rdev->flags)) {
+ rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
+ if (rdev->recovery_offset != MaxSector)
+ clear_bit(In_sync, &rdev->flags);
+ }
+
+ /*
+ * If a device comes back, set it as not In_sync and no longer faulty.
+ */
+ if (test_bit(Faulty, &rdev->flags)) {
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
+ rdev->saved_raid_disk = rdev->raid_disk;
+ rdev->recovery_offset = 0;
+ }
+
+ clear_bit(FirstUse, &rdev->flags);
+
+ return 0;
+}
+
+/*
+ * Analyse superblocks and select the freshest.
+ */
+static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
+{
+ int ret;
+ mdk_rdev_t *rdev, *freshest, *tmp;
+ mddev_t *mddev = &rs->md;
+
+ freshest = NULL;
+ rdev_for_each(rdev, tmp, mddev) {
+ if (!rdev->meta_bdev)
+ continue;
+
+ ret = super_load(rdev, freshest);
+
+ switch (ret) {
+ case 1:
+ freshest = rdev;
+ break;
+ case 0:
+ break;
+ default:
+ ti->error = "Failed to load superblock";
+ return ret;
+ }
+ }
+
+ if (!freshest)
+ return 0;
+
+ /*
+ * Validation of the freshest device provides the source of
+ * validation for the remaining devices.
+ */
+ ti->error = "Unable to assemble array: Invalid superblocks";
+ if (super_validate(mddev, freshest))
+ return -EINVAL;
+
+ rdev_for_each(rdev, tmp, mddev)
+ if ((rdev != freshest) && super_validate(mddev, rdev))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
* Construct a RAID4/5/6 mapping:
* Args:
* <raid_type> <#raid_params> <raid_params> \
* <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
*
- * ** metadata devices are not supported yet, use '-' instead **
- *
* <raid_params> varies by <raid_type>. See 'parse_raid_params' for
* details on possible <raid_params>.
*/
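The dm_raid_superblock introduced above is sized to fill exactly one 512-byte sector: the four 32-bit identification fields (16 bytes), four 64-bit counters and offsets (32 bytes) and three 32-bit shape fields (12 bytes) total 60 bytes, and the 452-byte zero pad brings that to 512. A hedged userspace mirror of the layout, with fixed-width types standing in for __le32/__le64, makes the arithmetic checkable at compile time:

#include <stdint.h>

struct dm_raid_superblock_mirror {
	uint32_t magic, features, num_devices, array_position;	/* 16 bytes */
	uint64_t events, failed_devices;			/* 32 bytes */
	uint64_t disk_recovery_offset, array_resync_offset;	/* 48 bytes */
	uint32_t level, layout, stripe_sectors;			/* 60 bytes */
	uint8_t  pad[452];					/* 512 bytes */
} __attribute__((packed));

_Static_assert(sizeof(struct dm_raid_superblock_mirror) == 512,
	       "dm-raid superblock must fill exactly one 512-byte sector");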
@@ -465,8 +962,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (ret)
goto bad;
+ rs->md.sync_super = super_sync;
+ ret = analyse_superblocks(ti, rs);
+ if (ret)
+ goto bad;
+
INIT_WORK(&rs->md.event_work, do_table_event);
- ti->split_io = rs->md.chunk_sectors;
ti->private = rs;
mutex_lock(&rs->md.reconfig_mutex);
@@ -482,6 +983,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
+ mddev_suspend(&rs->md);
return 0;
bad:
@@ -546,12 +1048,17 @@ static int raid_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
/* The string you would use to construct this array */
- for (i = 0; i < rs->md.raid_disks; i++)
- if (rs->dev[i].data_dev &&
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ if ((rs->print_flags & DMPF_REBUILD) &&
+ rs->dev[i].data_dev &&
!test_bit(In_sync, &rs->dev[i].rdev.flags))
- raid_param_cnt++; /* for rebuilds */
+ raid_param_cnt += 2; /* for rebuilds */
+ if (rs->dev[i].data_dev &&
+ test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ raid_param_cnt += 2;
+ }
- raid_param_cnt += (hweight64(rs->print_flags) * 2);
+ raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2);
if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
raid_param_cnt--;
@@ -565,7 +1072,8 @@ static int raid_status(struct dm_target *ti, status_type_t type,
DMEMIT(" nosync");
for (i = 0; i < rs->md.raid_disks; i++)
- if (rs->dev[i].data_dev &&
+ if ((rs->print_flags & DMPF_REBUILD) &&
+ rs->dev[i].data_dev &&
!test_bit(In_sync, &rs->dev[i].rdev.flags))
DMEMIT(" rebuild %u", i);
@@ -579,6 +1087,11 @@ static int raid_status(struct dm_target *ti, status_type_t type,
if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
+ for (i = 0; i < rs->md.raid_disks; i++)
+ if (rs->dev[i].data_dev &&
+ test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ DMEMIT(" write_mostly %u", i);
+
if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
DMEMIT(" max_write_behind %lu",
rs->md.bitmap_info.max_write_behind);
@@ -591,9 +1104,16 @@ static int raid_status(struct dm_target *ti, status_type_t type,
conf ? conf->max_nr_stripes * 2 : 0);
}
+ if (rs->print_flags & DMPF_REGION_SIZE)
+ DMEMIT(" region_size %lu",
+ rs->md.bitmap_info.chunksize >> 9);
+
DMEMIT(" %d", rs->md.raid_disks);
for (i = 0; i < rs->md.raid_disks; i++) {
- DMEMIT(" -"); /* metadata device */
+ if (rs->dev[i].meta_dev)
+ DMEMIT(" %s", rs->dev[i].meta_dev->name);
+ else
+ DMEMIT(" -");
if (rs->dev[i].data_dev)
DMEMIT(" %s", rs->dev[i].data_dev->name);
@@ -650,12 +1170,13 @@ static void raid_resume(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
+ bitmap_load(&rs->md);
mddev_resume(&rs->md);
}
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 135c2f1..d1f1d70 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -58,25 +58,30 @@
#define NUM_SNAPSHOT_HDR_CHUNKS 1
struct disk_header {
- uint32_t magic;
+ __le32 magic;
/*
* Is this snapshot valid. There is no way of recovering
* an invalid snapshot.
*/
- uint32_t valid;
+ __le32 valid;
/*
* Simple, incrementing version. no backward
* compatibility.
*/
- uint32_t version;
+ __le32 version;
/* In sectors */
- uint32_t chunk_size;
-};
+ __le32 chunk_size;
+} __packed;
struct disk_exception {
+ __le64 old_chunk;
+ __le64 new_chunk;
+} __packed;
+
+struct core_exception {
uint64_t old_chunk;
uint64_t new_chunk;
};
@@ -169,10 +174,9 @@ static int alloc_area(struct pstore *ps)
if (!ps->area)
goto err_area;
- ps->zero_area = vmalloc(len);
+ ps->zero_area = vzalloc(len);
if (!ps->zero_area)
goto err_zero_area;
- memset(ps->zero_area, 0, len);
ps->header_area = vmalloc(len);
if (!ps->header_area)
@@ -396,32 +400,32 @@ static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
}
static void read_exception(struct pstore *ps,
- uint32_t index, struct disk_exception *result)
+ uint32_t index, struct core_exception *result)
{
- struct disk_exception *e = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, index);
/* copy it */
- result->old_chunk = le64_to_cpu(e->old_chunk);
- result->new_chunk = le64_to_cpu(e->new_chunk);
+ result->old_chunk = le64_to_cpu(de->old_chunk);
+ result->new_chunk = le64_to_cpu(de->new_chunk);
}
static void write_exception(struct pstore *ps,
- uint32_t index, struct disk_exception *de)
+ uint32_t index, struct core_exception *e)
{
- struct disk_exception *e = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, index);
/* copy it */
- e->old_chunk = cpu_to_le64(de->old_chunk);
- e->new_chunk = cpu_to_le64(de->new_chunk);
+ de->old_chunk = cpu_to_le64(e->old_chunk);
+ de->new_chunk = cpu_to_le64(e->new_chunk);
}
static void clear_exception(struct pstore *ps, uint32_t index)
{
- struct disk_exception *e = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, index);
/* clear it */
- e->old_chunk = 0;
- e->new_chunk = 0;
+ de->old_chunk = 0;
+ de->new_chunk = 0;
}
/*
@@ -437,13 +441,13 @@ static int insert_exceptions(struct pstore *ps,
{
int r;
unsigned int i;
- struct disk_exception de;
+ struct core_exception e;
/* presume the area is full */
*full = 1;
for (i = 0; i < ps->exceptions_per_area; i++) {
- read_exception(ps, i, &de);
+ read_exception(ps, i, &e);
/*
* If the new_chunk is pointing at the start of
@@ -451,7 +455,7 @@ static int insert_exceptions(struct pstore *ps,
* is we know that we've hit the end of the
* exceptions. Therefore the area is not full.
*/
- if (de.new_chunk == 0LL) {
+ if (e.new_chunk == 0LL) {
ps->current_committed = i;
*full = 0;
break;
@@ -460,13 +464,13 @@ static int insert_exceptions(struct pstore *ps,
/*
* Keep track of the start of the free chunks.
*/
- if (ps->next_free <= de.new_chunk)
- ps->next_free = de.new_chunk + 1;
+ if (ps->next_free <= e.new_chunk)
+ ps->next_free = e.new_chunk + 1;
/*
* Otherwise we add the exception to the snapshot.
*/
- r = callback(callback_context, de.old_chunk, de.new_chunk);
+ r = callback(callback_context, e.old_chunk, e.new_chunk);
if (r)
return r;
}
@@ -563,7 +567,7 @@ static int persistent_read_metadata(struct dm_exception_store *store,
ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
sizeof(struct disk_exception);
ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
- sizeof(*ps->callbacks));
+ sizeof(*ps->callbacks));
if (!ps->callbacks)
return -ENOMEM;
@@ -641,12 +645,12 @@ static void persistent_commit_exception(struct dm_exception_store *store,
{
unsigned int i;
struct pstore *ps = get_info(store);
- struct disk_exception de;
+ struct core_exception ce;
struct commit_callback *cb;
- de.old_chunk = e->old_chunk;
- de.new_chunk = e->new_chunk;
- write_exception(ps, ps->current_committed++, &de);
+ ce.old_chunk = e->old_chunk;
+ ce.new_chunk = e->new_chunk;
+ write_exception(ps, ps->current_committed++, &ce);
/*
* Add the callback to the back of the array. This code
@@ -670,7 +674,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
* If we completely filled the current area, then wipe the next one.
*/
if ((ps->current_committed == ps->exceptions_per_area) &&
- zero_disk_area(ps, ps->current_area + 1))
+ zero_disk_area(ps, ps->current_area + 1))
ps->valid = 0;
/*
@@ -701,7 +705,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
chunk_t *last_new_chunk)
{
struct pstore *ps = get_info(store);
- struct disk_exception de;
+ struct core_exception ce;
int nr_consecutive;
int r;
@@ -722,9 +726,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
ps->current_committed = ps->exceptions_per_area;
}
- read_exception(ps, ps->current_committed - 1, &de);
- *last_old_chunk = de.old_chunk;
- *last_new_chunk = de.new_chunk;
+ read_exception(ps, ps->current_committed - 1, &ce);
+ *last_old_chunk = ce.old_chunk;
+ *last_new_chunk = ce.new_chunk;
/*
* Find number of consecutive chunks within the current area,
@@ -733,9 +737,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
nr_consecutive++) {
read_exception(ps, ps->current_committed - 1 - nr_consecutive,
- &de);
- if (de.old_chunk != *last_old_chunk - nr_consecutive ||
- de.new_chunk != *last_new_chunk - nr_consecutive)
+ &ce);
+ if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
+ ce.new_chunk != *last_new_chunk - nr_consecutive)
break;
}
@@ -753,7 +757,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
for (i = 0; i < nr_merged; i++)
clear_exception(ps, ps->current_committed - 1 - i);
- r = area_io(ps, WRITE);
+ r = area_io(ps, WRITE_FLUSH_FUA);
if (r < 0)
return r;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 9ecff5f..6f75887 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -30,16 +30,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
((ti)->type->name == dm_snapshot_merge_target_name)
/*
- * The percentage increment we will wake up users at
- */
-#define WAKE_UP_PERCENT 5
-
-/*
- * kcopyd priority of snapshot operations
- */
-#define SNAPSHOT_COPY_PRIORITY 2
-
-/*
* The size of the mempool used to track chunks in use.
*/
#define MIN_IOS 256
@@ -180,6 +170,13 @@ struct dm_snap_pending_exception {
* kcopyd.
*/
int started;
+
+ /*
+ * For writing a complete chunk, bypassing the copy.
+ */
+ struct bio *full_bio;
+ bio_end_io_t *full_bio_end_io;
+ void *full_bio_private;
};
/*
@@ -1055,8 +1052,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
- ti->error = "Cannot allocate snapshot context private "
- "structure";
+ ti->error = "Cannot allocate private snapshot structure";
r = -ENOMEM;
goto bad;
}
@@ -1380,6 +1376,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
struct dm_snapshot *s = pe->snap;
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
+ struct bio *full_bio = NULL;
int error = 0;
if (!success) {
@@ -1415,10 +1412,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
*/
dm_insert_exception(&s->complete, e);
- out:
+out:
dm_remove_exception(&pe->e);
snapshot_bios = bio_list_get(&pe->snapshot_bios);
origin_bios = bio_list_get(&pe->origin_bios);
+ full_bio = pe->full_bio;
+ if (full_bio) {
+ full_bio->bi_end_io = pe->full_bio_end_io;
+ full_bio->bi_private = pe->full_bio_private;
+ }
free_pending_exception(pe);
increment_pending_exceptions_done_count();
@@ -1426,10 +1428,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
up_write(&s->lock);
/* Submit any pending write bios */
- if (error)
+ if (error) {
+ if (full_bio)
+ bio_io_error(full_bio);
error_bios(snapshot_bios);
- else
+ } else {
+ if (full_bio)
+ bio_endio(full_bio, 0);
flush_bios(snapshot_bios);
+ }
retry_origin_bios(s, origin_bios);
}
@@ -1480,8 +1487,33 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
- dm_kcopyd_copy(s->kcopyd_client,
- &src, 1, &dest, 0, copy_callback, pe);
+ dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
+}
+
+static void full_bio_end_io(struct bio *bio, int error)
+{
+ void *callback_data = bio->bi_private;
+
+ dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
+}
+
+static void start_full_bio(struct dm_snap_pending_exception *pe,
+ struct bio *bio)
+{
+ struct dm_snapshot *s = pe->snap;
+ void *callback_data;
+
+ pe->full_bio = bio;
+ pe->full_bio_end_io = bio->bi_end_io;
+ pe->full_bio_private = bio->bi_private;
+
+ callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
+ copy_callback, pe);
+
+ bio->bi_end_io = full_bio_end_io;
+ bio->bi_private = callback_data;
+
+ generic_make_request(bio);
}
static struct dm_snap_pending_exception *
@@ -1519,6 +1551,7 @@ __find_pending_exception(struct dm_snapshot *s,
bio_list_init(&pe->origin_bios);
bio_list_init(&pe->snapshot_bios);
pe->started = 0;
+ pe->full_bio = NULL;
if (s->store->type->prepare_exception(s->store, &pe->e)) {
free_pending_exception(pe);
@@ -1612,10 +1645,19 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
}
remap_exception(s, &pe->e, bio, chunk);
- bio_list_add(&pe->snapshot_bios, bio);
r = DM_MAPIO_SUBMITTED;
+ if (!pe->started &&
+ bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+ pe->started = 1;
+ up_write(&s->lock);
+ start_full_bio(pe, bio);
+ goto out;
+ }
+
+ bio_list_add(&pe->snapshot_bios, bio);
+
if (!pe->started) {
/* this is protected by snap->lock */
pe->started = 1;
@@ -1628,9 +1670,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
map_context->ptr = track_chunk(s, chunk);
}
- out_unlock:
+out_unlock:
up_write(&s->lock);
- out:
+out:
return r;
}
@@ -1974,7 +2016,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
pe_to_start_now = pe;
}
- next_snapshot:
+next_snapshot:
up_write(&snap->lock);
if (pe_to_start_now) {
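The full_bio path added to dm-snap.c above skips the kcopyd copy when a write already covers an entire chunk: start_full_bio() saves the bio's bi_end_io/bi_private, points them at its own completion and routes the bio straight to the exception store, and pending_complete() restores the saved values before ending the bio. A minimal, hypothetical sketch of that save/override/restore pattern in isolation (3.x two-argument bio completion assumed):

#include <linux/bio.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct endio_hook {
	bio_end_io_t *saved_end_io;
	void *saved_private;
};

static void hooked_end_io(struct bio *bio, int error)
{
	struct endio_hook *h = bio->bi_private;

	/* put the bio back the way its submitter configured it... */
	bio->bi_end_io = h->saved_end_io;
	bio->bi_private = h->saved_private;
	kfree(h);

	/* ...then complete it, which now runs the original bi_end_io */
	bio_endio(bio, error);
}

static int hook_bio_completion(struct bio *bio)
{
	struct endio_hook *h = kmalloc(sizeof(*h), GFP_NOIO);

	if (!h)
		return -ENOMEM;

	h->saved_end_io = bio->bi_end_io;
	h->saved_private = bio->bi_private;
	bio->bi_end_io = hooked_end_io;
	bio->bi_private = h;

	return 0;
}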
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 451c3bb..986b875 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -17,7 +17,7 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define DM_MSG_PREFIX "table"
@@ -54,7 +54,6 @@ struct dm_table {
sector_t *highs;
struct dm_target *targets;
- unsigned discards_supported:1;
unsigned integrity_supported:1;
/*
@@ -154,12 +153,11 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
return NULL;
size = nmemb * elem_size;
- addr = vmalloc(size);
- if (addr)
- memset(addr, 0, size);
+ addr = vzalloc(size);
return addr;
}
+EXPORT_SYMBOL(dm_vcalloc);
/*
* highs, and targets are managed as dynamic arrays during a
@@ -209,7 +207,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
INIT_LIST_HEAD(&t->devices);
INIT_LIST_HEAD(&t->target_callbacks);
atomic_set(&t->holders, 0);
- t->discards_supported = 1;
if (!num_targets)
num_targets = KEYS_PER_NODE;
@@ -281,6 +278,7 @@ void dm_table_get(struct dm_table *t)
{
atomic_inc(&t->holders);
}
+EXPORT_SYMBOL(dm_table_get);
void dm_table_put(struct dm_table *t)
{
@@ -290,6 +288,7 @@ void dm_table_put(struct dm_table *t)
smp_mb__before_atomic_dec();
atomic_dec(&t->holders);
}
+EXPORT_SYMBOL(dm_table_put);
/*
* Checks to see if we need to extend highs or targets.
@@ -455,13 +454,14 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
* Add a device to the list, or just increment the usage count if
* it's already present.
*/
-static int __table_get_device(struct dm_table *t, struct dm_target *ti,
- const char *path, fmode_t mode, struct dm_dev **result)
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ struct dm_dev **result)
{
int r;
dev_t uninitialized_var(dev);
struct dm_dev_internal *dd;
unsigned int major, minor;
+ struct dm_table *t = ti->table;
BUG_ON(!t);
@@ -509,6 +509,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
*result = &dd->dm_dev;
return 0;
}
+EXPORT_SYMBOL(dm_get_device);
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
@@ -539,23 +540,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
* If not we'll force DM to use PAGE_SIZE or
* smaller I/O, just to be safe.
*/
-
- if (q->merge_bvec_fn && !ti->type->merge)
+ if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
blk_limits_max_hw_sectors(limits,
(unsigned int) (PAGE_SIZE >> 9));
return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
- struct dm_dev **result)
-{
- return __table_get_device(ti->table, ti, path, mode, result);
-}
-
-
/*
- * Decrement a devices use count and remove it if necessary.
+ * Decrement a device's use count and remove it if necessary.
*/
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
@@ -568,6 +561,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
kfree(dd);
}
}
+EXPORT_SYMBOL(dm_put_device);
/*
* Checks to see if the target joins onto the end of the table.
@@ -791,8 +785,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
- if (!tgt->num_discard_requests)
- t->discards_supported = 0;
+ if (!tgt->num_discard_requests && tgt->discards_supported)
+ DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
+ dm_device_name(t->md), type);
return 0;
@@ -802,6 +797,63 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return r;
}
+/*
+ * Target argument parsing helpers.
+ */
+static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error, unsigned grouped)
+{
+ const char *arg_str = dm_shift_arg(arg_set);
+
+ if (!arg_str ||
+ (sscanf(arg_str, "%u", value) != 1) ||
+ (*value < arg->min) ||
+ (*value > arg->max) ||
+ (grouped && arg_set->argc < *value)) {
+ *error = arg->error;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error)
+{
+ return validate_next_arg(arg, arg_set, value, error, 0);
+}
+EXPORT_SYMBOL(dm_read_arg);
+
+int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error)
+{
+ return validate_next_arg(arg, arg_set, value, error, 1);
+}
+EXPORT_SYMBOL(dm_read_arg_group);
+
+const char *dm_shift_arg(struct dm_arg_set *as)
+{
+ char *r;
+
+ if (as->argc) {
+ as->argc--;
+ r = *as->argv;
+ as->argv++;
+ return r;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(dm_shift_arg);
+
+void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
+{
+ BUG_ON(as->argc < num_args);
+ as->argc -= num_args;
+ as->argv += num_args;
+}
+EXPORT_SYMBOL(dm_consume_args);
+
static int dm_table_set_type(struct dm_table *t)
{
unsigned i;
@@ -1077,11 +1129,13 @@ void dm_table_event(struct dm_table *t)
t->event_fn(t->event_context);
mutex_unlock(&_event_lock);
}
+EXPORT_SYMBOL(dm_table_event);
sector_t dm_table_get_size(struct dm_table *t)
{
return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
+EXPORT_SYMBOL(dm_table_get_size);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
@@ -1194,9 +1248,45 @@ static void dm_table_set_integrity(struct dm_table *t)
blk_get_integrity(template_disk));
}
+static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ unsigned flush = (*(unsigned *)data);
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && (q->flush_flags & flush);
+}
+
+static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ /*
+ * Require at least one underlying device to support flushes.
+ * t->devices includes internal dm devices such as mirror logs
+ * so we need to use iterate_devices here, which targets
+ * supporting flushes must provide.
+ */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->num_flush_requests)
+ continue;
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, device_flush_capable, &flush))
+ return 1;
+ }
+
+ return 0;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
+ unsigned flush = 0;
+
/*
* Copy table's limits to the DM device's request_queue
*/
@@ -1207,6 +1297,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ if (dm_table_supports_flush(t, REQ_FLUSH)) {
+ flush |= REQ_FLUSH;
+ if (dm_table_supports_flush(t, REQ_FUA))
+ flush |= REQ_FUA;
+ }
+ blk_queue_flush(q, flush);
+
dm_table_set_integrity(t);
/*
@@ -1237,6 +1334,7 @@ fmode_t dm_table_get_mode(struct dm_table *t)
{
return t->mode;
}
+EXPORT_SYMBOL(dm_table_get_mode);
static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
@@ -1345,6 +1443,7 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
{
return t->md;
}
+EXPORT_SYMBOL(dm_table_get_md);
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
@@ -1359,19 +1458,19 @@ bool dm_table_supports_discards(struct dm_table *t)
struct dm_target *ti;
unsigned i = 0;
- if (!t->discards_supported)
- return 0;
-
/*
* Unless any target used by the table set discards_supported,
* require at least one underlying device to support discards.
* t->devices includes internal dm devices such as mirror logs
* so we need to use iterate_devices here, which targets
- * supporting discard must provide.
+ * supporting discard selectively must provide.
*/
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
+ if (!ti->num_discard_requests)
+ continue;
+
if (ti->discards_supported)
return 1;
@@ -1382,13 +1481,3 @@ bool dm_table_supports_discards(struct dm_table *t)
return 0;
}
-
-EXPORT_SYMBOL(dm_vcalloc);
-EXPORT_SYMBOL(dm_get_device);
-EXPORT_SYMBOL(dm_put_device);
-EXPORT_SYMBOL(dm_table_event);
-EXPORT_SYMBOL(dm_table_get_size);
-EXPORT_SYMBOL(dm_table_get_mode);
-EXPORT_SYMBOL(dm_table_get_md);
-EXPORT_SYMBOL(dm_table_put);
-EXPORT_SYMBOL(dm_table_get);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0cf68b4..52b39f3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -37,6 +37,8 @@ static const char *_name = DM_NAME;
static unsigned int major = 0;
static unsigned int _major = 0;
+static DEFINE_IDR(_minor_idr);
+
static DEFINE_SPINLOCK(_minor_lock);
/*
* For bio-based dm.
@@ -109,6 +111,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
+#define DMF_MERGE_IS_OPTIONAL 6
/*
* Work processed by per-device workqueue.
@@ -313,6 +316,12 @@ static void __exit dm_exit(void)
while (i--)
_exits[i]();
+
+ /*
+ * Should be empty by this point.
+ */
+ idr_remove_all(&_minor_idr);
+ idr_destroy(&_minor_idr);
}
/*
@@ -1171,7 +1180,8 @@ static int __clone_and_map_discard(struct clone_info *ci)
/*
* Even though the device advertised discard support,
- * reconfiguration might have changed that since the
+ * that does not mean every target supports it, and
+ * reconfiguration might also have changed that since the
* check was performed.
*/
if (!ti->num_discard_requests)
@@ -1705,8 +1715,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
-static DEFINE_IDR(_minor_idr);
-
static void free_minor(int minor)
{
spin_lock(&_minor_lock);
@@ -1800,7 +1808,6 @@ static void dm_init_md_queue(struct mapped_device *md)
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
- blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
}
/*
@@ -1986,6 +1993,59 @@ static void __set_size(struct mapped_device *md, sector_t size)
}
/*
+ * Return 1 if the queue has a compulsory merge_bvec_fn function.
+ *
+ * If this function returns 0, then the device is either a non-dm
+ * device without a merge_bvec_fn, or it is a dm device that is
+ * able to split any bios it receives that are too big.
+ */
+int dm_queue_merge_is_compulsory(struct request_queue *q)
+{
+ struct mapped_device *dev_md;
+
+ if (!q->merge_bvec_fn)
+ return 0;
+
+ if (q->make_request_fn == dm_request) {
+ dev_md = q->queuedata;
+ if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
+ return 0;
+ }
+
+ return 1;
+}
+
+static int dm_device_merge_is_compulsory(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct block_device *bdev = dev->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ return dm_queue_merge_is_compulsory(q);
+}
+
+/*
+ * Return 1 if it is acceptable to ignore merge_bvec_fn based
+ * on the properties of the underlying devices.
+ */
+static int dm_table_merge_is_optional(struct dm_table *table)
+{
+ unsigned i = 0;
+ struct dm_target *ti;
+
+ while (i < dm_table_get_num_targets(table)) {
+ ti = dm_table_get_target(table, i++);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
* Returns old map, which caller must destroy.
*/
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -1995,6 +2055,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct request_queue *q = md->queue;
sector_t size;
unsigned long flags;
+ int merge_is_optional;
size = dm_table_get_size(t);
@@ -2020,10 +2081,16 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
__bind_mempools(md, t);
+ merge_is_optional = dm_table_merge_is_optional(t);
+
write_lock_irqsave(&md->map_lock, flags);
old_map = md->map;
md->map = t;
dm_table_set_restrictions(t, q, limits);
+ if (merge_is_optional)
+ set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
+ else
+ clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
write_unlock_irqrestore(&md->map_lock, flags);
return old_map;
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1aaf167..6745dbd 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -66,6 +66,8 @@ int dm_table_alloc_md_mempools(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
+int dm_queue_merge_is_compulsory(struct request_queue *q);
+
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
diff --git a/drivers/md/linear.h b/drivers/md/linear.h
index 0ce29b6..2f2da05 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/linear.h
@@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t;
struct linear_private_data
{
+ struct rcu_head rcu;
sector_t array_sectors;
dev_info_t disks[0];
- struct rcu_head rcu;
};
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dfc9425..5404b22 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -215,6 +215,55 @@ struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
+void md_trim_bio(struct bio *bio, int offset, int size)
+{
+ /* 'bio' is a cloned bio which we need to trim to match
+ * the given offset and size.
+ * This requires adjusting bi_sector, bi_size, and bi_io_vec
+ */
+ int i;
+ struct bio_vec *bvec;
+ int sofar = 0;
+
+ size <<= 9;
+ if (offset == 0 && size == bio->bi_size)
+ return;
+
+ bio->bi_sector += offset;
+ bio->bi_size = size;
+ offset <<= 9;
+ clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
+ while (bio->bi_idx < bio->bi_vcnt &&
+ bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
+ /* remove this whole bio_vec */
+ offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
+ bio->bi_idx++;
+ }
+ if (bio->bi_idx < bio->bi_vcnt) {
+ bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
+ bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
+ }
+ /* avoid any complications with bi_idx being non-zero*/
+ if (bio->bi_idx) {
+ memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
+ (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
+ bio->bi_vcnt -= bio->bi_idx;
+ bio->bi_idx = 0;
+ }
+ /* Make sure vcnt and last bv are not too big */
+ bio_for_each_segment(bvec, bio, i) {
+ if (sofar + bvec->bv_len > size)
+ bvec->bv_len = size - sofar;
+ if (bvec->bv_len == 0) {
+ bio->bi_vcnt = i;
+ break;
+ }
+ sofar += bvec->bv_len;
+ }
+}
+EXPORT_SYMBOL_GPL(md_trim_bio);
+
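md_trim_bio() above narrows a cloned bio to (offset, size): advance bi_sector, drop whole leading bio_vecs, shorten the first surviving one, then clamp the tail so the total matches the new size. A rough user-space sketch of the same arithmetic on a plain segment array — the seg struct and trim_segs() helper are invented for illustration, and lengths are in bytes rather than sectors:

#include <stdio.h>

struct seg { unsigned int len, off; };   /* stand-in for a bio_vec */

/* Trim 'nsegs' segments so they describe only 'size' bytes starting
 * 'offset' bytes into the original run; returns the new segment count. */
static int trim_segs(struct seg *v, int nsegs, unsigned int offset,
                     unsigned int size)
{
    int i = 0, out;
    unsigned int sofar = 0;

    /* drop whole leading segments, like the bi_idx loop above */
    while (i < nsegs && v[i].len <= offset) {
        offset -= v[i].len;
        i++;
    }
    if (i < nsegs) {            /* partial first segment */
        v[i].off += offset;
        v[i].len -= offset;
    }
    /* compact, mirroring the memmove() that resets bi_idx to 0 */
    for (out = 0; i < nsegs; i++, out++)
        v[out] = v[i];

    /* clamp the tail so the total length is exactly 'size' */
    for (i = 0; i < out; i++) {
        if (sofar + v[i].len > size)
            v[i].len = size - sofar;
        if (v[i].len == 0)
            return i;
        sofar += v[i].len;
    }
    return out;
}

int main(void)
{
    struct seg v[3] = { {4096, 0}, {4096, 0}, {4096, 0} };
    int n = trim_segs(v, 3, 4608, 5120);   /* skip 4608B, keep 5120B */
    for (int i = 0; i < n; i++)
        printf("seg %d: len=%u off=%u\n", i, v[i].len, v[i].off);
    return 0;
}

Running it prints two surviving segments (3584 bytes at offset 512, then 1536 bytes), mirroring how the kernel version shifts bi_io_vec down and clamps the last vector.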
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -757,6 +806,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
rdev->sb_start = 0;
rdev->sectors = 0;
}
+ if (rdev->bb_page) {
+ put_page(rdev->bb_page);
+ rdev->bb_page = NULL;
+ }
}
@@ -795,7 +848,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes);
- submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
+ submit_bio(WRITE_FLUSH_FUA, bio);
}
void md_super_wait(mddev_t *mddev)
@@ -1025,7 +1078,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
ret = -EINVAL;
bdevname(rdev->bdev, b);
- sb = (mdp_super_t*)page_address(rdev->sb_page);
+ sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
@@ -1054,6 +1107,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
rdev->preferred_minor = sb->md_minor;
rdev->data_offset = 0;
rdev->sb_size = MD_SB_BYTES;
+ rdev->badblocks.shift = -1;
if (sb->level == LEVEL_MULTIPATH)
rdev->desc_nr = -1;
@@ -1064,7 +1118,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
ret = 1;
} else {
__u64 ev1, ev2;
- mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
+ mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
@@ -1084,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
ret = 0;
}
rdev->sectors = rdev->sb_start;
+ /* Limit to 4TB as metadata cannot record more than that */
+ if (rdev->sectors >= (2ULL << 32))
+ rdev->sectors = (2ULL << 32) - 2;
- if (rdev->sectors < sb->size * 2 && sb->level > 1)
+ if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
/* "this cannot possibly happen" ... */
ret = -EINVAL;
@@ -1099,7 +1156,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
mdp_disk_t *desc;
- mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
+ mdp_super_t *sb = page_address(rdev->sb_page);
__u64 ev1 = md_event(sb);
rdev->raid_disk = -1;
@@ -1119,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->clevel[0] = 0;
mddev->layout = sb->layout;
mddev->raid_disks = sb->raid_disks;
- mddev->dev_sectors = sb->size * 2;
+ mddev->dev_sectors = ((sector_t)sb->size) * 2;
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1230,7 +1287,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
rdev->sb_size = MD_SB_BYTES;
- sb = (mdp_super_t*)page_address(rdev->sb_page);
+ sb = page_address(rdev->sb_page);
memset(sb, 0, sizeof(*sb));
@@ -1361,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
rdev->sb_start = calc_dev_sboffset(rdev);
if (!num_sectors || num_sectors > rdev->sb_start)
num_sectors = rdev->sb_start;
+ /* Limit to 4TB as metadata cannot record more than that.
+ * 4TB == 2^32 KB, or 2*2^32 sectors.
+ */
+ if (num_sectors >= (2ULL << 32))
+ num_sectors = (2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
@@ -1395,6 +1457,8 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
return cpu_to_le32(csum);
}
+static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
+ int acknowledged);
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
struct mdp_superblock_1 *sb;
@@ -1435,7 +1499,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
if (ret) return ret;
- sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
+ sb = page_address(rdev->sb_page);
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
sb->major_version != cpu_to_le32(1) ||
@@ -1473,12 +1537,52 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
else
rdev->desc_nr = le32_to_cpu(sb->dev_number);
+ if (!rdev->bb_page) {
+ rdev->bb_page = alloc_page(GFP_KERNEL);
+ if (!rdev->bb_page)
+ return -ENOMEM;
+ }
+ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
+ rdev->badblocks.count == 0) {
+ /* need to load the bad block list.
+ * Currently we limit it to one page.
+ */
+ s32 offset;
+ sector_t bb_sector;
+ u64 *bbp;
+ int i;
+ int sectors = le16_to_cpu(sb->bblog_size);
+ if (sectors > (PAGE_SIZE / 512))
+ return -EINVAL;
+ offset = le32_to_cpu(sb->bblog_offset);
+ if (offset == 0)
+ return -EINVAL;
+ bb_sector = (long long)offset;
+ if (!sync_page_io(rdev, bb_sector, sectors << 9,
+ rdev->bb_page, READ, true))
+ return -EIO;
+ bbp = (u64 *)page_address(rdev->bb_page);
+ rdev->badblocks.shift = sb->bblog_shift;
+ for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
+ u64 bb = le64_to_cpu(*bbp);
+ int count = bb & (0x3ff);
+ u64 sector = bb >> 10;
+ sector <<= sb->bblog_shift;
+ count <<= sb->bblog_shift;
+ if (bb + 1 == 0)
+ break;
+ if (md_set_badblocks(&rdev->badblocks,
+ sector, count, 1) == 0)
+ return -EINVAL;
+ }
+ } else if (sb->bblog_offset == 0)
+ rdev->badblocks.shift = -1;
+
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
- struct mdp_superblock_1 *refsb =
- (struct mdp_superblock_1*)page_address(refdev->sb_page);
+ struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
sb->level != refsb->level ||
@@ -1513,7 +1617,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
- struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
__u64 ev1 = le64_to_cpu(sb->events);
rdev->raid_disk = -1;
@@ -1619,13 +1723,12 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
int max_dev, i;
/* make rdev->sb match mddev and rdev data. */
- sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
+ sb = page_address(rdev->sb_page);
sb->feature_map = 0;
sb->pad0 = 0;
sb->recovery_offset = cpu_to_le64(0);
memset(sb->pad1, 0, sizeof(sb->pad1));
- memset(sb->pad2, 0, sizeof(sb->pad2));
memset(sb->pad3, 0, sizeof(sb->pad3));
sb->utime = cpu_to_le64((__u64)mddev->utime);
@@ -1643,6 +1746,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
+ if (test_bit(WriteMostly, &rdev->flags))
+ sb->devflags |= WriteMostly1;
+ else
+ sb->devflags &= ~WriteMostly1;
+
if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -1665,6 +1773,40 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
}
+ if (rdev->badblocks.count == 0)
+ /* Nothing to do for bad blocks*/ ;
+ else if (sb->bblog_offset == 0)
+ /* Cannot record bad blocks on this device */
+ md_error(mddev, rdev);
+ else {
+ struct badblocks *bb = &rdev->badblocks;
+ u64 *bbp = (u64 *)page_address(rdev->bb_page);
+ u64 *p = bb->page;
+ sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
+ if (bb->changed) {
+ unsigned seq;
+
+retry:
+ seq = read_seqbegin(&bb->lock);
+
+ memset(bbp, 0xff, PAGE_SIZE);
+
+ for (i = 0 ; i < bb->count ; i++) {
+ u64 internal_bb = *p++;
+ u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
+ | BB_LEN(internal_bb));
+ *bbp++ = cpu_to_le64(store_bb);
+ }
+ if (read_seqretry(&bb->lock, seq))
+ goto retry;
+
+ bb->sector = (rdev->sb_start +
+ (int)le32_to_cpu(sb->bblog_offset));
+ bb->size = le16_to_cpu(sb->bblog_size);
+ bb->changed = 0;
+ }
+ }
+
max_dev = 0;
list_for_each_entry(rdev2, &mddev->disks, same_set)
if (rdev2->desc_nr+1 > max_dev)
@@ -1724,7 +1866,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
num_sectors = max_sectors;
rdev->sb_start = sb_start;
}
- sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
+ sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = rdev->sb_start;
sb->sb_csum = calc_sb_1_csum(sb);
@@ -1922,7 +2064,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
bd_link_disk_holder(rdev->bdev, mddev->gendisk);
/* May as well allow recovery to be retried once */
- mddev->recovery_disabled = 0;
+ mddev->recovery_disabled++;
return 0;
@@ -1953,6 +2095,9 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
rdev->sysfs_state = NULL;
+ kfree(rdev->badblocks.page);
+ rdev->badblocks.count = 0;
+ rdev->badblocks.page = NULL;
/* We need to delay this, otherwise we can deadlock when
* writing to 'remove' to "dev/state". We also need
* to delay it due to rcu usage.
@@ -2127,10 +2272,10 @@ static void print_rdev(mdk_rdev_t *rdev, int major_version)
printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
switch (major_version) {
case 0:
- print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
+ print_sb_90(page_address(rdev->sb_page));
break;
case 1:
- print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
+ print_sb_1(page_address(rdev->sb_page));
break;
}
} else
@@ -2194,6 +2339,7 @@ static void md_update_sb(mddev_t * mddev, int force_change)
mdk_rdev_t *rdev;
int sync_req;
int nospares = 0;
+ int any_badblocks_changed = 0;
repeat:
/* First make sure individual recovery_offsets are correct */
@@ -2208,8 +2354,18 @@ repeat:
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
- if (!mddev->external)
+ if (!mddev->external) {
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->badblocks.changed) {
+ md_ack_all_badblocks(&rdev->badblocks);
+ md_error(mddev, rdev);
+ }
+ clear_bit(Blocked, &rdev->flags);
+ clear_bit(BlockedBadBlocks, &rdev->flags);
+ wake_up(&rdev->blocked_wait);
+ }
+ }
wake_up(&mddev->sb_wait);
return;
}
@@ -2265,6 +2421,14 @@ repeat:
MD_BUG();
mddev->events --;
}
+
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->badblocks.changed)
+ any_badblocks_changed++;
+ if (test_bit(Faulty, &rdev->flags))
+ set_bit(FaultRecorded, &rdev->flags);
+ }
+
sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
@@ -2290,6 +2454,13 @@ repeat:
bdevname(rdev->bdev,b),
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
+ if (rdev->badblocks.size) {
+ md_super_write(mddev, rdev,
+ rdev->badblocks.sector,
+ rdev->badblocks.size << 9,
+ rdev->bb_page);
+ rdev->badblocks.size = 0;
+ }
} else
dprintk(")\n");
@@ -2313,6 +2484,15 @@ repeat:
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (test_and_clear_bit(FaultRecorded, &rdev->flags))
+ clear_bit(Blocked, &rdev->flags);
+
+ if (any_badblocks_changed)
+ md_ack_all_badblocks(&rdev->badblocks);
+ clear_bit(BlockedBadBlocks, &rdev->flags);
+ wake_up(&rdev->blocked_wait);
+ }
}
/* words written to sysfs files may, or may not, be \n terminated.
@@ -2347,7 +2527,8 @@ state_show(mdk_rdev_t *rdev, char *page)
char *sep = "";
size_t len = 0;
- if (test_bit(Faulty, &rdev->flags)) {
+ if (test_bit(Faulty, &rdev->flags) ||
+ rdev->badblocks.unacked_exist) {
len+= sprintf(page+len, "%sfaulty",sep);
sep = ",";
}
@@ -2359,7 +2540,8 @@ state_show(mdk_rdev_t *rdev, char *page)
len += sprintf(page+len, "%swrite_mostly",sep);
sep = ",";
}
- if (test_bit(Blocked, &rdev->flags)) {
+ if (test_bit(Blocked, &rdev->flags) ||
+ rdev->badblocks.unacked_exist) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
@@ -2368,6 +2550,10 @@ state_show(mdk_rdev_t *rdev, char *page)
len += sprintf(page+len, "%sspare", sep);
sep = ",";
}
+ if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ len += sprintf(page+len, "%swrite_error", sep);
+ sep = ",";
+ }
return len+sprintf(page+len, "\n");
}
@@ -2375,18 +2561,23 @@ static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
/* can write
- * faulty - simulates and error
+ * faulty - simulates an error
* remove - disconnects the device
* writemostly - sets write_mostly
* -writemostly - clears write_mostly
- * blocked - sets the Blocked flag
- * -blocked - clears the Blocked flag
+ * blocked - sets the Blocked flag
+ * -blocked - clears the Blocked flag and possibly simulates an error
* insync - sets Insync providing device isn't active
+ * write_error - sets WriteErrorSeen
+ * -write_error - clears WriteErrorSeen
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
- err = 0;
+ if (test_bit(Faulty, &rdev->flags))
+ err = 0;
+ else
+ err = -EBUSY;
} else if (cmd_match(buf, "remove")) {
if (rdev->raid_disk >= 0)
err = -EBUSY;
@@ -2408,7 +2599,15 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
set_bit(Blocked, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-blocked")) {
+ if (!test_bit(Faulty, &rdev->flags) &&
+ rdev->badblocks.unacked_exist) {
+ /* metadata handler doesn't understand badblocks,
+ * so we need to fail the device
+ */
+ md_error(rdev->mddev, rdev);
+ }
clear_bit(Blocked, &rdev->flags);
+ clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
@@ -2417,6 +2616,12 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "write_error")) {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ err = 0;
+ } else if (cmd_match(buf, "-write_error")) {
+ clear_bit(WriteErrorSeen, &rdev->flags);
+ err = 0;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -2459,7 +2664,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
char *e;
int err;
- char nm[20];
int slot = simple_strtoul(buf, &e, 10);
if (strncmp(buf, "none", 4)==0)
slot = -1;
@@ -2482,8 +2686,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
hot_remove_disk(rdev->mddev, rdev->raid_disk);
if (err)
return err;
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&rdev->mddev->kobj, nm);
+ sysfs_unlink_rdev(rdev->mddev, rdev);
rdev->raid_disk = -1;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
@@ -2522,8 +2725,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
return err;
} else
sysfs_notify_dirent_safe(rdev->sysfs_state);
- sprintf(nm, "rd%d", rdev->raid_disk);
- if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
+ if (sysfs_link_rdev(rdev->mddev, rdev))
/* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
@@ -2712,6 +2914,39 @@ static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t le
static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
+
+static ssize_t
+badblocks_show(struct badblocks *bb, char *page, int unack);
+static ssize_t
+badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
+
+static ssize_t bb_show(mdk_rdev_t *rdev, char *page)
+{
+ return badblocks_show(&rdev->badblocks, page, 0);
+}
+static ssize_t bb_store(mdk_rdev_t *rdev, const char *page, size_t len)
+{
+ int rv = badblocks_store(&rdev->badblocks, page, len, 0);
+ /* Maybe that ack was all we needed */
+ if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
+ wake_up(&rdev->blocked_wait);
+ return rv;
+}
+static struct rdev_sysfs_entry rdev_bad_blocks =
+__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
+
+
+static ssize_t ubb_show(mdk_rdev_t *rdev, char *page)
+{
+ return badblocks_show(&rdev->badblocks, page, 1);
+}
+static ssize_t ubb_store(mdk_rdev_t *rdev, const char *page, size_t len)
+{
+ return badblocks_store(&rdev->badblocks, page, len, 1);
+}
+static struct rdev_sysfs_entry rdev_unack_bad_blocks =
+__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
+
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_errors.attr,
@@ -2719,6 +2954,8 @@ static struct attribute *rdev_default_attrs[] = {
&rdev_offset.attr,
&rdev_size.attr,
&rdev_recovery_start.attr,
+ &rdev_bad_blocks.attr,
+ &rdev_unack_bad_blocks.attr,
NULL,
};
static ssize_t
@@ -2782,7 +3019,7 @@ static struct kobj_type rdev_ktype = {
.default_attrs = rdev_default_attrs,
};
-void md_rdev_init(mdk_rdev_t *rdev)
+int md_rdev_init(mdk_rdev_t *rdev)
{
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
@@ -2792,12 +3029,27 @@ void md_rdev_init(mdk_rdev_t *rdev)
rdev->sb_events = 0;
rdev->last_read_error.tv_sec = 0;
rdev->last_read_error.tv_nsec = 0;
+ rdev->sb_loaded = 0;
+ rdev->bb_page = NULL;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
atomic_set(&rdev->corrected_errors, 0);
INIT_LIST_HEAD(&rdev->same_set);
init_waitqueue_head(&rdev->blocked_wait);
+
+ /* Add space to store bad block list.
+ * This reserves the space even on arrays where it cannot
+ * be used - I wonder if that matters
+ */
+ rdev->badblocks.count = 0;
+ rdev->badblocks.shift = 0;
+ rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ seqlock_init(&rdev->badblocks.lock);
+ if (rdev->badblocks.page == NULL)
+ return -ENOMEM;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
@@ -2823,8 +3075,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
return ERR_PTR(-ENOMEM);
}
- md_rdev_init(rdev);
- if ((err = alloc_disk_sb(rdev)))
+ err = md_rdev_init(rdev);
+ if (err)
+ goto abort_free;
+ err = alloc_disk_sb(rdev);
+ if (err)
goto abort_free;
err = lock_rdev(rdev, newdev, super_format == -2);
@@ -2860,15 +3115,17 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
goto abort_free;
}
}
+ if (super_format == -1)
+ /* hot-add for 0.90, or non-persistent: so no badblocks */
+ rdev->badblocks.shift = -1;
return rdev;
abort_free:
- if (rdev->sb_page) {
- if (rdev->bdev)
- unlock_rdev(rdev);
- free_disk_sb(rdev);
- }
+ if (rdev->bdev)
+ unlock_rdev(rdev);
+ free_disk_sb(rdev);
+ kfree(rdev->badblocks.page);
kfree(rdev);
return ERR_PTR(err);
}
@@ -3149,15 +3406,13 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
}
list_for_each_entry(rdev, &mddev->disks, same_set) {
- char nm[20];
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk >= mddev->raid_disks)
rdev->new_raid_disk = -1;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ sysfs_unlink_rdev(mddev, rdev);
}
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->raid_disk < 0)
@@ -3168,11 +3423,10 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
if (rdev->raid_disk < 0)
clear_bit(In_sync, &rdev->flags);
else {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- if(sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
- printk("md: cannot register %s for %s after level change\n",
- nm, mdname(mddev));
+ if (sysfs_link_rdev(mddev, rdev))
+ printk(KERN_WARNING "md: cannot register rd%d"
+ " for %s after level change\n",
+ rdev->raid_disk, mdname(mddev));
}
}
@@ -4504,7 +4758,8 @@ int md_run(mddev_t *mddev)
}
if (mddev->bio_set == NULL)
- mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(mddev));
+ mddev->bio_set = bioset_create(BIO_POOL_SIZE,
+ sizeof(mddev_t *));
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
@@ -4621,12 +4876,9 @@ int md_run(mddev_t *mddev)
smp_wmb();
mddev->ready = 1;
list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
+ if (rdev->raid_disk >= 0)
+ if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
- }
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -4854,11 +5106,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
sysfs_notify_dirent_safe(mddev->sysfs_state);
list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- }
+ if (rdev->raid_disk >= 0)
+ sysfs_unlink_rdev(mddev, rdev);
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
@@ -5750,6 +5999,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
return -ENODEV;
md_error(mddev, rdev);
+ if (!test_bit(Faulty, &rdev->flags))
+ return -EBUSY;
return 0;
}
@@ -6198,18 +6449,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
if (!rdev || test_bit(Faulty, &rdev->flags))
return;
- if (mddev->external)
- set_bit(Blocked, &rdev->flags);
-/*
- dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
- mdname(mddev),
- MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
- __builtin_return_address(0),__builtin_return_address(1),
- __builtin_return_address(2),__builtin_return_address(3));
-*/
- if (!mddev->pers)
- return;
- if (!mddev->pers->error_handler)
+ if (!mddev->pers || !mddev->pers->error_handler)
return;
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
@@ -6933,11 +7173,14 @@ void md_do_sync(mddev_t *mddev)
atomic_add(sectors, &mddev->recovery_active);
}
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ break;
+
j += sectors;
if (j>1) mddev->curr_resync = j;
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
- /* this is the earliers that rebuilt will be
+ /* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
md_new_event(mddev);
@@ -6946,10 +7189,6 @@ void md_do_sync(mddev_t *mddev)
continue;
last_check = io_sectors;
-
- if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
- break;
-
repeat:
if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
/* step marks */
@@ -7067,29 +7306,23 @@ static int remove_and_add_spares(mddev_t *mddev)
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev->raid_disk)==0) {
- char nm[20];
- sprintf(nm,"rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
}
- if (mddev->degraded && !mddev->recovery_disabled) {
+ if (mddev->degraded) {
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags) &&
- !test_bit(Blocked, &rdev->flags))
+ !test_bit(Faulty, &rdev->flags))
spares++;
if (rdev->raid_disk < 0
&& !test_bit(Faulty, &rdev->flags)) {
rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- if (sysfs_create_link(&mddev->kobj,
- &rdev->kobj, nm))
+ if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
spares++;
md_new_event(mddev);
@@ -7138,6 +7371,8 @@ static void reap_sync_thread(mddev_t *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
+ if (mddev->event_work.func)
+ queue_work(md_misc_wq, &mddev->event_work);
}
/*
@@ -7170,9 +7405,6 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->bitmap)
bitmap_daemon_work(mddev);
- if (mddev->ro)
- return;
-
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
printk(KERN_INFO "md: %s in immediate safe mode\n",
@@ -7209,9 +7441,7 @@ void md_check_recovery(mddev_t *mddev)
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev->raid_disk)==0) {
- char nm[20];
- sprintf(nm,"rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
}
@@ -7331,12 +7561,499 @@ void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
sysfs_notify_dirent_safe(rdev->sysfs_state);
wait_event_timeout(rdev->blocked_wait,
- !test_bit(Blocked, &rdev->flags),
+ !test_bit(Blocked, &rdev->flags) &&
+ !test_bit(BlockedBadBlocks, &rdev->flags),
msecs_to_jiffies(5000));
rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
+
+/* Bad block management.
+ * We can record which blocks on each device are 'bad' and so just
+ * fail those blocks, or that stripe, rather than the whole device.
+ * Entries in the bad-block table are 64 bits wide. This comprises:
+ * Length of bad-range, in sectors: 0-511 for lengths 1-512
+ * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
+ * A 'shift' can be set so that larger blocks are tracked and
+ * consequently larger devices can be covered.
+ * 'Acknowledged' flag - 1 bit. - the most significant bit.
+ *
+ * Locking of the bad-block table uses a seqlock so md_is_badblock
+ * might need to retry if it is very unlucky.
+ * We will sometimes want to check for bad blocks in a bi_end_io function,
+ * so we use the write_seqlock_irq variant.
+ *
+ * When looking for a bad block we specify a range and want to
+ * know if any block in the range is bad. So we binary-search
+ * to the last range that starts at-or-before the given endpoint,
+ * (or "before the sector after the target range")
+ * then see if it ends after the given start.
+ * We return
+ * 0 if there are no known bad blocks in the range
+ * 1 if there are known bad blocks which are all acknowledged
+ * -1 if there are bad blocks which have not yet been acknowledged in metadata.
+ * plus the start/length of the first bad section we overlap.
+ */
+int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
+ sector_t *first_bad, int *bad_sectors)
+{
+ int hi;
+ int lo = 0;
+ u64 *p = bb->page;
+ int rv = 0;
+ sector_t target = s + sectors;
+ unsigned seq;
+
+ if (bb->shift > 0) {
+ /* round the start down, and the end up */
+ s >>= bb->shift;
+ target += (1<<bb->shift) - 1;
+ target >>= bb->shift;
+ sectors = target - s;
+ }
+ /* 'target' is now the first block after the bad range */
+
+retry:
+ seq = read_seqbegin(&bb->lock);
+
+ hi = bb->count;
+
+ /* Binary search between lo and hi for 'target'
+ * i.e. for the last range that starts before 'target'
+ */
+ /* INVARIANT: ranges before 'lo' and at-or-after 'hi'
+ * are known not to be the last range before target.
+ * VARIANT: hi-lo is the number of possible
+ * ranges, and decreases until it reaches 1
+ */
+ while (hi - lo > 1) {
+ int mid = (lo + hi) / 2;
+ sector_t a = BB_OFFSET(p[mid]);
+ if (a < target)
+ /* This could still be the one, earlier ranges
+ * could not. */
+ lo = mid;
+ else
+ /* This and later ranges are definitely out. */
+ hi = mid;
+ }
+ /* 'lo' might be the last that started before target, but 'hi' isn't */
+ if (hi > lo) {
+ /* need to check all ranges that end after 's' to see if
+ * any are unacknowledged.
+ */
+ while (lo >= 0 &&
+ BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+ if (BB_OFFSET(p[lo]) < target) {
+ /* starts before the end, and finishes after
+ * the start, so they must overlap
+ */
+ if (rv != -1 && BB_ACK(p[lo]))
+ rv = 1;
+ else
+ rv = -1;
+ *first_bad = BB_OFFSET(p[lo]);
+ *bad_sectors = BB_LEN(p[lo]);
+ }
+ lo--;
+ }
+ }
+
+ if (read_seqretry(&bb->lock, seq))
+ goto retry;
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(md_is_badblock);
+
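The search above is the usual "last range that starts before target" binary search on a sorted array, with lo and hi maintained so that only indexes in [lo, hi) can still be the answer. A stripped-down sketch on an array of bare start sectors — no BB_* packing and no seqlock, purely to make the invariant concrete:

#include <stdio.h>

/* Return the index of the last range whose start is < target,
 * or -1 if every range starts at or after target.
 * 'starts' must be sorted ascending, like the bb->page entries. */
static int last_before(const unsigned long long *starts, int count,
                       unsigned long long target)
{
    int lo = 0, hi = count;

    if (count == 0 || starts[0] >= target)
        return -1;
    /* invariant: starts[lo] < target, and everything at or past hi
     * is known not to be the answer */
    while (hi - lo > 1) {
        int mid = (lo + hi) / 2;
        if (starts[mid] < target)
            lo = mid;
        else
            hi = mid;
    }
    return lo;
}

int main(void)
{
    unsigned long long starts[] = { 8, 64, 200, 512 };
    printf("%d\n", last_before(starts, 4, 300)); /* 2 (range at 200) */
    printf("%d\n", last_before(starts, 4, 8));   /* -1 (none before) */
    return 0;
}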
+/*
+ * Add a range of bad blocks to the table.
+ * This might extend the table, or might contract it
+ * if two adjacent ranges can be merged.
+ * We binary-search to find the 'insertion' point, then
+ * decide how best to handle it.
+ */
+static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
+ int acknowledged)
+{
+ u64 *p;
+ int lo, hi;
+ int rv = 1;
+
+ if (bb->shift < 0)
+ /* badblocks are disabled */
+ return 0;
+
+ if (bb->shift) {
+ /* round the start down, and the end up */
+ sector_t next = s + sectors;
+ s >>= bb->shift;
+ next += (1<<bb->shift) - 1;
+ next >>= bb->shift;
+ sectors = next - s;
+ }
+
+ write_seqlock_irq(&bb->lock);
+
+ p = bb->page;
+ lo = 0;
+ hi = bb->count;
+ /* Find the last range that starts at-or-before 's' */
+ while (hi - lo > 1) {
+ int mid = (lo + hi) / 2;
+ sector_t a = BB_OFFSET(p[mid]);
+ if (a <= s)
+ lo = mid;
+ else
+ hi = mid;
+ }
+ if (hi > lo && BB_OFFSET(p[lo]) > s)
+ hi = lo;
+
+ if (hi > lo) {
+ /* we found a range that might merge with the start
+ * of our new range
+ */
+ sector_t a = BB_OFFSET(p[lo]);
+ sector_t e = a + BB_LEN(p[lo]);
+ int ack = BB_ACK(p[lo]);
+ if (e >= s) {
+ /* Yes, we can merge with a previous range */
+ if (s == a && s + sectors >= e)
+ /* new range covers old */
+ ack = acknowledged;
+ else
+ ack = ack && acknowledged;
+
+ if (e < s + sectors)
+ e = s + sectors;
+ if (e - a <= BB_MAX_LEN) {
+ p[lo] = BB_MAKE(a, e-a, ack);
+ s = e;
+ } else {
+ /* does not all fit in one range,
+ * make p[lo] maximal
+ */
+ if (BB_LEN(p[lo]) != BB_MAX_LEN)
+ p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
+ s = a + BB_MAX_LEN;
+ }
+ sectors = e - s;
+ }
+ }
+ if (sectors && hi < bb->count) {
+ /* 'hi' points to the first range that starts after 's'.
+ * Maybe we can merge with the start of that range */
+ sector_t a = BB_OFFSET(p[hi]);
+ sector_t e = a + BB_LEN(p[hi]);
+ int ack = BB_ACK(p[hi]);
+ if (a <= s + sectors) {
+ /* merging is possible */
+ if (e <= s + sectors) {
+ /* full overlap */
+ e = s + sectors;
+ ack = acknowledged;
+ } else
+ ack = ack && acknowledged;
+
+ a = s;
+ if (e - a <= BB_MAX_LEN) {
+ p[hi] = BB_MAKE(a, e-a, ack);
+ s = e;
+ } else {
+ p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
+ s = a + BB_MAX_LEN;
+ }
+ sectors = e - s;
+ lo = hi;
+ hi++;
+ }
+ }
+ if (sectors == 0 && hi < bb->count) {
+ /* we might be able to combine lo and hi */
+ /* Note: 's' is at the end of 'lo' */
+ sector_t a = BB_OFFSET(p[hi]);
+ int lolen = BB_LEN(p[lo]);
+ int hilen = BB_LEN(p[hi]);
+ int newlen = lolen + hilen - (s - a);
+ if (s >= a && newlen < BB_MAX_LEN) {
+ /* yes, we can combine them */
+ int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
+ p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
+ memmove(p + hi, p + hi + 1,
+ (bb->count - hi - 1) * 8);
+ bb->count--;
+ }
+ }
+ while (sectors) {
+ /* didn't merge (it all).
+ * Need to add a range just before 'hi' */
+ if (bb->count >= MD_MAX_BADBLOCKS) {
+ /* No room for more */
+ rv = 0;
+ break;
+ } else {
+ int this_sectors = sectors;
+ memmove(p + hi + 1, p + hi,
+ (bb->count - hi) * 8);
+ bb->count++;
+
+ if (this_sectors > BB_MAX_LEN)
+ this_sectors = BB_MAX_LEN;
+ p[hi] = BB_MAKE(s, this_sectors, acknowledged);
+ sectors -= this_sectors;
+ s += this_sectors;
+ }
+ }
+
+ bb->changed = 1;
+ if (!acknowledged)
+ bb->unacked_exist = 1;
+ write_sequnlock_irq(&bb->lock);
+
+ return rv;
+}
+
+int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
+ int acknowledged)
+{
+ int rv = md_set_badblocks(&rdev->badblocks,
+ s + rdev->data_offset, sectors, acknowledged);
+ if (rv) {
+ /* Make sure they get written out promptly */
+ set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
+ md_wakeup_thread(rdev->mddev->thread);
+ }
+ return rv;
+}
+EXPORT_SYMBOL_GPL(rdev_set_badblocks);
+
+/*
+ * Remove a range of bad blocks from the table.
+ * This may involve extending the table if we split a region,
+ * but it must not fail. So if the table becomes full, we just
+ * drop the remove request.
+ */
+static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
+{
+ u64 *p;
+ int lo, hi;
+ sector_t target = s + sectors;
+ int rv = 0;
+
+ if (bb->shift > 0) {
+ /* When clearing we round the start up and the end down.
+ * This should not matter as the shift should align with
+ * the block size and no rounding should ever be needed.
+ * However it is better to think a block is bad when it
+ * isn't than to think a block is not bad when it is.
+ */
+ s += (1<<bb->shift) - 1;
+ s >>= bb->shift;
+ target >>= bb->shift;
+ sectors = target - s;
+ }
+
+ write_seqlock_irq(&bb->lock);
+
+ p = bb->page;
+ lo = 0;
+ hi = bb->count;
+ /* Find the last range that starts before 'target' */
+ while (hi - lo > 1) {
+ int mid = (lo + hi) / 2;
+ sector_t a = BB_OFFSET(p[mid]);
+ if (a < target)
+ lo = mid;
+ else
+ hi = mid;
+ }
+ if (hi > lo) {
+ /* p[lo] is the last range that could overlap the
+ * current range. Earlier ranges could also overlap,
+ * but only this one can overlap the end of the range.
+ */
+ if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
+ /* Partial overlap, leave the tail of this range */
+ int ack = BB_ACK(p[lo]);
+ sector_t a = BB_OFFSET(p[lo]);
+ sector_t end = a + BB_LEN(p[lo]);
+
+ if (a < s) {
+ /* we need to split this range */
+ if (bb->count >= MD_MAX_BADBLOCKS) {
+ rv = 0;
+ goto out;
+ }
+ memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
+ bb->count++;
+ p[lo] = BB_MAKE(a, s-a, ack);
+ lo++;
+ }
+ p[lo] = BB_MAKE(target, end - target, ack);
+ /* there is no longer an overlap */
+ hi = lo;
+ lo--;
+ }
+ while (lo >= 0 &&
+ BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+ /* This range does overlap */
+ if (BB_OFFSET(p[lo]) < s) {
+ /* Keep the early parts of this range. */
+ int ack = BB_ACK(p[lo]);
+ sector_t start = BB_OFFSET(p[lo]);
+ p[lo] = BB_MAKE(start, s - start, ack);
+ /* now low doesn't overlap, so.. */
+ break;
+ }
+ lo--;
+ }
+ /* 'lo' is strictly before, 'hi' is strictly after,
+ * anything between needs to be discarded
+ */
+ if (hi - lo > 1) {
+ memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
+ bb->count -= (hi - lo - 1);
+ }
+ }
+
+ bb->changed = 1;
+out:
+ write_sequnlock_irq(&bb->lock);
+ return rv;
+}
+
+int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors)
+{
+ return md_clear_badblocks(&rdev->badblocks,
+ s + rdev->data_offset,
+ sectors);
+}
+EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
+
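The splitting case in md_clear_badblocks() is plain interval subtraction: removing [s, s+n) from a stored range [a, e) can leave two pieces, [a, s) and [s+n, e), which is why clearing may need one extra table slot and is simply dropped when the table is full. A tiny stand-alone illustration — subtract() and the range struct are invented for the example, with sector_t replaced by unsigned long long:

#include <stdio.h>

struct range { unsigned long long start, end; }; /* [start, end) */

/* Subtract [s, s+n) from *r; write surviving pieces to out[0..1]
 * and return how many pieces remain (0, 1 or 2). */
static int subtract(const struct range *r, unsigned long long s,
                    unsigned long long n, struct range out[2])
{
    unsigned long long cs = s, ce = s + n;
    int k = 0;

    if (ce <= r->start || cs >= r->end) {    /* no overlap */
        out[k++] = *r;
        return k;
    }
    if (r->start < cs)                       /* head survives */
        out[k++] = (struct range){ r->start, cs };
    if (ce < r->end)                         /* tail survives */
        out[k++] = (struct range){ ce, r->end };
    return k;
}

int main(void)
{
    struct range bad = { 100, 200 }, out[2];
    int k = subtract(&bad, 120, 30, out);    /* clear [120, 150) */
    for (int i = 0; i < k; i++)
        printf("[%llu, %llu)\n", out[i].start, out[i].end);
    /* prints [100, 120) and [150, 200): one entry became two */
    return 0;
}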
+/*
+ * Acknowledge all bad blocks in a list.
+ * This only succeeds if ->changed is clear. It is used by
+ * in-kernel metadata updates
+ */
+void md_ack_all_badblocks(struct badblocks *bb)
+{
+ if (bb->page == NULL || bb->changed)
+ /* no point even trying */
+ return;
+ write_seqlock_irq(&bb->lock);
+
+ if (bb->changed == 0) {
+ u64 *p = bb->page;
+ int i;
+ for (i = 0; i < bb->count ; i++) {
+ if (!BB_ACK(p[i])) {
+ sector_t start = BB_OFFSET(p[i]);
+ int len = BB_LEN(p[i]);
+ p[i] = BB_MAKE(start, len, 1);
+ }
+ }
+ bb->unacked_exist = 0;
+ }
+ write_sequnlock_irq(&bb->lock);
+}
+EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
+
+/* sysfs access to bad-blocks list.
+ * We present two files.
+ * 'bad_blocks' lists sector numbers and lengths of ranges that
+ * are recorded as bad. The list is truncated to fit within
+ * the one-page limit of sysfs.
+ * Writing "sector length" to this file adds an acknowledged
+ * bad block range.
+ * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
+ * been acknowledged. Writing to this file adds bad blocks
+ * without acknowledging them. This is largely for testing.
+ */
+
+static ssize_t
+badblocks_show(struct badblocks *bb, char *page, int unack)
+{
+ size_t len;
+ int i;
+ u64 *p = bb->page;
+ unsigned seq;
+
+ if (bb->shift < 0)
+ return 0;
+
+retry:
+ seq = read_seqbegin(&bb->lock);
+
+ len = 0;
+ i = 0;
+
+ while (len < PAGE_SIZE && i < bb->count) {
+ sector_t s = BB_OFFSET(p[i]);
+ unsigned int length = BB_LEN(p[i]);
+ int ack = BB_ACK(p[i]);
+ i++;
+
+ if (unack && ack)
+ continue;
+
+ len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
+ (unsigned long long)s << bb->shift,
+ length << bb->shift);
+ }
+ if (unack && len == 0)
+ bb->unacked_exist = 0;
+
+ if (read_seqretry(&bb->lock, seq))
+ goto retry;
+
+ return len;
+}
+
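Note how the units work out: entries are stored in bad-block units (sectors >> bb->shift), md_set_badblocks() rounds the start down and the end up so a stored range can only grow, and badblocks_show() scales everything back to sectors before printing. A stand-alone arithmetic check, with the shift value picked purely for illustration:

#include <stdio.h>

int main(void)
{
    int shift = 3;                  /* e.g. 4KiB bad-block units on 512B sectors */
    unsigned long long s = 83, sectors = 5;        /* request: sectors [83, 88) */

    /* md_set_badblocks()-style rounding: start down, end up */
    unsigned long long start = s >> shift;
    unsigned long long end = (s + sectors + (1ULL << shift) - 1) >> shift;
    printf("stored as blocks [%llu, %llu)\n", start, end);         /* [10, 11) */

    /* what badblocks_show() prints for that stored entry */
    unsigned long long len = end - start;
    printf("%llu %llu\n", start << shift, len << shift);           /* "80 8"   */
    return 0;
}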
+#define DO_DEBUG 1
+
+static ssize_t
+badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
+{
+ unsigned long long sector;
+ int length;
+ char newline;
+#ifdef DO_DEBUG
+ /* Allow clearing via sysfs *only* for testing/debugging.
+ * Normally only a successful write may clear a badblock
+ */
+ int clear = 0;
+ if (page[0] == '-') {
+ clear = 1;
+ page++;
+ }
+#endif /* DO_DEBUG */
+
+ switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
+ case 3:
+ if (newline != '\n')
+ return -EINVAL;
+ case 2:
+ if (length <= 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+#ifdef DO_DEBUG
+ if (clear) {
+ md_clear_badblocks(bb, sector, length);
+ return len;
+ }
+#endif /* DO_DEBUG */
+ if (md_set_badblocks(bb, sector, length, !unack))
+ return len;
+ else
+ return -ENOSPC;
+}
+
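The sscanf() above accepts "sector length" with or without a trailing newline and rejects anything else: the trailing %c only converts when extra input follows the length, and it must then be exactly '\n'. So writing "4096 8" (or "4096 8\n") to bad_blocks records an acknowledged 8-sector range at sector 4096, while "4096 8 x" fails with -EINVAL. A stand-alone check of just that parsing step — parse_badblock() is a made-up name:

#include <stdio.h>

/* Returns 0 if 'buf' parses as "sector length[\n]", -1 otherwise;
 * mirrors the sscanf()/switch logic in badblocks_store() above. */
static int parse_badblock(const char *buf, unsigned long long *sector,
                          int *length)
{
    char newline;

    switch (sscanf(buf, "%llu %d%c", sector, length, &newline)) {
    case 3:
        if (newline != '\n')
            return -1;
        /* fall through */
    case 2:
        if (*length <= 0)
            return -1;
        return 0;
    default:
        return -1;
    }
}

int main(void)
{
    unsigned long long s;
    int len;

    printf("%d\n", parse_badblock("4096 8\n", &s, &len));   /* 0  */
    printf("%d\n", parse_badblock("4096 8 x", &s, &len));   /* -1 */
    printf("%d\n", parse_badblock("4096 -2\n", &s, &len));  /* -1 */
    return 0;
}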
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1c26c7a..1e586bb 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,6 +29,13 @@
typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
+/* Bad block numbers are stored sorted in a single page.
+ * 64 bits are used for each block or extent.
+ * 54 bits are sector number, 9 bits are extent size,
+ * 1 bit is an 'acknowledged' flag.
+ */
+#define MD_MAX_BADBLOCKS (PAGE_SIZE/8)
+
/*
* MD's 'extended' device
*/
@@ -48,7 +55,7 @@ struct mdk_rdev_s
struct block_device *meta_bdev;
struct block_device *bdev; /* block device handle */
- struct page *sb_page;
+ struct page *sb_page, *bb_page;
int sb_loaded;
__u64 sb_events;
sector_t data_offset; /* start of data in array */
@@ -74,9 +81,29 @@ struct mdk_rdev_s
#define In_sync 2 /* device is in_sync with rest of array */
#define WriteMostly 4 /* Avoid reading if at all possible */
#define AutoDetected 7 /* added by auto-detect */
-#define Blocked 8 /* An error occurred on an externally
- * managed array, don't allow writes
+#define Blocked 8 /* An error occurred but has not yet
+ * been acknowledged by the metadata
+ * handler, so don't allow writes
* until it is cleared */
+#define WriteErrorSeen 9 /* A write error has been seen on this
+ * device
+ */
+#define FaultRecorded 10 /* Intermediate state for clearing
+ * Blocked. The Fault is/will-be
+ * recorded in the metadata, but that
+ * metadata hasn't been stored safely
+ * on disk yet.
+ */
+#define BlockedBadBlocks 11 /* A writer is blocked because they
+ * found an unacknowledged bad-block.
+ * This can safely be cleared at any
+ * time, and the writer will re-check.
+ * It may be set at any time, and at
+ * worst the writer will timeout and
+ * re-check. So setting it as
+ * accurately as possible is good, but
+ * not absolutely critical.
+ */
wait_queue_head_t blocked_wait;
int desc_nr; /* descriptor index in the superblock */
@@ -111,8 +138,54 @@ struct mdk_rdev_s
struct sysfs_dirent *sysfs_state; /* handle for 'state'
* sysfs entry */
+
+ struct badblocks {
+ int count; /* count of bad blocks */
+ int unacked_exist; /* there probably are unacknowledged
+ * bad blocks. This is only cleared
+ * when a read discovers none
+ */
+ int shift; /* shift from sectors to block size
+ * a -ve shift means badblocks are
+ * disabled.*/
+ u64 *page; /* badblock list */
+ int changed;
+ seqlock_t lock;
+
+ sector_t sector;
+ sector_t size; /* in sectors */
+ } badblocks;
};
+#define BB_LEN_MASK (0x00000000000001FFULL)
+#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL)
+#define BB_ACK_MASK (0x8000000000000000ULL)
+#define BB_MAX_LEN 512
+#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9)
+#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1)
+#define BB_ACK(x) (!!((x) & BB_ACK_MASK))
+#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
+
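Each entry therefore packs one range into a single u64: bits 0-8 hold length-1 (1-512 sectors), bits 9-62 the start sector, and bit 63 the acknowledged flag; with PAGE_SIZE/8 entries that is 512 ranges on a 4K page. A small round-trip check using the same macro definitions, copied here only so the snippet builds outside the kernel:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

typedef uint64_t u64;

/* same layout as the BB_* macros above */
#define BB_LEN_MASK    (0x00000000000001FFULL)
#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK    (0x8000000000000000ULL)
#define BB_OFFSET(x)   (((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)      (((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)      (!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))

int main(void)
{
    u64 e = BB_MAKE(123456789ULL, 8, 1); /* 8 bad sectors at 123456789, acked */

    assert(BB_OFFSET(e) == 123456789ULL);
    assert(BB_LEN(e) == 8);
    assert(BB_ACK(e) == 1);
    printf("entry=%#llx offset=%llu len=%llu ack=%d\n",
           (unsigned long long)e,
           (unsigned long long)BB_OFFSET(e),
           (unsigned long long)BB_LEN(e), BB_ACK(e));
    return 0;
}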
+extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
+ sector_t *first_bad, int *bad_sectors);
+static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors,
+ sector_t *first_bad, int *bad_sectors)
+{
+ if (unlikely(rdev->badblocks.count)) {
+ int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
+ sectors,
+ first_bad, bad_sectors);
+ if (rv)
+ *first_bad -= rdev->data_offset;
+ return rv;
+ }
+ return 0;
+}
+extern int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
+ int acknowledged);
+extern int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors);
+extern void md_ack_all_badblocks(struct badblocks *bb);
+
struct mddev_s
{
void *private;
@@ -239,9 +312,12 @@ struct mddev_s
#define MD_RECOVERY_FROZEN 9
unsigned long recovery;
- int recovery_disabled; /* if we detect that recovery
- * will always fail, set this
- * so we don't loop trying */
+ /* If a RAID personality determines that recovery (of a particular
+ * device) will fail due to a read error on the source device, it
+ * takes a copy of this number and does not attempt recovery again
+ * until this number changes.
+ */
+ int recovery_disabled;
int in_sync; /* know to not need resync */
/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
@@ -304,11 +380,6 @@ struct mddev_s
* hot-adding a bitmap. It should
* eventually be settable by sysfs.
*/
- /* When md is serving under dm, it might use a
- * dirty_log to store the bits.
- */
- struct dm_dirty_log *log;
-
struct mutex mutex;
unsigned long chunksize;
unsigned long daemon_sleep; /* how many jiffies between updates? */
@@ -413,6 +484,20 @@ static inline char * mdname (mddev_t * mddev)
return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
+static inline int sysfs_link_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+}
+
+static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+}
+
/*
* iterates through some rdev ringlist. It's safe to remove the
* current 'rdev'. Dont touch 'tmp' though.
@@ -505,7 +590,7 @@ extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
extern void md_stop(mddev_t *mddev);
extern void md_stop_writes(mddev_t *mddev);
-extern void md_rdev_init(mdk_rdev_t *rdev);
+extern int md_rdev_init(mdk_rdev_t *rdev);
extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
@@ -514,4 +599,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
mddev_t *mddev);
extern int mddev_check_plugged(mddev_t *mddev);
+extern void md_trim_bio(struct bio *bio, int offset, int size);
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f7431b6..f4622dd 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -35,16 +35,13 @@
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
+#include <linux/ratelimit.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"
#define DEBUG 0
-#if DEBUG
-#define PRINTK(x...) printk(x)
-#else
-#define PRINTK(x...)
-#endif
+#define PRINTK(x...) do { if (DEBUG) printk(x); } while (0)
/*
* Number of guaranteed r1bios in case of extreme VM load:
@@ -166,7 +163,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
for (i = 0; i < conf->raid_disks; i++) {
struct bio **bio = r1_bio->bios + i;
- if (*bio && *bio != IO_BLOCKED)
+ if (!BIO_SPECIAL(*bio))
bio_put(*bio);
*bio = NULL;
}
@@ -176,12 +173,6 @@ static void free_r1bio(r1bio_t *r1_bio)
{
conf_t *conf = r1_bio->mddev->private;
- /*
- * Wake up any possible resync thread that waits for the device
- * to go idle.
- */
- allow_barrier(conf);
-
put_all_bios(conf, r1_bio);
mempool_free(r1_bio, conf->r1bio_pool);
}
@@ -222,6 +213,33 @@ static void reschedule_retry(r1bio_t *r1_bio)
* operation and are ready to return a success/failure code to the buffer
* cache layer.
*/
+static void call_bio_endio(r1bio_t *r1_bio)
+{
+ struct bio *bio = r1_bio->master_bio;
+ int done;
+ conf_t *conf = r1_bio->mddev->private;
+
+ if (bio->bi_phys_segments) {
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio->bi_phys_segments--;
+ done = (bio->bi_phys_segments == 0);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ } else
+ done = 1;
+
+ if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ if (done) {
+ bio_endio(bio, 0);
+ /*
+ * Wake up any possible resync thread that waits for the device
+ * to go idle.
+ */
+ allow_barrier(conf);
+ }
+}
+
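call_bio_endio() completes the original bio only once every piece split from it has finished: bi_phys_segments is reused as a count of outstanding sub-requests (0 meaning the bio was never split), decremented under device_lock, and the final decrement triggers bio_endio() and allow_barrier(). A minimal single-threaded sketch of that split-and-count pattern — the struct and names are invented, and the spinlock is left out:

#include <stdio.h>

struct master_req {
    int pieces_left;   /* 0 == never split, otherwise outstanding pieces */
    int failed;
};

static void piece_done(struct master_req *m, int ok)
{
    if (!ok)
        m->failed = 1;                 /* like clearing BIO_UPTODATE */
    if (m->pieces_left == 0 || --m->pieces_left == 0)
        printf("master complete, status=%s\n",
               m->failed ? "error" : "ok");
}

int main(void)
{
    /* a request split into 3 pieces because of bad-block boundaries */
    struct master_req m = { .pieces_left = 3, .failed = 0 };

    piece_done(&m, 1);
    piece_done(&m, 1);
    piece_done(&m, 1);   /* only this call reports completion */
    return 0;
}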
static void raid_end_bio_io(r1bio_t *r1_bio)
{
struct bio *bio = r1_bio->master_bio;
@@ -234,8 +252,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
(unsigned long long) bio->bi_sector +
(bio->bi_size >> 9) - 1);
- bio_endio(bio,
- test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
+ call_bio_endio(r1_bio);
}
free_r1bio(r1_bio);
}
@@ -287,36 +304,52 @@ static void raid1_end_read_request(struct bio *bio, int error)
* oops, read error:
*/
char b[BDEVNAME_SIZE];
- if (printk_ratelimit())
- printk(KERN_ERR "md/raid1:%s: %s: rescheduling sector %llu\n",
- mdname(conf->mddev),
- bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
+ printk_ratelimited(
+ KERN_ERR "md/raid1:%s: %s: "
+ "rescheduling sector %llu\n",
+ mdname(conf->mddev),
+ bdevname(conf->mirrors[mirror].rdev->bdev,
+ b),
+ (unsigned long long)r1_bio->sector);
+ set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio);
}
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}
+static void close_write(r1bio_t *r1_bio)
+{
+ /* it really is the end of this request */
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
+ /* free extra copy of the data pages */
+ int i = r1_bio->behind_page_count;
+ while (i--)
+ safe_put_page(r1_bio->behind_bvecs[i].bv_page);
+ kfree(r1_bio->behind_bvecs);
+ r1_bio->behind_bvecs = NULL;
+ }
+ /* clear the bitmap if all writes complete successfully */
+ bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
+ r1_bio->sectors,
+ !test_bit(R1BIO_Degraded, &r1_bio->state),
+ test_bit(R1BIO_BehindIO, &r1_bio->state));
+ md_write_end(r1_bio->mddev);
+}
+
static void r1_bio_write_done(r1bio_t *r1_bio)
{
- if (atomic_dec_and_test(&r1_bio->remaining))
- {
- /* it really is the end of this request */
- if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- /* free extra copy of the data pages */
- int i = r1_bio->behind_page_count;
- while (i--)
- safe_put_page(r1_bio->behind_pages[i]);
- kfree(r1_bio->behind_pages);
- r1_bio->behind_pages = NULL;
- }
- /* clear the bitmap if all writes complete successfully */
- bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
- r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- test_bit(R1BIO_BehindIO, &r1_bio->state));
- md_write_end(r1_bio->mddev);
- raid_end_bio_io(r1_bio);
+ if (!atomic_dec_and_test(&r1_bio->remaining))
+ return;
+
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ reschedule_retry(r1_bio);
+ else {
+ close_write(r1_bio);
+ if (test_bit(R1BIO_MadeGood, &r1_bio->state))
+ reschedule_retry(r1_bio);
+ else
+ raid_end_bio_io(r1_bio);
}
}
@@ -336,13 +369,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
/*
* 'one mirror IO has finished' event handler:
*/
- r1_bio->bios[mirror] = NULL;
- to_put = bio;
if (!uptodate) {
- md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
- /* an I/O failed, we can't clear the bitmap */
- set_bit(R1BIO_Degraded, &r1_bio->state);
- } else
+ set_bit(WriteErrorSeen,
+ &conf->mirrors[mirror].rdev->flags);
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ } else {
/*
* Set R1BIO_Uptodate in our master bio, so that we
* will return a good error code for to the higher
@@ -353,8 +384,22 @@ static void raid1_end_write_request(struct bio *bio, int error)
* to user-side. So if something waits for IO, then it
* will wait for the 'master' bio.
*/
+ sector_t first_bad;
+ int bad_sectors;
+
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
set_bit(R1BIO_Uptodate, &r1_bio->state);
+ /* Maybe we can clear some bad blocks. */
+ if (is_badblock(conf->mirrors[mirror].rdev,
+ r1_bio->sector, r1_bio->sectors,
+ &first_bad, &bad_sectors)) {
+ r1_bio->bios[mirror] = IO_MADE_GOOD;
+ set_bit(R1BIO_MadeGood, &r1_bio->state);
+ }
+ }
+
update_head_pos(mirror, r1_bio);
if (behind) {
@@ -377,11 +422,13 @@ static void raid1_end_write_request(struct bio *bio, int error)
(unsigned long long) mbio->bi_sector,
(unsigned long long) mbio->bi_sector +
(mbio->bi_size >> 9) - 1);
- bio_endio(mbio, 0);
+ call_bio_endio(r1_bio);
}
}
}
- rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
+ if (r1_bio->bios[mirror] == NULL)
+ rdev_dec_pending(conf->mirrors[mirror].rdev,
+ conf->mddev);
/*
* Let's see if all mirrored write operations have finished
@@ -408,10 +455,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
*
* The rdev for the device selected will have nr_pending incremented.
*/
-static int read_balance(conf_t *conf, r1bio_t *r1_bio)
+static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
{
const sector_t this_sector = r1_bio->sector;
- const int sectors = r1_bio->sectors;
+ int sectors;
+ int best_good_sectors;
int start_disk;
int best_disk;
int i;
@@ -426,8 +474,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
* We take the first readable disk when above the resync window.
*/
retry:
+ sectors = r1_bio->sectors;
best_disk = -1;
best_dist = MaxSector;
+ best_good_sectors = 0;
+
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
choose_first = 1;
@@ -439,6 +490,9 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
for (i = 0 ; i < conf->raid_disks ; i++) {
sector_t dist;
+ sector_t first_bad;
+ int bad_sectors;
+
int disk = start_disk + i;
if (disk >= conf->raid_disks)
disk -= conf->raid_disks;
@@ -461,6 +515,35 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
/* This is a reasonable device to use. It might
* even be best.
*/
+ if (is_badblock(rdev, this_sector, sectors,
+ &first_bad, &bad_sectors)) {
+ if (best_dist < MaxSector)
+ /* already have a better device */
+ continue;
+ if (first_bad <= this_sector) {
+ /* cannot read here. If this is the 'primary'
+ * device, then we must not read beyond
+ * bad_sectors from another device..
+ */
+ bad_sectors -= (this_sector - first_bad);
+ if (choose_first && sectors > bad_sectors)
+ sectors = bad_sectors;
+ if (best_good_sectors > sectors)
+ best_good_sectors = sectors;
+
+ } else {
+ sector_t good_sectors = first_bad - this_sector;
+ if (good_sectors > best_good_sectors) {
+ best_good_sectors = good_sectors;
+ best_disk = disk;
+ }
+ if (choose_first)
+ break;
+ }
+ continue;
+ } else
+ best_good_sectors = sectors;
+
dist = abs(this_sector - conf->mirrors[disk].head_position);
if (choose_first
/* Don't change to another disk for sequential reads */
@@ -489,10 +572,12 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
rdev_dec_pending(rdev, conf->mddev);
goto retry;
}
+ sectors = best_good_sectors;
conf->next_seq_sect = this_sector + sectors;
conf->last_used = best_disk;
}
rcu_read_unlock();
+ *max_sectors = sectors;
return best_disk;
}
@@ -672,30 +757,31 @@ static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
{
int i;
struct bio_vec *bvec;
- struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page*),
+ struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
GFP_NOIO);
- if (unlikely(!pages))
+ if (unlikely(!bvecs))
return;
bio_for_each_segment(bvec, bio, i) {
- pages[i] = alloc_page(GFP_NOIO);
- if (unlikely(!pages[i]))
+ bvecs[i] = *bvec;
+ bvecs[i].bv_page = alloc_page(GFP_NOIO);
+ if (unlikely(!bvecs[i].bv_page))
goto do_sync_io;
- memcpy(kmap(pages[i]) + bvec->bv_offset,
- kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
- kunmap(pages[i]);
+ memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
+ kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
+ kunmap(bvecs[i].bv_page);
kunmap(bvec->bv_page);
}
- r1_bio->behind_pages = pages;
+ r1_bio->behind_bvecs = bvecs;
r1_bio->behind_page_count = bio->bi_vcnt;
set_bit(R1BIO_BehindIO, &r1_bio->state);
return;
do_sync_io:
for (i = 0; i < bio->bi_vcnt; i++)
- if (pages[i])
- put_page(pages[i]);
- kfree(pages);
+ if (bvecs[i].bv_page)
+ put_page(bvecs[i].bv_page);
+ kfree(bvecs);
PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
}
@@ -705,7 +791,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mirror_info_t *mirror;
r1bio_t *r1_bio;
struct bio *read_bio;
- int i, targets = 0, disks;
+ int i, disks;
struct bitmap *bitmap;
unsigned long flags;
const int rw = bio_data_dir(bio);
@@ -713,6 +799,9 @@ static int make_request(mddev_t *mddev, struct bio * bio)
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
mdk_rdev_t *blocked_rdev;
int plugged;
+ int first_clone;
+ int sectors_handled;
+ int max_sectors;
/*
* Register the new request and wait if the reconstruction
@@ -759,11 +848,24 @@ static int make_request(mddev_t *mddev, struct bio * bio)
r1_bio->mddev = mddev;
r1_bio->sector = bio->bi_sector;
+ /* We might need to issue multiple reads to different
+ * devices if there are bad blocks around, so we keep
+ * track of the number of reads in bio->bi_phys_segments.
+ * If this is 0, there is only one r1_bio and no locking
+ * will be needed when requests complete. If it is
+ * non-zero, then it is the number of not-completed requests.
+ */
+ bio->bi_phys_segments = 0;
+ clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
if (rw == READ) {
/*
* read balancing logic:
*/
- int rdisk = read_balance(conf, r1_bio);
+ int rdisk;
+
+read_again:
+ rdisk = read_balance(conf, r1_bio, &max_sectors);
if (rdisk < 0) {
/* couldn't find anywhere to read from */
@@ -784,6 +886,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
r1_bio->read_disk = rdisk;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
+ max_sectors);
r1_bio->bios[rdisk] = read_bio;
@@ -793,16 +897,52 @@ static int make_request(mddev_t *mddev, struct bio * bio)
read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r1_bio;
- generic_make_request(read_bio);
+ if (max_sectors < r1_bio->sectors) {
+ /* could not read all from this device, so we will
+ * need another r1_bio.
+ */
+
+ sectors_handled = (r1_bio->sector + max_sectors
+ - bio->bi_sector);
+ r1_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ /* Cannot call generic_make_request directly
+ * as that will be queued in __make_request
+ * and subsequent mempool_alloc might block waiting
+ * for it. So hand bio over to raid1d.
+ */
+ reschedule_retry(r1_bio);
+
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_sector + sectors_handled;
+ goto read_again;
+ } else
+ generic_make_request(read_bio);
return 0;
}
/*
* WRITE:
*/
- /* first select target devices under spinlock and
+ /* first select target devices under rcu_lock and
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
+ * If there are known/acknowledged bad blocks on any device on
+ * which we have seen a write error, we want to avoid writing those
+ * blocks.
+ * This potentially requires several writes to write around
+ * the bad blocks. Each set of writes gets its own r1bio
+ * with a set of bios attached.
*/
plugged = mddev_check_plugged(mddev);
@@ -810,6 +950,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
retry_write:
blocked_rdev = NULL;
rcu_read_lock();
+ max_sectors = r1_bio->sectors;
for (i = 0; i < disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
@@ -817,17 +958,56 @@ static int make_request(mddev_t *mddev, struct bio * bio)
blocked_rdev = rdev;
break;
}
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- atomic_inc(&rdev->nr_pending);
- if (test_bit(Faulty, &rdev->flags)) {
+ r1_bio->bios[i] = NULL;
+ if (!rdev || test_bit(Faulty, &rdev->flags)) {
+ set_bit(R1BIO_Degraded, &r1_bio->state);
+ continue;
+ }
+
+ atomic_inc(&rdev->nr_pending);
+ if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ sector_t first_bad;
+ int bad_sectors;
+ int is_bad;
+
+ is_bad = is_badblock(rdev, r1_bio->sector,
+ max_sectors,
+ &first_bad, &bad_sectors);
+ if (is_bad < 0) {
+ /* mustn't write here until the bad block is
+ * acknowledged*/
+ set_bit(BlockedBadBlocks, &rdev->flags);
+ blocked_rdev = rdev;
+ break;
+ }
+ if (is_bad && first_bad <= r1_bio->sector) {
+ /* Cannot write here at all */
+ bad_sectors -= (r1_bio->sector - first_bad);
+ if (bad_sectors < max_sectors)
+ /* mustn't write more than bad_sectors
+ * to other devices yet
+ */
+ max_sectors = bad_sectors;
rdev_dec_pending(rdev, mddev);
- r1_bio->bios[i] = NULL;
- } else {
- r1_bio->bios[i] = bio;
- targets++;
+ /* We don't set R1BIO_Degraded as that
+ * only applies if the disk is
+ * missing, so it might be re-added,
+ * and we want to know to recover this
+ * chunk.
+ * In this case the device is here,
+ * and the fact that this chunk is not
+ * in-sync is recorded in the bad
+ * block log
+ */
+ continue;
}
- } else
- r1_bio->bios[i] = NULL;
+ if (is_bad) {
+ int good_sectors = first_bad - r1_bio->sector;
+ if (good_sectors < max_sectors)
+ max_sectors = good_sectors;
+ }
+ }
+ r1_bio->bios[i] = bio;
}
rcu_read_unlock();
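The clamping of max_sectors against one bad range reported by is_badblock() can be shown in isolation. This small sketch is not part of the patch and the helper name is invented, but it follows the same arithmetic as the loop above:

static int sketch_clamp_to_badblock(sector_t sector, int max_sectors,
				    sector_t first_bad, int bad_sectors)
{
	if (first_bad <= sector) {
		/* request starts inside the bad range: skip this device and
		 * limit the r1_bio to what remains of the bad range, so the
		 * other devices do not write past it in this pass */
		bad_sectors -= (sector - first_bad);
		if (bad_sectors < max_sectors)
			max_sectors = bad_sectors;
	} else {
		/* bad range starts later: this device can take the good
		 * prefix, so limit the r1_bio to that prefix */
		int good_sectors = first_bad - sector;
		if (good_sectors < max_sectors)
			max_sectors = good_sectors;
	}
	return max_sectors;
}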
@@ -838,51 +1018,57 @@ static int make_request(mddev_t *mddev, struct bio * bio)
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
-
+ r1_bio->state = 0;
allow_barrier(conf);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf);
goto retry_write;
}
- BUG_ON(targets == 0); /* we never fail the last device */
-
- if (targets < conf->raid_disks) {
- /* array is degraded, we will not clear the bitmap
- * on I/O completion (see raid1_end_write_request) */
- set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (max_sectors < r1_bio->sectors) {
+ /* We are splitting this write into multiple parts, so
+ * we need to prepare for allocating another r1_bio.
+ */
+ r1_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
}
-
- /* do behind I/O ?
- * Not if there are too many, or cannot allocate memory,
- * or a reader on WriteMostly is waiting for behind writes
- * to flush */
- if (bitmap &&
- (atomic_read(&bitmap->behind_writes)
- < mddev->bitmap_info.max_write_behind) &&
- !waitqueue_active(&bitmap->behind_wait))
- alloc_behind_pages(bio, r1_bio);
+ sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0);
- bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
- test_bit(R1BIO_BehindIO, &r1_bio->state));
+ first_clone = 1;
for (i = 0; i < disks; i++) {
struct bio *mbio;
if (!r1_bio->bios[i])
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- r1_bio->bios[i] = mbio;
-
- mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
- mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
- mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE | do_flush_fua | do_sync;
- mbio->bi_private = r1_bio;
-
- if (r1_bio->behind_pages) {
+ md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+
+ if (first_clone) {
+ /* do behind I/O ?
+ * Not if there are too many, or cannot
+ * allocate memory, or a reader on WriteMostly
+ * is waiting for behind writes to flush */
+ if (bitmap &&
+ (atomic_read(&bitmap->behind_writes)
+ < mddev->bitmap_info.max_write_behind) &&
+ !waitqueue_active(&bitmap->behind_wait))
+ alloc_behind_pages(mbio, r1_bio);
+
+ bitmap_startwrite(bitmap, r1_bio->sector,
+ r1_bio->sectors,
+ test_bit(R1BIO_BehindIO,
+ &r1_bio->state));
+ first_clone = 0;
+ }
+ if (r1_bio->behind_bvecs) {
struct bio_vec *bvec;
int j;
@@ -894,16 +1080,42 @@ static int make_request(mddev_t *mddev, struct bio * bio)
* them all
*/
__bio_for_each_segment(bvec, mbio, j, 0)
- bvec->bv_page = r1_bio->behind_pages[j];
+ bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
}
+ r1_bio->bios[i] = mbio;
+
+ mbio->bi_sector = (r1_bio->sector +
+ conf->mirrors[i].rdev->data_offset);
+ mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+ mbio->bi_end_io = raid1_end_write_request;
+ mbio->bi_rw = WRITE | do_flush_fua | do_sync;
+ mbio->bi_private = r1_bio;
+
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
+ /* Mustn't call r1_bio_write_done before this next test,
+ * as it could result in the bio being freed.
+ */
+ if (sectors_handled < (bio->bi_size >> 9)) {
+ r1_bio_write_done(r1_bio);
+ /* We need another r1_bio. It has already been counted
+ * in bio->bi_phys_segments
+ */
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_sector + sectors_handled;
+ goto retry_write;
+ }
+
r1_bio_write_done(r1_bio);
/* In case raid1d snuck in to freeze_array */
@@ -952,9 +1164,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
* However don't try a recovery from this drive as
* it is very likely to fail.
*/
- mddev->recovery_disabled = 1;
+ conf->recovery_disabled = mddev->recovery_disabled;
return;
}
+ set_bit(Blocked, &rdev->flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) {
unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags);
@@ -1027,7 +1240,7 @@ static int raid1_spare_active(mddev_t *mddev)
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
count++;
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
}
}
spin_lock_irqsave(&conf->device_lock, flags);
@@ -1048,6 +1261,9 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
int first = 0;
int last = mddev->raid_disks - 1;
+ if (mddev->recovery_disabled == conf->recovery_disabled)
+ return -EBUSY;
+
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
@@ -1103,7 +1319,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
* is not possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
- !mddev->recovery_disabled &&
+ mddev->recovery_disabled != conf->recovery_disabled &&
mddev->degraded < conf->raid_disks) {
err = -EBUSY;
goto abort;
@@ -1155,6 +1371,8 @@ static void end_sync_write(struct bio *bio, int error)
conf_t *conf = mddev->private;
int i;
int mirror=0;
+ sector_t first_bad;
+ int bad_sectors;
for (i = 0; i < conf->raid_disks; i++)
if (r1_bio->bios[i] == bio) {
@@ -1172,18 +1390,48 @@ static void end_sync_write(struct bio *bio, int error)
s += sync_blocks;
sectors_to_go -= sync_blocks;
} while (sectors_to_go > 0);
- md_error(mddev, conf->mirrors[mirror].rdev);
- }
+ set_bit(WriteErrorSeen,
+ &conf->mirrors[mirror].rdev->flags);
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ } else if (is_badblock(conf->mirrors[mirror].rdev,
+ r1_bio->sector,
+ r1_bio->sectors,
+ &first_bad, &bad_sectors) &&
+ !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
+ r1_bio->sector,
+ r1_bio->sectors,
+ &first_bad, &bad_sectors)
+ )
+ set_bit(R1BIO_MadeGood, &r1_bio->state);
update_head_pos(mirror, r1_bio);
if (atomic_dec_and_test(&r1_bio->remaining)) {
- sector_t s = r1_bio->sectors;
- put_buf(r1_bio);
- md_done_sync(mddev, s, uptodate);
+ int s = r1_bio->sectors;
+ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+ test_bit(R1BIO_WriteError, &r1_bio->state))
+ reschedule_retry(r1_bio);
+ else {
+ put_buf(r1_bio);
+ md_done_sync(mddev, s, uptodate);
+ }
}
}
+static int r1_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
+ int sectors, struct page *page, int rw)
+{
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ /* success */
+ return 1;
+ if (rw == WRITE)
+ set_bit(WriteErrorSeen, &rdev->flags);
+ /* need to record an error - either for the block or the device */
+ if (!rdev_set_badblocks(rdev, sector, sectors, 0))
+ md_error(rdev->mddev, rdev);
+ return 0;
+}
+
static int fix_sync_read_error(r1bio_t *r1_bio)
{
/* Try some synchronous reads of other devices to get
@@ -1193,6 +1441,9 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
* We don't need to freeze the array, because being in an
* active sync request, there is no normal IO, and
* no overlapping syncs.
+ * We don't need to check is_badblock() again as we
+ * made sure that anything with a bad block in range
+ * will have bi_end_io clear.
*/
mddev_t *mddev = r1_bio->mddev;
conf_t *conf = mddev->private;
@@ -1217,9 +1468,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
* active, and resync is currently active
*/
rdev = conf->mirrors[d].rdev;
- if (sync_page_io(rdev,
- sect,
- s<<9,
+ if (sync_page_io(rdev, sect, s<<9,
bio->bi_io_vec[idx].bv_page,
READ, false)) {
success = 1;
@@ -1233,16 +1482,36 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
if (!success) {
char b[BDEVNAME_SIZE];
- /* Cannot read from anywhere, array is toast */
- md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+ int abort = 0;
+ /* Cannot read from anywhere, this block is lost.
+ * Record a bad block on each device. If that doesn't
+ * work just disable and interrupt the recovery.
+ * Don't fail devices as that won't really help.
+ */
printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
" for block %llu\n",
mdname(mddev),
bdevname(bio->bi_bdev, b),
(unsigned long long)r1_bio->sector);
- md_done_sync(mddev, r1_bio->sectors, 0);
- put_buf(r1_bio);
- return 0;
+ for (d = 0; d < conf->raid_disks; d++) {
+ rdev = conf->mirrors[d].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ continue;
+ if (!rdev_set_badblocks(rdev, sect, s, 0))
+ abort = 1;
+ }
+ if (abort) {
+ mddev->recovery_disabled = 1;
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_done_sync(mddev, r1_bio->sectors, 0);
+ put_buf(r1_bio);
+ return 0;
+ }
+ /* Try next page */
+ sectors -= s;
+ sect += s;
+ idx++;
+ continue;
}
start = d;
@@ -1254,16 +1523,12 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
rdev = conf->mirrors[d].rdev;
- if (sync_page_io(rdev,
- sect,
- s<<9,
- bio->bi_io_vec[idx].bv_page,
- WRITE, false) == 0) {
+ if (r1_sync_page_io(rdev, sect, s,
+ bio->bi_io_vec[idx].bv_page,
+ WRITE) == 0) {
r1_bio->bios[d]->bi_end_io = NULL;
rdev_dec_pending(rdev, mddev);
- md_error(mddev, rdev);
- } else
- atomic_add(s, &rdev->corrected_errors);
+ }
}
d = start;
while (d != r1_bio->read_disk) {
@@ -1273,12 +1538,10 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
rdev = conf->mirrors[d].rdev;
- if (sync_page_io(rdev,
- sect,
- s<<9,
- bio->bi_io_vec[idx].bv_page,
- READ, false) == 0)
- md_error(mddev, rdev);
+ if (r1_sync_page_io(rdev, sect, s,
+ bio->bi_io_vec[idx].bv_page,
+ READ) != 0)
+ atomic_add(s, &rdev->corrected_errors);
}
sectors -= s;
sect += s;
@@ -1420,7 +1683,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
*
* 1. Retries failed read operations on working mirrors.
* 2. Updates the raid superblock when problems encounter.
- * 3. Performs writes following reads for array syncronising.
+ * 3. Performs writes following reads for array synchronising.
*/
static void fix_read_error(conf_t *conf, int read_disk,
@@ -1443,9 +1706,14 @@ static void fix_read_error(conf_t *conf, int read_disk,
* which is the thread that might remove
* a device. If raid1d ever becomes multi-threaded....
*/
+ sector_t first_bad;
+ int bad_sectors;
+
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
+ is_badblock(rdev, sect, s,
+ &first_bad, &bad_sectors) == 0 &&
sync_page_io(rdev, sect, s<<9,
conf->tmppage, READ, false))
success = 1;
@@ -1457,8 +1725,10 @@ static void fix_read_error(conf_t *conf, int read_disk,
} while (!success && d != read_disk);
if (!success) {
- /* Cannot read from anywhere -- bye bye array */
- md_error(mddev, conf->mirrors[read_disk].rdev);
+ /* Cannot read from anywhere - mark it bad */
+ mdk_rdev_t *rdev = conf->mirrors[read_disk].rdev;
+ if (!rdev_set_badblocks(rdev, sect, s, 0))
+ md_error(mddev, rdev);
break;
}
/* write it back and re-read */
@@ -1469,13 +1739,9 @@ static void fix_read_error(conf_t *conf, int read_disk,
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
- test_bit(In_sync, &rdev->flags)) {
- if (sync_page_io(rdev, sect, s<<9,
- conf->tmppage, WRITE, false)
- == 0)
- /* Well, this device is dead */
- md_error(mddev, rdev);
- }
+ test_bit(In_sync, &rdev->flags))
+ r1_sync_page_io(rdev, sect, s,
+ conf->tmppage, WRITE);
}
d = start;
while (d != read_disk) {
@@ -1486,12 +1752,8 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
- if (sync_page_io(rdev, sect, s<<9,
- conf->tmppage, READ, false)
- == 0)
- /* Well, this device is dead */
- md_error(mddev, rdev);
- else {
+ if (r1_sync_page_io(rdev, sect, s,
+ conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors);
printk(KERN_INFO
"md/raid1:%s: read error corrected "
@@ -1508,21 +1770,255 @@ static void fix_read_error(conf_t *conf, int read_disk,
}
}
+static void bi_complete(struct bio *bio, int error)
+{
+ complete((struct completion *)bio->bi_private);
+}
+
+static int submit_bio_wait(int rw, struct bio *bio)
+{
+ struct completion event;
+ rw |= REQ_SYNC;
+
+ init_completion(&event);
+ bio->bi_private = &event;
+ bio->bi_end_io = bi_complete;
+ submit_bio(rw, bio);
+ wait_for_completion(&event);
+
+ return test_bit(BIO_UPTODATE, &bio->bi_flags);
+}
+
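A typical caller of the submit_bio_wait() helper above would look roughly like the following; this function is only an illustration (it is not in the patch) of writing one page synchronously to an rdev and reporting success:

static int sketch_sync_write_page(mdk_rdev_t *rdev, sector_t sector,
				  struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int ok;

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	ok = submit_bio_wait(WRITE, bio);	/* 1 on success, 0 on failure */
	bio_put(bio);
	return ok;
}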
+static int narrow_write_error(r1bio_t *r1_bio, int i)
+{
+ mddev_t *mddev = r1_bio->mddev;
+ conf_t *conf = mddev->private;
+ mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+ int vcnt, idx;
+ struct bio_vec *vec;
+
+ /* bio has the data to be written to device 'i' where
+ * we just recently had a write error.
+ * We repeatedly clone the bio and trim down to one block,
+ * then try the write. Where the write fails we record
+ * a bad block.
+ * It is conceivable that the bio doesn't exactly align with
+ * blocks. We must handle this somehow.
+ *
+ * We currently own a reference on the rdev.
+ */
+
+ int block_sectors;
+ sector_t sector;
+ int sectors;
+ int sect_to_write = r1_bio->sectors;
+ int ok = 1;
+
+ if (rdev->badblocks.shift < 0)
+ return 0;
+
+ block_sectors = 1 << rdev->badblocks.shift;
+ sector = r1_bio->sector;
+ sectors = ((sector + block_sectors)
+ & ~(sector_t)(block_sectors - 1))
+ - sector;
+
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
+ vcnt = r1_bio->behind_page_count;
+ vec = r1_bio->behind_bvecs;
+ idx = 0;
+ while (vec[idx].bv_page == NULL)
+ idx++;
+ } else {
+ vcnt = r1_bio->master_bio->bi_vcnt;
+ vec = r1_bio->master_bio->bi_io_vec;
+ idx = r1_bio->master_bio->bi_idx;
+ }
+ while (sect_to_write) {
+ struct bio *wbio;
+ if (sectors > sect_to_write)
+ sectors = sect_to_write;
+ /* Write at 'sector' for 'sectors' */
+
+ wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
+ memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
+ wbio->bi_sector = r1_bio->sector;
+ wbio->bi_rw = WRITE;
+ wbio->bi_vcnt = vcnt;
+ wbio->bi_size = r1_bio->sectors << 9;
+ wbio->bi_idx = idx;
+
+ md_trim_bio(wbio, sector - r1_bio->sector, sectors);
+ wbio->bi_sector += rdev->data_offset;
+ wbio->bi_bdev = rdev->bdev;
+ if (submit_bio_wait(WRITE, wbio) == 0)
+ /* failure! */
+ ok = rdev_set_badblocks(rdev, sector,
+ sectors, 0)
+ && ok;
+
+ bio_put(wbio);
+ sect_to_write -= sectors;
+ sector += sectors;
+ sectors = block_sectors;
+ }
+ return ok;
+}
+
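The alignment arithmetic at the top of narrow_write_error() is easiest to see with concrete numbers; the values below are hypothetical and the function exists only to illustrate the first-chunk calculation, not to be called from the driver:

static int sketch_first_chunk_length(void)
{
	int block_sectors = 8;	/* badblocks.shift == 3, hypothetical */
	sector_t sector = 21;	/* start of the failed write, hypothetical */
	int sectors = ((sector + block_sectors)
		       & ~(sector_t)(block_sectors - 1))
		      - sector;
	/* sectors == 3: the first retried write covers sectors 21..23 so
	 * that every later chunk is a whole 8-sector block starting at 24 */
	return sectors;
}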
+static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
+{
+ int m;
+ int s = r1_bio->sectors;
+ for (m = 0; m < conf->raid_disks ; m++) {
+ mdk_rdev_t *rdev = conf->mirrors[m].rdev;
+ struct bio *bio = r1_bio->bios[m];
+ if (bio->bi_end_io == NULL)
+ continue;
+ if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ test_bit(R1BIO_MadeGood, &r1_bio->state)) {
+ rdev_clear_badblocks(rdev, r1_bio->sector, s);
+ }
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ test_bit(R1BIO_WriteError, &r1_bio->state)) {
+ if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
+ md_error(conf->mddev, rdev);
+ }
+ }
+ put_buf(r1_bio);
+ md_done_sync(conf->mddev, s, 1);
+}
+
+static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
+{
+ int m;
+ for (m = 0; m < conf->raid_disks ; m++)
+ if (r1_bio->bios[m] == IO_MADE_GOOD) {
+ mdk_rdev_t *rdev = conf->mirrors[m].rdev;
+ rdev_clear_badblocks(rdev,
+ r1_bio->sector,
+ r1_bio->sectors);
+ rdev_dec_pending(rdev, conf->mddev);
+ } else if (r1_bio->bios[m] != NULL) {
+ /* This drive got a write error. We need to
+ * narrow down and record precise write
+ * errors.
+ */
+ if (!narrow_write_error(r1_bio, m)) {
+ md_error(conf->mddev,
+ conf->mirrors[m].rdev);
+ /* an I/O failed, we can't clear the bitmap */
+ set_bit(R1BIO_Degraded, &r1_bio->state);
+ }
+ rdev_dec_pending(conf->mirrors[m].rdev,
+ conf->mddev);
+ }
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
+ raid_end_bio_io(r1_bio);
+}
+
+static void handle_read_error(conf_t *conf, r1bio_t *r1_bio)
+{
+ int disk;
+ int max_sectors;
+ mddev_t *mddev = conf->mddev;
+ struct bio *bio;
+ char b[BDEVNAME_SIZE];
+ mdk_rdev_t *rdev;
+
+ clear_bit(R1BIO_ReadError, &r1_bio->state);
+ /* we got a read error. Maybe the drive is bad. Maybe just
+ * the block and we can fix it.
+ * We freeze all other IO, and try reading the block from
+ * other devices. When we find one, we re-write
+ * and check it; that fixes the read error.
+ * This is all done synchronously while the array is
+ * frozen
+ */
+ if (mddev->ro == 0) {
+ freeze_array(conf);
+ fix_read_error(conf, r1_bio->read_disk,
+ r1_bio->sector, r1_bio->sectors);
+ unfreeze_array(conf);
+ } else
+ md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+
+ bio = r1_bio->bios[r1_bio->read_disk];
+ bdevname(bio->bi_bdev, b);
+read_more:
+ disk = read_balance(conf, r1_bio, &max_sectors);
+ if (disk == -1) {
+ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
+ " read error for block %llu\n",
+ mdname(mddev), b, (unsigned long long)r1_bio->sector);
+ raid_end_bio_io(r1_bio);
+ } else {
+ const unsigned long do_sync
+ = r1_bio->master_bio->bi_rw & REQ_SYNC;
+ if (bio) {
+ r1_bio->bios[r1_bio->read_disk] =
+ mddev->ro ? IO_BLOCKED : NULL;
+ bio_put(bio);
+ }
+ r1_bio->read_disk = disk;
+ bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
+ md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+ r1_bio->bios[r1_bio->read_disk] = bio;
+ rdev = conf->mirrors[disk].rdev;
+ printk_ratelimited(KERN_ERR
+ "md/raid1:%s: redirecting sector %llu"
+ " to other mirror: %s\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ bdevname(rdev->bdev, b));
+ bio->bi_sector = r1_bio->sector + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ bio->bi_end_io = raid1_end_read_request;
+ bio->bi_rw = READ | do_sync;
+ bio->bi_private = r1_bio;
+ if (max_sectors < r1_bio->sectors) {
+ /* Drat - have to split this up more */
+ struct bio *mbio = r1_bio->master_bio;
+ int sectors_handled = (r1_bio->sector + max_sectors
+ - mbio->bi_sector);
+ r1_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (mbio->bi_phys_segments == 0)
+ mbio->bi_phys_segments = 2;
+ else
+ mbio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ generic_make_request(bio);
+ bio = NULL;
+
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+ r1_bio->master_bio = mbio;
+ r1_bio->sectors = (mbio->bi_size >> 9)
+ - sectors_handled;
+ r1_bio->state = 0;
+ set_bit(R1BIO_ReadError, &r1_bio->state);
+ r1_bio->mddev = mddev;
+ r1_bio->sector = mbio->bi_sector + sectors_handled;
+
+ goto read_more;
+ } else
+ generic_make_request(bio);
+ }
+}
+
static void raid1d(mddev_t *mddev)
{
r1bio_t *r1_bio;
- struct bio *bio;
unsigned long flags;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
- mdk_rdev_t *rdev;
struct blk_plug plug;
md_check_recovery(mddev);
blk_start_plug(&plug);
for (;;) {
- char b[BDEVNAME_SIZE];
if (atomic_read(&mddev->plug_cnt) == 0)
flush_pending_writes(conf);
@@ -1539,62 +2035,26 @@ static void raid1d(mddev_t *mddev)
mddev = r1_bio->mddev;
conf = mddev->private;
- if (test_bit(R1BIO_IsSync, &r1_bio->state))
- sync_request_write(mddev, r1_bio);
- else {
- int disk;
-
- /* we got a read error. Maybe the drive is bad. Maybe just
- * the block and we can fix it.
- * We freeze all other IO, and try reading the block from
- * other devices. When we find one, we re-write
- * and check it that fixes the read error.
- * This is all done synchronously while the array is
- * frozen
+ if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+ test_bit(R1BIO_WriteError, &r1_bio->state))
+ handle_sync_write_finished(conf, r1_bio);
+ else
+ sync_request_write(mddev, r1_bio);
+ } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+ test_bit(R1BIO_WriteError, &r1_bio->state))
+ handle_write_finished(conf, r1_bio);
+ else if (test_bit(R1BIO_ReadError, &r1_bio->state))
+ handle_read_error(conf, r1_bio);
+ else
+ /* just a partial read to be scheduled from separate
+ * context
*/
- if (mddev->ro == 0) {
- freeze_array(conf);
- fix_read_error(conf, r1_bio->read_disk,
- r1_bio->sector,
- r1_bio->sectors);
- unfreeze_array(conf);
- } else
- md_error(mddev,
- conf->mirrors[r1_bio->read_disk].rdev);
-
- bio = r1_bio->bios[r1_bio->read_disk];
- if ((disk=read_balance(conf, r1_bio)) == -1) {
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev,b),
- (unsigned long long)r1_bio->sector);
- raid_end_bio_io(r1_bio);
- } else {
- const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
- r1_bio->bios[r1_bio->read_disk] =
- mddev->ro ? IO_BLOCKED : NULL;
- r1_bio->read_disk = disk;
- bio_put(bio);
- bio = bio_clone_mddev(r1_bio->master_bio,
- GFP_NOIO, mddev);
- r1_bio->bios[r1_bio->read_disk] = bio;
- rdev = conf->mirrors[disk].rdev;
- if (printk_ratelimit())
- printk(KERN_ERR "md/raid1:%s: redirecting sector %llu to"
- " other mirror: %s\n",
- mdname(mddev),
- (unsigned long long)r1_bio->sector,
- bdevname(rdev->bdev,b));
- bio->bi_sector = r1_bio->sector + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
- bio->bi_end_io = raid1_end_read_request;
- bio->bi_rw = READ | do_sync;
- bio->bi_private = r1_bio;
- generic_make_request(bio);
- }
- }
+ generic_make_request(r1_bio->bios[r1_bio->read_disk]);
+
cond_resched();
+ if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ md_check_recovery(mddev);
}
blk_finish_plug(&plug);
}
@@ -1636,6 +2096,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int write_targets = 0, read_targets = 0;
sector_t sync_blocks;
int still_degraded = 0;
+ int good_sectors = RESYNC_SECTORS;
+ int min_bad = 0; /* number of sectors that are bad in all devices */
if (!conf->r1buf_pool)
if (init_resync(conf))
@@ -1723,36 +2185,89 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev == NULL ||
- test_bit(Faulty, &rdev->flags)) {
+ test_bit(Faulty, &rdev->flags)) {
still_degraded = 1;
- continue;
} else if (!test_bit(In_sync, &rdev->flags)) {
bio->bi_rw = WRITE;
bio->bi_end_io = end_sync_write;
write_targets ++;
} else {
/* may need to read from here */
- bio->bi_rw = READ;
- bio->bi_end_io = end_sync_read;
- if (test_bit(WriteMostly, &rdev->flags)) {
- if (wonly < 0)
- wonly = i;
- } else {
- if (disk < 0)
- disk = i;
+ sector_t first_bad = MaxSector;
+ int bad_sectors;
+
+ if (is_badblock(rdev, sector_nr, good_sectors,
+ &first_bad, &bad_sectors)) {
+ if (first_bad > sector_nr)
+ good_sectors = first_bad - sector_nr;
+ else {
+ bad_sectors -= (sector_nr - first_bad);
+ if (min_bad == 0 ||
+ min_bad > bad_sectors)
+ min_bad = bad_sectors;
+ }
+ }
+ if (sector_nr < first_bad) {
+ if (test_bit(WriteMostly, &rdev->flags)) {
+ if (wonly < 0)
+ wonly = i;
+ } else {
+ if (disk < 0)
+ disk = i;
+ }
+ bio->bi_rw = READ;
+ bio->bi_end_io = end_sync_read;
+ read_targets++;
}
- read_targets++;
}
- atomic_inc(&rdev->nr_pending);
- bio->bi_sector = sector_nr + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
- bio->bi_private = r1_bio;
+ if (bio->bi_end_io) {
+ atomic_inc(&rdev->nr_pending);
+ bio->bi_sector = sector_nr + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ bio->bi_private = r1_bio;
+ }
}
rcu_read_unlock();
if (disk < 0)
disk = wonly;
r1_bio->read_disk = disk;
+ if (read_targets == 0 && min_bad > 0) {
+ /* These sectors are bad on all InSync devices, so we
+ * need to mark them bad on all write targets
+ */
+ int ok = 1;
+ for (i = 0 ; i < conf->raid_disks ; i++)
+ if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
+ mdk_rdev_t *rdev =
+ rcu_dereference(conf->mirrors[i].rdev);
+ ok = rdev_set_badblocks(rdev, sector_nr,
+ min_bad, 0
+ ) && ok;
+ }
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ *skipped = 1;
+ put_buf(r1_bio);
+
+ if (!ok) {
+ /* Cannot record the badblocks, so need to
+ * abort the resync.
+ * If there are multiple read targets, could just
+ * fail the really bad ones ???
+ */
+ conf->recovery_disabled = mddev->recovery_disabled;
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ return 0;
+ } else
+ return min_bad;
+
+ }
+ if (min_bad > 0 && min_bad < good_sectors) {
+ /* only resync enough to reach the next bad->good
+ * transition */
+ good_sectors = min_bad;
+ }
+
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
/* extra read targets are also write targets */
write_targets += read_targets-1;
@@ -1769,6 +2284,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (max_sector > mddev->resync_max)
max_sector = mddev->resync_max; /* Don't do IO beyond here */
+ if (max_sector > sector_nr + good_sectors)
+ max_sector = sector_nr + good_sectors;
nr_sectors = 0;
sync_blocks = 0;
do {
@@ -2154,18 +2671,13 @@ static int raid1_reshape(mddev_t *mddev)
for (d = d2 = 0; d < conf->raid_disks; d++) {
mdk_rdev_t *rdev = conf->mirrors[d].rdev;
if (rdev && rdev->raid_disk != d2) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = d2;
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- if (sysfs_create_link(&mddev->kobj,
- &rdev->kobj, nm))
+ sysfs_unlink_rdev(mddev, rdev);
+ if (sysfs_link_rdev(mddev, rdev))
printk(KERN_WARNING
- "md/raid1:%s: cannot register "
- "%s\n",
- mdname(mddev), nm);
+ "md/raid1:%s: cannot register rd%d\n",
+ mdname(mddev), rdev->raid_disk);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index e743a64..e0d676b 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -48,6 +48,12 @@ struct r1_private_data_s {
* (fresh device added).
* Cleared when a sync completes.
*/
+ int recovery_disabled; /* when the same as
+ * mddev->recovery_disabled
+ * we don't allow recovery
+ * to be attempted as we
+ * expect a read error
+ */
wait_queue_head_t wait_barrier;
@@ -95,7 +101,7 @@ struct r1bio_s {
struct list_head retry_list;
/* Next two are only valid when R1BIO_BehindIO is set */
- struct page **behind_pages;
+ struct bio_vec *behind_bvecs;
int behind_page_count;
/*
* if the IO is in WRITE direction, then multiple bios are used.
@@ -110,13 +116,24 @@ struct r1bio_s {
* correct the read error. To keep track of bad blocks on a per-bio
* level, we store IO_BLOCKED in the appropriate 'bios' pointer
*/
-#define IO_BLOCKED ((struct bio*)1)
+#define IO_BLOCKED ((struct bio *)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context. So we record
+ * the success by setting bios[n] to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
/* bits for r1bio.state */
#define R1BIO_Uptodate 0
#define R1BIO_IsSync 1
#define R1BIO_Degraded 2
#define R1BIO_BehindIO 3
+/* Set ReadError on bios that experience a read error so that
+ * raid1d knows what to do with them.
+ */
+#define R1BIO_ReadError 4
/* For write-behind requests, we call bi_end_io when
* the last non-write-behind device completes, providing
* any write was successful. Otherwise we call when
@@ -125,6 +142,11 @@ struct r1bio_s {
* Record that bi_end_io was called with this flag...
*/
#define R1BIO_Returned 6
+/* If a write for this request means we can clear some
+ * known-bad-block records, we set this flag
+ */
+#define R1BIO_MadeGood 7
+#define R1BIO_WriteError 8
extern int md_raid1_congested(mddev_t *mddev, int bits);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6e84668..d7a8468 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
+#include <linux/ratelimit.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
@@ -123,7 +124,14 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
for (j = 0 ; j < nalloc; j++) {
bio = r10_bio->devs[j].bio;
for (i = 0; i < RESYNC_PAGES; i++) {
- page = alloc_page(gfp_flags);
+ if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
+ &conf->mddev->recovery)) {
+ /* we can share bv_page's during recovery */
+ struct bio *rbio = r10_bio->devs[0].bio;
+ page = rbio->bi_io_vec[i].bv_page;
+ get_page(page);
+ } else
+ page = alloc_page(gfp_flags);
if (unlikely(!page))
goto out_free_pages;
@@ -173,7 +181,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
for (i = 0; i < conf->copies; i++) {
struct bio **bio = & r10_bio->devs[i].bio;
- if (*bio && *bio != IO_BLOCKED)
+ if (!BIO_SPECIAL(*bio))
bio_put(*bio);
*bio = NULL;
}
@@ -183,12 +191,6 @@ static void free_r10bio(r10bio_t *r10_bio)
{
conf_t *conf = r10_bio->mddev->private;
- /*
- * Wake up any possible resync thread that waits for the device
- * to go idle.
- */
- allow_barrier(conf);
-
put_all_bios(conf, r10_bio);
mempool_free(r10_bio, conf->r10bio_pool);
}
@@ -227,9 +229,27 @@ static void reschedule_retry(r10bio_t *r10_bio)
static void raid_end_bio_io(r10bio_t *r10_bio)
{
struct bio *bio = r10_bio->master_bio;
+ int done;
+ conf_t *conf = r10_bio->mddev->private;
- bio_endio(bio,
- test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
+ if (bio->bi_phys_segments) {
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio->bi_phys_segments--;
+ done = (bio->bi_phys_segments == 0);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ } else
+ done = 1;
+ if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ if (done) {
+ bio_endio(bio, 0);
+ /*
+ * Wake up any possible resync thread that waits for the device
+ * to go idle.
+ */
+ allow_barrier(conf);
+ }
free_r10bio(r10_bio);
}
@@ -244,6 +264,26 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
r10_bio->devs[slot].addr + (r10_bio->sectors);
}
+/*
+ * Find the disk number which triggered given bio
+ */
+static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio,
+ struct bio *bio, int *slotp)
+{
+ int slot;
+
+ for (slot = 0; slot < conf->copies; slot++)
+ if (r10_bio->devs[slot].bio == bio)
+ break;
+
+ BUG_ON(slot == conf->copies);
+ update_head_pos(slot, r10_bio);
+
+ if (slotp)
+ *slotp = slot;
+ return r10_bio->devs[slot].devnum;
+}
+
static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -277,34 +317,60 @@ static void raid10_end_read_request(struct bio *bio, int error)
* oops, read error - keep the refcount on the rdev
*/
char b[BDEVNAME_SIZE];
- if (printk_ratelimit())
- printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n",
- mdname(conf->mddev),
- bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
+ printk_ratelimited(KERN_ERR
+ "md/raid10:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
+ bdevname(conf->mirrors[dev].rdev->bdev, b),
+ (unsigned long long)r10_bio->sector);
+ set_bit(R10BIO_ReadError, &r10_bio->state);
reschedule_retry(r10_bio);
}
}
+static void close_write(r10bio_t *r10_bio)
+{
+ /* clear the bitmap if all writes complete successfully */
+ bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
+ r10_bio->sectors,
+ !test_bit(R10BIO_Degraded, &r10_bio->state),
+ 0);
+ md_write_end(r10_bio->mddev);
+}
+
+static void one_write_done(r10bio_t *r10_bio)
+{
+ if (atomic_dec_and_test(&r10_bio->remaining)) {
+ if (test_bit(R10BIO_WriteError, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else {
+ close_write(r10_bio);
+ if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else
+ raid_end_bio_io(r10_bio);
+ }
+ }
+}
+
static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t *r10_bio = bio->bi_private;
- int slot, dev;
+ int dev;
+ int dec_rdev = 1;
conf_t *conf = r10_bio->mddev->private;
+ int slot;
- for (slot = 0; slot < conf->copies; slot++)
- if (r10_bio->devs[slot].bio == bio)
- break;
- dev = r10_bio->devs[slot].devnum;
+ dev = find_bio_disk(conf, r10_bio, bio, &slot);
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
if (!uptodate) {
- md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
- /* an I/O failed, we can't clear the bitmap */
- set_bit(R10BIO_Degraded, &r10_bio->state);
- } else
+ set_bit(WriteErrorSeen, &conf->mirrors[dev].rdev->flags);
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ dec_rdev = 0;
+ } else {
/*
* Set R10BIO_Uptodate in our master bio, so that
* we will return a good error code for to the higher
@@ -314,26 +380,31 @@ static void raid10_end_write_request(struct bio *bio, int error)
* user-side. So if something waits for IO, then it will
* wait for the 'master' bio.
*/
+ sector_t first_bad;
+ int bad_sectors;
+
set_bit(R10BIO_Uptodate, &r10_bio->state);
- update_head_pos(slot, r10_bio);
+ /* Maybe we can clear some bad blocks. */
+ if (is_badblock(conf->mirrors[dev].rdev,
+ r10_bio->devs[slot].addr,
+ r10_bio->sectors,
+ &first_bad, &bad_sectors)) {
+ bio_put(bio);
+ r10_bio->devs[slot].bio = IO_MADE_GOOD;
+ dec_rdev = 0;
+ set_bit(R10BIO_MadeGood, &r10_bio->state);
+ }
+ }
/*
*
* Let's see if all mirrored write operations have finished
* already.
*/
- if (atomic_dec_and_test(&r10_bio->remaining)) {
- /* clear the bitmap if all writes complete successfully */
- bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
- r10_bio->sectors,
- !test_bit(R10BIO_Degraded, &r10_bio->state),
- 0);
- md_write_end(r10_bio->mddev);
- raid_end_bio_io(r10_bio);
- }
-
- rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+ one_write_done(r10_bio);
+ if (dec_rdev)
+ rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}
@@ -484,11 +555,12 @@ static int raid10_mergeable_bvec(struct request_queue *q,
* FIXME: possibly should rethink readbalancing and do it differently
* depending on near_copies / far_copies geometry.
*/
-static int read_balance(conf_t *conf, r10bio_t *r10_bio)
+static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
{
const sector_t this_sector = r10_bio->sector;
int disk, slot;
- const int sectors = r10_bio->sectors;
+ int sectors = r10_bio->sectors;
+ int best_good_sectors;
sector_t new_distance, best_dist;
mdk_rdev_t *rdev;
int do_balance;
@@ -497,8 +569,10 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
raid10_find_phys(conf, r10_bio);
rcu_read_lock();
retry:
+ sectors = r10_bio->sectors;
best_slot = -1;
best_dist = MaxSector;
+ best_good_sectors = 0;
do_balance = 1;
/*
* Check if we can balance. We can balance on the whole
@@ -511,6 +585,10 @@ retry:
do_balance = 0;
for (slot = 0; slot < conf->copies ; slot++) {
+ sector_t first_bad;
+ int bad_sectors;
+ sector_t dev_sector;
+
if (r10_bio->devs[slot].bio == IO_BLOCKED)
continue;
disk = r10_bio->devs[slot].devnum;
@@ -520,6 +598,37 @@ retry:
if (!test_bit(In_sync, &rdev->flags))
continue;
+ dev_sector = r10_bio->devs[slot].addr;
+ if (is_badblock(rdev, dev_sector, sectors,
+ &first_bad, &bad_sectors)) {
+ if (best_dist < MaxSector)
+ /* Already have a better slot */
+ continue;
+ if (first_bad <= dev_sector) {
+ /* Cannot read here. If this is the
+ * 'primary' device, then we must not read
+ * beyond 'bad_sectors' from another device.
+ */
+ bad_sectors -= (dev_sector - first_bad);
+ if (!do_balance && sectors > bad_sectors)
+ sectors = bad_sectors;
+ if (best_good_sectors > sectors)
+ best_good_sectors = sectors;
+ } else {
+ sector_t good_sectors =
+ first_bad - dev_sector;
+ if (good_sectors > best_good_sectors) {
+ best_good_sectors = good_sectors;
+ best_slot = slot;
+ }
+ if (!do_balance)
+ /* Must read from here */
+ break;
+ }
+ continue;
+ } else
+ best_good_sectors = sectors;
+
if (!do_balance)
break;
@@ -561,6 +670,7 @@ retry:
} else
disk = -1;
rcu_read_unlock();
+ *max_sectors = best_good_sectors;
return disk;
}
@@ -734,6 +844,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
unsigned long flags;
mdk_rdev_t *blocked_rdev;
int plugged;
+ int sectors_handled;
+ int max_sectors;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
@@ -808,12 +920,26 @@ static int make_request(mddev_t *mddev, struct bio * bio)
r10_bio->sector = bio->bi_sector;
r10_bio->state = 0;
+ /* We might need to issue multiple reads to different
+ * devices if there are bad blocks around, so we keep
+ * track of the number of reads in bio->bi_phys_segments.
+ * If this is 0, there is only one r10_bio and no locking
+ * will be needed when the request completes. If it is
+ * non-zero, then it is the number of not-completed requests.
+ */
+ bio->bi_phys_segments = 0;
+ clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
if (rw == READ) {
/*
* read balancing logic:
*/
- int disk = read_balance(conf, r10_bio);
- int slot = r10_bio->read_slot;
+ int disk;
+ int slot;
+
+read_again:
+ disk = read_balance(conf, r10_bio, &max_sectors);
+ slot = r10_bio->read_slot;
if (disk < 0) {
raid_end_bio_io(r10_bio);
return 0;
@@ -821,6 +947,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mirror = conf->mirrors + disk;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
r10_bio->devs[slot].bio = read_bio;
@@ -831,7 +959,37 @@ static int make_request(mddev_t *mddev, struct bio * bio)
read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r10_bio;
- generic_make_request(read_bio);
+ if (max_sectors < r10_bio->sectors) {
+ /* Could not read all from this device, so we will
+ * need another r10_bio.
+ */
+ sectors_handled = (r10_bio->sector + max_sectors
+ - bio->bi_sector);
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ /* Cannot call generic_make_request directly
+ * as that will be queued in __generic_make_request
+ * and subsequent mempool_alloc might block
+ * waiting for it. So hand bio over to raid10d.
+ */
+ reschedule_retry(r10_bio);
+
+ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = ((bio->bi_size >> 9)
+ - sectors_handled);
+ r10_bio->state = 0;
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_sector + sectors_handled;
+ goto read_again;
+ } else
+ generic_make_request(read_bio);
return 0;
}
@@ -841,13 +999,22 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* first select target devices under rcu_lock and
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
+ * If there are known/acknowledged bad blocks on any device
+ * on which we have seen a write error, we want to avoid
+ * writing to those blocks. This potentially requires several
+ * writes to write around the bad blocks. Each set of writes
+ * gets its own r10_bio with a set of bios attached. The number
+ * of r10_bios is recorded in bio->bi_phys_segments just as with
+ * the read case.
*/
plugged = mddev_check_plugged(mddev);
raid10_find_phys(conf, r10_bio);
- retry_write:
+retry_write:
blocked_rdev = NULL;
rcu_read_lock();
+ max_sectors = r10_bio->sectors;
+
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
@@ -856,13 +1023,55 @@ static int make_request(mddev_t *mddev, struct bio * bio)
blocked_rdev = rdev;
break;
}
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- atomic_inc(&rdev->nr_pending);
- r10_bio->devs[i].bio = bio;
- } else {
- r10_bio->devs[i].bio = NULL;
+ r10_bio->devs[i].bio = NULL;
+ if (!rdev || test_bit(Faulty, &rdev->flags)) {
set_bit(R10BIO_Degraded, &r10_bio->state);
+ continue;
+ }
+ if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ sector_t first_bad;
+ sector_t dev_sector = r10_bio->devs[i].addr;
+ int bad_sectors;
+ int is_bad;
+
+ is_bad = is_badblock(rdev, dev_sector,
+ max_sectors,
+ &first_bad, &bad_sectors);
+ if (is_bad < 0) {
+ /* Mustn't write here until the bad block
+ * is acknowledged
+ */
+ atomic_inc(&rdev->nr_pending);
+ set_bit(BlockedBadBlocks, &rdev->flags);
+ blocked_rdev = rdev;
+ break;
+ }
+ if (is_bad && first_bad <= dev_sector) {
+ /* Cannot write here at all */
+ bad_sectors -= (dev_sector - first_bad);
+ if (bad_sectors < max_sectors)
+ /* Mustn't write more than bad_sectors
+ * to other devices yet
+ */
+ max_sectors = bad_sectors;
+ /* We don't set R10BIO_Degraded as that
+ * only applies if the disk is missing,
+ * so it might be re-added, and we want to
+ * know to recover this chunk.
+ * In this case the device is here, and the
+ * fact that this chunk is not in-sync is
+ * recorded in the bad block log.
+ */
+ continue;
+ }
+ if (is_bad) {
+ int good_sectors = first_bad - dev_sector;
+ if (good_sectors < max_sectors)
+ max_sectors = good_sectors;
+ }
}
+ r10_bio->devs[i].bio = bio;
+ atomic_inc(&rdev->nr_pending);
}
rcu_read_unlock();
@@ -882,8 +1091,22 @@ static int make_request(mddev_t *mddev, struct bio * bio)
goto retry_write;
}
+ if (max_sectors < r10_bio->sectors) {
+ /* We are splitting this into multiple parts, so
+ * we need to prepare for allocating another r10_bio.
+ */
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ }
+ sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+
atomic_set(&r10_bio->remaining, 1);
- bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
+ bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
for (i = 0; i < conf->copies; i++) {
struct bio *mbio;
@@ -892,10 +1115,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
r10_bio->devs[i].bio = mbio;
- mbio->bi_sector = r10_bio->devs[i].addr+
- conf->mirrors[d].rdev->data_offset;
+ mbio->bi_sector = (r10_bio->devs[i].addr+
+ conf->mirrors[d].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
mbio->bi_rw = WRITE | do_sync | do_fua;
@@ -907,15 +1132,26 @@ static int make_request(mddev_t *mddev, struct bio * bio)
spin_unlock_irqrestore(&conf->device_lock, flags);
}
- if (atomic_dec_and_test(&r10_bio->remaining)) {
- /* This matches the end of raid10_end_write_request() */
- bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
- r10_bio->sectors,
- !test_bit(R10BIO_Degraded, &r10_bio->state),
- 0);
- md_write_end(mddev);
- raid_end_bio_io(r10_bio);
+ /* Don't remove the bias on 'remaining' (one_write_done) until
+ * after checking if we need to go around again.
+ */
+
+ if (sectors_handled < (bio->bi_size >> 9)) {
+ one_write_done(r10_bio);
+ /* We need another r10_bio. It has already been counted
+ * in bio->bi_phys_segments.
+ */
+ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_sector + sectors_handled;
+ r10_bio->state = 0;
+ goto retry_write;
}
+ one_write_done(r10_bio);
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
@@ -949,6 +1185,30 @@ static void status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, "]");
}
+/* check if there are enough drives for
+ * every block to appear on at least one.
+ * Don't consider the device numbered 'ignore'
+ * as we might be about to remove it.
+ */
+static int enough(conf_t *conf, int ignore)
+{
+ int first = 0;
+
+ do {
+ int n = conf->copies;
+ int cnt = 0;
+ while (n--) {
+ if (conf->mirrors[first].rdev &&
+ first != ignore)
+ cnt++;
+ first = (first+1) % conf->raid_disks;
+ }
+ if (cnt == 0)
+ return 0;
+ } while (first != 0);
+ return 1;
+}
+
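As a worked example (not from the patch), take raid_disks = 4 and copies = 2: the do/while loop above examines the slot groups {0,1} and {2,3}. If disk 0 has already failed and disk 1 is the device being considered for removal (ignore == 1), the group {0,1} counts no usable member and enough() returns 0, so the removal is refused; if only disk 1 is affected, the group {0,1} still has disk 0 and the function returns 1.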
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
char b[BDEVNAME_SIZE];
@@ -961,13 +1221,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
* else mark the drive as failed
*/
if (test_bit(In_sync, &rdev->flags)
- && conf->raid_disks-mddev->degraded == 1)
+ && !enough(conf, rdev->raid_disk))
/*
* Don't fail the drive, just return an IO error.
- * The test should really be more sophisticated than
- * "working_disks == 1", but it isn't critical, and
- * can wait until we do more sophisticated "is the drive
- * really dead" tests...
*/
return;
if (test_and_clear_bit(In_sync, &rdev->flags)) {
@@ -980,6 +1236,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
}
+ set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
printk(KERN_ALERT
@@ -1022,27 +1279,6 @@ static void close_sync(conf_t *conf)
conf->r10buf_pool = NULL;
}
-/* check if there are enough drives for
- * every block to appear on atleast one
- */
-static int enough(conf_t *conf)
-{
- int first = 0;
-
- do {
- int n = conf->copies;
- int cnt = 0;
- while (n--) {
- if (conf->mirrors[first].rdev)
- cnt++;
- first = (first+1) % conf->raid_disks;
- }
- if (cnt == 0)
- return 0;
- } while (first != 0);
- return 1;
-}
-
static int raid10_spare_active(mddev_t *mddev)
{
int i;
@@ -1078,7 +1314,6 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
conf_t *conf = mddev->private;
int err = -EEXIST;
int mirror;
- mirror_info_t *p;
int first = 0;
int last = conf->raid_disks - 1;
@@ -1087,44 +1322,47 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* very different from resync
*/
return -EBUSY;
- if (!enough(conf))
+ if (!enough(conf, -1))
return -EINVAL;
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
- if (rdev->saved_raid_disk >= 0 &&
- rdev->saved_raid_disk >= first &&
+ if (rdev->saved_raid_disk >= first &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
mirror = rdev->saved_raid_disk;
else
mirror = first;
- for ( ; mirror <= last ; mirror++)
- if ( !(p=conf->mirrors+mirror)->rdev) {
-
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must
- * never risk violating it, so limit
- * ->max_segments to one lying with a single
- * page, as a one page request is never in
- * violation.
- */
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
- blk_queue_max_segments(mddev->queue, 1);
- blk_queue_segment_boundary(mddev->queue,
- PAGE_CACHE_SIZE - 1);
- }
+ for ( ; mirror <= last ; mirror++) {
+ mirror_info_t *p = &conf->mirrors[mirror];
+ if (p->recovery_disabled == mddev->recovery_disabled)
+ continue;
+ if (p->rdev)
+ continue;
- p->head_position = 0;
- rdev->raid_disk = mirror;
- err = 0;
- if (rdev->saved_raid_disk != mirror)
- conf->fullsync = 1;
- rcu_assign_pointer(p->rdev, rdev);
- break;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ /* as we don't honour merge_bvec_fn, we must
+ * never risk violating it, so limit
+ * ->max_segments to one lying with a single
+ * page, as a one page request is never in
+ * violation.
+ */
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
}
+ p->head_position = 0;
+ rdev->raid_disk = mirror;
+ err = 0;
+ if (rdev->saved_raid_disk != mirror)
+ conf->fullsync = 1;
+ rcu_assign_pointer(p->rdev, rdev);
+ break;
+ }
+
md_integrity_add_rdev(rdev, mddev);
print_conf(conf);
return err;
@@ -1149,7 +1387,8 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
* is not possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
- enough(conf)) {
+ mddev->recovery_disabled != p->recovery_disabled &&
+ enough(conf, -1)) {
err = -EBUSY;
goto abort;
}
@@ -1174,24 +1413,18 @@ static void end_sync_read(struct bio *bio, int error)
{
r10bio_t *r10_bio = bio->bi_private;
conf_t *conf = r10_bio->mddev->private;
- int i,d;
+ int d;
- for (i=0; i<conf->copies; i++)
- if (r10_bio->devs[i].bio == bio)
- break;
- BUG_ON(i == conf->copies);
- update_head_pos(i, r10_bio);
- d = r10_bio->devs[i].devnum;
+ d = find_bio_disk(conf, r10_bio, bio, NULL);
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
set_bit(R10BIO_Uptodate, &r10_bio->state);
- else {
+ else
+ /* The write handler will notice the lack of
+ * R10BIO_Uptodate and record any errors etc
+ */
atomic_add(r10_bio->sectors,
&conf->mirrors[d].rdev->corrected_errors);
- if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
- md_error(r10_bio->mddev,
- conf->mirrors[d].rdev);
- }
/* for reconstruct, we always reschedule after a read.
* for resync, only after all reads
@@ -1206,40 +1439,60 @@ static void end_sync_read(struct bio *bio, int error)
}
}
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_request(r10bio_t *r10_bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t *r10_bio = bio->bi_private;
mddev_t *mddev = r10_bio->mddev;
- conf_t *conf = mddev->private;
- int i,d;
-
- for (i = 0; i < conf->copies; i++)
- if (r10_bio->devs[i].bio == bio)
- break;
- d = r10_bio->devs[i].devnum;
-
- if (!uptodate)
- md_error(mddev, conf->mirrors[d].rdev);
-
- update_head_pos(i, r10_bio);
- rdev_dec_pending(conf->mirrors[d].rdev, mddev);
while (atomic_dec_and_test(&r10_bio->remaining)) {
if (r10_bio->master_bio == NULL) {
/* the primary of several recovery bios */
sector_t s = r10_bio->sectors;
- put_buf(r10_bio);
+ if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+ test_bit(R10BIO_WriteError, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else
+ put_buf(r10_bio);
md_done_sync(mddev, s, 1);
break;
} else {
r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
- put_buf(r10_bio);
+ if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+ test_bit(R10BIO_WriteError, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else
+ put_buf(r10_bio);
r10_bio = r10_bio2;
}
}
}
+static void end_sync_write(struct bio *bio, int error)
+{
+ int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ r10bio_t *r10_bio = bio->bi_private;
+ mddev_t *mddev = r10_bio->mddev;
+ conf_t *conf = mddev->private;
+ int d;
+ sector_t first_bad;
+ int bad_sectors;
+ int slot;
+
+ d = find_bio_disk(conf, r10_bio, bio, &slot);
+
+ if (!uptodate) {
+ set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ } else if (is_badblock(conf->mirrors[d].rdev,
+ r10_bio->devs[slot].addr,
+ r10_bio->sectors,
+ &first_bad, &bad_sectors))
+ set_bit(R10BIO_MadeGood, &r10_bio->state);
+
+ rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+
+ end_sync_request(r10_bio);
+}
+
/*
* Note: sync and recover and handled very differently for raid10
* This code is for resync.
@@ -1299,11 +1552,12 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
if (j == vcnt)
continue;
mddev->resync_mismatches += r10_bio->sectors;
+ if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ /* Don't fix anything. */
+ continue;
}
- if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
- /* Don't fix anything. */
- continue;
- /* Ok, we need to write this bio
+ /* Ok, we need to write this bio, either to correct an
+ * inconsistency or to correct an unreadable block.
* First we need to fixup bv_offset, bv_len and
* bi_vecs, as the read request might have corrupted these
*/
@@ -1355,32 +1609,107 @@ done:
* The second for writing.
*
*/
+static void fix_recovery_read_error(r10bio_t *r10_bio)
+{
+ /* We got a read error during recovery.
+ * We repeat the read in smaller page-sized sections.
+ * If a read succeeds, write it to the new device or record
+ * a bad block if we cannot.
+ * If a read fails, record a bad block on both old and
+ * new devices.
+ */
+ mddev_t *mddev = r10_bio->mddev;
+ conf_t *conf = mddev->private;
+ struct bio *bio = r10_bio->devs[0].bio;
+ sector_t sect = 0;
+ int sectors = r10_bio->sectors;
+ int idx = 0;
+ int dr = r10_bio->devs[0].devnum;
+ int dw = r10_bio->devs[1].devnum;
+
+ while (sectors) {
+ int s = sectors;
+ mdk_rdev_t *rdev;
+ sector_t addr;
+ int ok;
+
+ if (s > (PAGE_SIZE>>9))
+ s = PAGE_SIZE >> 9;
+
+ rdev = conf->mirrors[dr].rdev;
+ addr = r10_bio->devs[0].addr + sect;
+ ok = sync_page_io(rdev,
+ addr,
+ s << 9,
+ bio->bi_io_vec[idx].bv_page,
+ READ, false);
+ if (ok) {
+ rdev = conf->mirrors[dw].rdev;
+ addr = r10_bio->devs[1].addr + sect;
+ ok = sync_page_io(rdev,
+ addr,
+ s << 9,
+ bio->bi_io_vec[idx].bv_page,
+ WRITE, false);
+ if (!ok)
+ set_bit(WriteErrorSeen, &rdev->flags);
+ }
+ if (!ok) {
+ /* We don't worry if we cannot set a bad block -
+ * it really is bad so there is no loss in not
+ * recording it yet
+ */
+ rdev_set_badblocks(rdev, addr, s, 0);
+
+ if (rdev != conf->mirrors[dw].rdev) {
+ /* need bad block on destination too */
+ mdk_rdev_t *rdev2 = conf->mirrors[dw].rdev;
+ addr = r10_bio->devs[1].addr + sect;
+ ok = rdev_set_badblocks(rdev2, addr, s, 0);
+ if (!ok) {
+ /* just abort the recovery */
+ printk(KERN_NOTICE
+ "md/raid10:%s: recovery aborted"
+ " due to read error\n",
+ mdname(mddev));
+
+ conf->mirrors[dw].recovery_disabled
+ = mddev->recovery_disabled;
+ set_bit(MD_RECOVERY_INTR,
+ &mddev->recovery);
+ break;
+ }
+ }
+ }
+
+ sectors -= s;
+ sect += s;
+ idx++;
+ }
+}
static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{
conf_t *conf = mddev->private;
- int i, d;
- struct bio *bio, *wbio;
+ int d;
+ struct bio *wbio;
+ if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
+ fix_recovery_read_error(r10_bio);
+ end_sync_request(r10_bio);
+ return;
+ }
- /* move the pages across to the second bio
+ /*
+ * share the pages with the first bio
* and submit the write request
*/
- bio = r10_bio->devs[0].bio;
wbio = r10_bio->devs[1].bio;
- for (i=0; i < wbio->bi_vcnt; i++) {
- struct page *p = bio->bi_io_vec[i].bv_page;
- bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
- wbio->bi_io_vec[i].bv_page = p;
- }
d = r10_bio->devs[1].devnum;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
- if (test_bit(R10BIO_Uptodate, &r10_bio->state))
- generic_make_request(wbio);
- else
- bio_endio(wbio, -EIO);
+ generic_make_request(wbio);
}
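
The fix_recovery_read_error() helper added above retries the failed range in PAGE_SIZE-sized pieces, so a single unreadable sector does not force the whole recovery window to be abandoned. A minimal stand-alone sketch of that chunking loop, with hypothetical try_read()/try_write() callbacks standing in for sync_page_io(), could look like this:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SECTORS (4096 >> 9)        /* 8 sectors per 4K page */

/* Hypothetical per-chunk I/O callbacks standing in for sync_page_io(). */
typedef bool (*chunk_io_fn)(unsigned long long addr, int sectors);

/* Walk 'sectors' starting at 'addr' in page-sized steps: read each piece
 * from the source and, only if that worked, write it to the destination.
 * Returns the number of pieces that could not be recovered (each of which
 * the kernel code above turns into a bad-block record). */
static int recover_range(unsigned long long addr, int sectors,
                         chunk_io_fn try_read, chunk_io_fn try_write)
{
        int bad = 0;

        while (sectors) {
                int s = sectors;

                if (s > PAGE_SECTORS)
                        s = PAGE_SECTORS;

                if (!try_read(addr, s) || !try_write(addr, s))
                        bad++;

                sectors -= s;
                addr += s;
        }
        return bad;
}

static bool always_ok(unsigned long long addr, int sectors)
{
        (void)addr; (void)sectors;
        return true;
}

static bool fail_first_page(unsigned long long addr, int sectors)
{
        (void)sectors;
        return addr >= PAGE_SECTORS;    /* only the first page is unreadable */
}

int main(void)
{
        printf("unrecovered pieces: %d\n",
               recover_range(0, 3 * PAGE_SECTORS + 5,
                             fail_first_page, always_ok));
        return 0;
}
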
@@ -1421,6 +1750,26 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}
+static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
+ int sectors, struct page *page, int rw)
+{
+ sector_t first_bad;
+ int bad_sectors;
+
+ if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
+ && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
+ return -1;
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ /* success */
+ return 1;
+ if (rw == WRITE)
+ set_bit(WriteErrorSeen, &rdev->flags);
+ /* need to record an error - either for the block or the device */
+ if (!rdev_set_badblocks(rdev, sector, sectors, 0))
+ md_error(rdev->mddev, rdev);
+ return 0;
+}
+
/*
* This is a kernel thread which:
*
@@ -1476,10 +1825,15 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
rcu_read_lock();
do {
+ sector_t first_bad;
+ int bad_sectors;
+
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
- test_bit(In_sync, &rdev->flags)) {
+ test_bit(In_sync, &rdev->flags) &&
+ is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
+ &first_bad, &bad_sectors) == 0) {
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
success = sync_page_io(rdev,
@@ -1499,9 +1853,19 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
rcu_read_unlock();
if (!success) {
- /* Cannot read from anywhere -- bye bye array */
+ /* Cannot read from anywhere, just mark the block
+ * as bad on the first device to discourage future
+ * reads.
+ */
int dn = r10_bio->devs[r10_bio->read_slot].devnum;
- md_error(mddev, conf->mirrors[dn].rdev);
+ rdev = conf->mirrors[dn].rdev;
+
+ if (!rdev_set_badblocks(
+ rdev,
+ r10_bio->devs[r10_bio->read_slot].addr
+ + sect,
+ s, 0))
+ md_error(mddev, rdev);
break;
}
@@ -1516,80 +1880,82 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
sl--;
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
- if (rdev &&
- test_bit(In_sync, &rdev->flags)) {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- atomic_add(s, &rdev->corrected_errors);
- if (sync_page_io(rdev,
- r10_bio->devs[sl].addr +
- sect,
- s<<9, conf->tmppage, WRITE, false)
- == 0) {
- /* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: read correction "
- "write failed"
- " (%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(
- sect + rdev->data_offset),
- bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing "
- "drive\n",
- mdname(mddev),
- bdevname(rdev->bdev, b));
- md_error(mddev, rdev);
- }
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
+ if (!rdev ||
+ !test_bit(In_sync, &rdev->flags))
+ continue;
+
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ if (r10_sync_page_io(rdev,
+ r10_bio->devs[sl].addr +
+ sect,
+ s<<9, conf->tmppage, WRITE)
+ == 0) {
+ /* Well, this device is dead */
+ printk(KERN_NOTICE
+ "md/raid10:%s: read correction "
+ "write failed"
+ " (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(
+ sect + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing "
+ "drive\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
}
+ rdev_dec_pending(rdev, mddev);
+ rcu_read_lock();
}
sl = start;
while (sl != r10_bio->read_slot) {
+ char b[BDEVNAME_SIZE];
if (sl==0)
sl = conf->copies;
sl--;
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
- if (rdev &&
- test_bit(In_sync, &rdev->flags)) {
- char b[BDEVNAME_SIZE];
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- if (sync_page_io(rdev,
- r10_bio->devs[sl].addr +
- sect,
- s<<9, conf->tmppage,
- READ, false) == 0) {
- /* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: unable to read back "
- "corrected sectors"
- " (%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(
- sect + rdev->data_offset),
- bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
- mdname(mddev),
- bdevname(rdev->bdev, b));
-
- md_error(mddev, rdev);
- } else {
- printk(KERN_INFO
- "md/raid10:%s: read error corrected"
- " (%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(
- sect + rdev->data_offset),
- bdevname(rdev->bdev, b));
- }
+ if (!rdev ||
+ !test_bit(In_sync, &rdev->flags))
+ continue;
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ switch (r10_sync_page_io(rdev,
+ r10_bio->devs[sl].addr +
+ sect,
+ s<<9, conf->tmppage,
+ READ)) {
+ case 0:
+ /* Well, this device is dead */
+ printk(KERN_NOTICE
+ "md/raid10:%s: unable to read back "
+ "corrected sectors"
+ " (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(
+ sect + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing "
+ "drive\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
+ break;
+ case 1:
+ printk(KERN_INFO
+ "md/raid10:%s: read error corrected"
+ " (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(
+ sect + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ atomic_add(s, &rdev->corrected_errors);
}
+
+ rdev_dec_pending(rdev, mddev);
+ rcu_read_lock();
}
rcu_read_unlock();
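
The rewritten loops above funnel their per-sector I/O through r10_sync_page_io(), which folds three outcomes into one return value: 1 for success, 0 for an I/O error that has just been recorded as a bad block (or escalated via md_error()), and -1 when the range was already known bad and the I/O was skipped. A small sketch of that contract and of the switch used in the read-back loop; the helper below is illustrative, not the kernel function:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative tri-state result, mirroring r10_sync_page_io():
 *   1  I/O completed successfully
 *   0  I/O failed; the range is now recorded as bad
 *  -1  range was already known bad, I/O not attempted
 */
static int sync_page_io_sketch(bool already_bad, bool io_ok)
{
        if (already_bad)
                return -1;
        if (io_ok)
                return 1;
        /* here the kernel records a bad block or fails the device */
        return 0;
}

int main(void)
{
        int outcomes[3] = {
                sync_page_io_sketch(false, true),
                sync_page_io_sketch(false, false),
                sync_page_io_sketch(true,  true),
        };

        for (int i = 0; i < 3; i++) {
                switch (outcomes[i]) {
                case 1:
                        printf("ok: data transferred\n");
                        break;
                case 0:
                        printf("failed: now marked bad, look elsewhere\n");
                        break;
                default:
                        printf("skipped: range already known bad\n");
                        break;
                }
        }
        return 0;
}
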
@@ -1598,21 +1964,254 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
}
}
+static void bi_complete(struct bio *bio, int error)
+{
+ complete((struct completion *)bio->bi_private);
+}
+
+static int submit_bio_wait(int rw, struct bio *bio)
+{
+ struct completion event;
+ rw |= REQ_SYNC;
+
+ init_completion(&event);
+ bio->bi_private = &event;
+ bio->bi_end_io = bi_complete;
+ submit_bio(rw, bio);
+ wait_for_completion(&event);
+
+ return test_bit(BIO_UPTODATE, &bio->bi_flags);
+}
+
+static int narrow_write_error(r10bio_t *r10_bio, int i)
+{
+ struct bio *bio = r10_bio->master_bio;
+ mddev_t *mddev = r10_bio->mddev;
+ conf_t *conf = mddev->private;
+ mdk_rdev_t *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
+ /* bio has the data to be written to slot 'i' where
+ * we just recently had a write error.
+ * We repeatedly clone the bio and trim down to one block,
+ * then try the write. Where the write fails we record
+ * a bad block.
+ * It is conceivable that the bio doesn't exactly align with
+ * blocks. We must handle this.
+ *
+ * We currently own a reference to the rdev.
+ */
+
+ int block_sectors;
+ sector_t sector;
+ int sectors;
+ int sect_to_write = r10_bio->sectors;
+ int ok = 1;
+
+ if (rdev->badblocks.shift < 0)
+ return 0;
+
+ block_sectors = 1 << rdev->badblocks.shift;
+ sector = r10_bio->sector;
+ sectors = ((r10_bio->sector + block_sectors)
+ & ~(sector_t)(block_sectors - 1))
+ - sector;
+
+ while (sect_to_write) {
+ struct bio *wbio;
+ if (sectors > sect_to_write)
+ sectors = sect_to_write;
+ /* Write at 'sector' for 'sectors' */
+ wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(wbio, sector - bio->bi_sector, sectors);
+ wbio->bi_sector = (r10_bio->devs[i].addr+
+ rdev->data_offset+
+ (sector - r10_bio->sector));
+ wbio->bi_bdev = rdev->bdev;
+ if (submit_bio_wait(WRITE, wbio) == 0)
+ /* Failure! */
+ ok = rdev_set_badblocks(rdev, sector,
+ sectors, 0)
+ && ok;
+
+ bio_put(wbio);
+ sect_to_write -= sectors;
+ sector += sectors;
+ sectors = block_sectors;
+ }
+ return ok;
+}
+
+static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
+{
+ int slot = r10_bio->read_slot;
+ int mirror = r10_bio->devs[slot].devnum;
+ struct bio *bio;
+ conf_t *conf = mddev->private;
+ mdk_rdev_t *rdev;
+ char b[BDEVNAME_SIZE];
+ unsigned long do_sync;
+ int max_sectors;
+
+ /* we got a read error. Maybe the drive is bad. Maybe just
+ * the block and we can fix it.
+ * We freeze all other IO, and try reading the block from
+ * other devices. When we find one, we re-write
+ * and check if that fixes the read error.
+ * This is all done synchronously while the array is
+ * frozen.
+ */
+ if (mddev->ro == 0) {
+ freeze_array(conf);
+ fix_read_error(conf, mddev, r10_bio);
+ unfreeze_array(conf);
+ }
+ rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
+
+ bio = r10_bio->devs[slot].bio;
+ bdevname(bio->bi_bdev, b);
+ r10_bio->devs[slot].bio =
+ mddev->ro ? IO_BLOCKED : NULL;
+read_more:
+ mirror = read_balance(conf, r10_bio, &max_sectors);
+ if (mirror == -1) {
+ printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
+ " read error for block %llu\n",
+ mdname(mddev), b,
+ (unsigned long long)r10_bio->sector);
+ raid_end_bio_io(r10_bio);
+ bio_put(bio);
+ return;
+ }
+
+ do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+ if (bio)
+ bio_put(bio);
+ slot = r10_bio->read_slot;
+ rdev = conf->mirrors[mirror].rdev;
+ printk_ratelimited(
+ KERN_ERR
+ "md/raid10:%s: %s: redirecting"
+ "sector %llu to another mirror\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ (unsigned long long)r10_bio->sector);
+ bio = bio_clone_mddev(r10_bio->master_bio,
+ GFP_NOIO, mddev);
+ md_trim_bio(bio,
+ r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[slot].bio = bio;
+ bio->bi_sector = r10_bio->devs[slot].addr
+ + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ bio->bi_rw = READ | do_sync;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = raid10_end_read_request;
+ if (max_sectors < r10_bio->sectors) {
+ /* Drat - have to split this up more */
+ struct bio *mbio = r10_bio->master_bio;
+ int sectors_handled =
+ r10_bio->sector + max_sectors
+ - mbio->bi_sector;
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (mbio->bi_phys_segments == 0)
+ mbio->bi_phys_segments = 2;
+ else
+ mbio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ generic_make_request(bio);
+ bio = NULL;
+
+ r10_bio = mempool_alloc(conf->r10bio_pool,
+ GFP_NOIO);
+ r10_bio->master_bio = mbio;
+ r10_bio->sectors = (mbio->bi_size >> 9)
+ - sectors_handled;
+ r10_bio->state = 0;
+ set_bit(R10BIO_ReadError,
+ &r10_bio->state);
+ r10_bio->mddev = mddev;
+ r10_bio->sector = mbio->bi_sector
+ + sectors_handled;
+
+ goto read_more;
+ } else
+ generic_make_request(bio);
+}
+
+static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio)
+{
+ /* Some sort of write request has finished and it
+ * succeeded in writing where we thought there was a
+ * bad block. So forget the bad block.
+ * Or possibly it failed and we need to record
+ * a bad block.
+ */
+ int m;
+ mdk_rdev_t *rdev;
+
+ if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
+ test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+ for (m = 0; m < conf->copies; m++) {
+ int dev = r10_bio->devs[m].devnum;
+ rdev = conf->mirrors[dev].rdev;
+ if (r10_bio->devs[m].bio == NULL)
+ continue;
+ if (test_bit(BIO_UPTODATE,
+ &r10_bio->devs[m].bio->bi_flags)) {
+ rdev_clear_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors);
+ } else {
+ if (!rdev_set_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors, 0))
+ md_error(conf->mddev, rdev);
+ }
+ }
+ put_buf(r10_bio);
+ } else {
+ for (m = 0; m < conf->copies; m++) {
+ int dev = r10_bio->devs[m].devnum;
+ struct bio *bio = r10_bio->devs[m].bio;
+ rdev = conf->mirrors[dev].rdev;
+ if (bio == IO_MADE_GOOD) {
+ rdev_clear_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors);
+ rdev_dec_pending(rdev, conf->mddev);
+ } else if (bio != NULL &&
+ !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ if (!narrow_write_error(r10_bio, m)) {
+ md_error(conf->mddev, rdev);
+ set_bit(R10BIO_Degraded,
+ &r10_bio->state);
+ }
+ rdev_dec_pending(rdev, conf->mddev);
+ }
+ }
+ if (test_bit(R10BIO_WriteError,
+ &r10_bio->state))
+ close_write(r10_bio);
+ raid_end_bio_io(r10_bio);
+ }
+}
+
static void raid10d(mddev_t *mddev)
{
r10bio_t *r10_bio;
- struct bio *bio;
unsigned long flags;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
- mdk_rdev_t *rdev;
struct blk_plug plug;
md_check_recovery(mddev);
blk_start_plug(&plug);
for (;;) {
- char b[BDEVNAME_SIZE];
flush_pending_writes(conf);
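
narrow_write_error(), added earlier in this hunk, trims the failed write down to badblock-shift sized pieces, and the first piece is shortened so that every later piece starts on a block boundary: sectors = ((sector + block_sectors) & ~(block_sectors - 1)) - sector. A small worked sketch of that alignment arithmetic, assuming block_sectors is a power of two (it is 1 << badblocks.shift in the code above):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Length of the first chunk so that the next chunk starts on a
 * block_sectors boundary; block_sectors must be a power of two. */
static int first_chunk(sector_t sector, int block_sectors)
{
        return (int)(((sector + block_sectors) &
                      ~(sector_t)(block_sectors - 1)) - sector);
}

int main(void)
{
        sector_t sector = 21;           /* deliberately misaligned */
        int sect_to_write = 20;
        int block_sectors = 8;          /* e.g. badblocks.shift == 3 */
        int sectors = first_chunk(sector, block_sectors);

        while (sect_to_write) {
                if (sectors > sect_to_write)
                        sectors = sect_to_write;
                printf("write %2d sectors at %llu\n", sectors, sector);
                sect_to_write -= sectors;
                sector += sectors;
                sectors = block_sectors;        /* later chunks are aligned */
        }
        return 0;
}

Starting at sector 21 with 8-sector blocks, this writes 3 sectors at 21, then 8 at 24, 8 at 32, and 1 at 40, so every failing piece maps onto at most one bad-block record.
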
@@ -1628,64 +2227,26 @@ static void raid10d(mddev_t *mddev)
mddev = r10_bio->mddev;
conf = mddev->private;
- if (test_bit(R10BIO_IsSync, &r10_bio->state))
+ if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+ test_bit(R10BIO_WriteError, &r10_bio->state))
+ handle_write_completed(conf, r10_bio);
+ else if (test_bit(R10BIO_IsSync, &r10_bio->state))
sync_request_write(mddev, r10_bio);
else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
recovery_request_write(mddev, r10_bio);
+ else if (test_bit(R10BIO_ReadError, &r10_bio->state))
+ handle_read_error(mddev, r10_bio);
else {
- int slot = r10_bio->read_slot;
- int mirror = r10_bio->devs[slot].devnum;
- /* we got a read error. Maybe the drive is bad. Maybe just
- * the block and we can fix it.
- * We freeze all other IO, and try reading the block from
- * other devices. When we find one, we re-write
- * and check it that fixes the read error.
- * This is all done synchronously while the array is
- * frozen.
+ /* just a partial read to be scheduled from a
+ * separate context
*/
- if (mddev->ro == 0) {
- freeze_array(conf);
- fix_read_error(conf, mddev, r10_bio);
- unfreeze_array(conf);
- }
- rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
-
- bio = r10_bio->devs[slot].bio;
- r10_bio->devs[slot].bio =
- mddev->ro ? IO_BLOCKED : NULL;
- mirror = read_balance(conf, r10_bio);
- if (mirror == -1) {
- printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev,b),
- (unsigned long long)r10_bio->sector);
- raid_end_bio_io(r10_bio);
- bio_put(bio);
- } else {
- const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
- bio_put(bio);
- slot = r10_bio->read_slot;
- rdev = conf->mirrors[mirror].rdev;
- if (printk_ratelimit())
- printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
- " another mirror\n",
- mdname(mddev),
- bdevname(rdev->bdev,b),
- (unsigned long long)r10_bio->sector);
- bio = bio_clone_mddev(r10_bio->master_bio,
- GFP_NOIO, mddev);
- r10_bio->devs[slot].bio = bio;
- bio->bi_sector = r10_bio->devs[slot].addr
- + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
- bio->bi_rw = READ | do_sync;
- bio->bi_private = r10_bio;
- bio->bi_end_io = raid10_end_read_request;
- generic_make_request(bio);
- }
+ int slot = r10_bio->read_slot;
+ generic_make_request(r10_bio->devs[slot].bio);
}
+
cond_resched();
+ if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ md_check_recovery(mddev);
}
blk_finish_plug(&plug);
}
@@ -1746,7 +2307,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
int i;
int max_sync;
sector_t sync_blocks;
-
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
@@ -1828,7 +2388,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* recovery... the complicated one */
- int j, k;
+ int j;
r10_bio = NULL;
for (i=0 ; i<conf->raid_disks; i++) {
@@ -1836,6 +2396,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
r10bio_t *rb2;
sector_t sect;
int must_sync;
+ int any_working;
if (conf->mirrors[i].rdev == NULL ||
test_bit(In_sync, &conf->mirrors[i].rdev->flags))
@@ -1887,19 +2448,42 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
must_sync = bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, still_degraded);
+ any_working = 0;
for (j=0; j<conf->copies;j++) {
+ int k;
int d = r10_bio->devs[j].devnum;
+ sector_t from_addr, to_addr;
+ mdk_rdev_t *rdev;
+ sector_t sector, first_bad;
+ int bad_sectors;
if (!conf->mirrors[d].rdev ||
!test_bit(In_sync, &conf->mirrors[d].rdev->flags))
continue;
/* This is where we read from */
+ any_working = 1;
+ rdev = conf->mirrors[d].rdev;
+ sector = r10_bio->devs[j].addr;
+
+ if (is_badblock(rdev, sector, max_sync,
+ &first_bad, &bad_sectors)) {
+ if (first_bad > sector)
+ max_sync = first_bad - sector;
+ else {
+ bad_sectors -= (sector
+ - first_bad);
+ if (max_sync > bad_sectors)
+ max_sync = bad_sectors;
+ continue;
+ }
+ }
bio = r10_bio->devs[0].bio;
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
- bio->bi_sector = r10_bio->devs[j].addr +
+ from_addr = r10_bio->devs[j].addr;
+ bio->bi_sector = from_addr +
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
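
The recovery path above clips max_sync against any bad range reported by is_badblock() on the read source: if the bad range starts beyond the current sector, the window is shortened to stop just before it; if the current sector is already inside the bad range, the window shrinks to the sectors still covered by it and the device is skipped as a source. A stand-alone sketch of that clipping rule, with the overlap reported by is_badblock() modelled as explicit first_bad/bad_sectors values:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/* Clip a window of *max_sync sectors starting at 'sector' against a bad
 * range [first_bad, first_bad + bad_sectors) known to overlap the window.
 * Returns true if the device is still usable up to the clipped window,
 * false if the window starts inside the bad range and the device must be
 * skipped as a read source. */
static bool clip_max_sync(sector_t sector, int *max_sync,
                          sector_t first_bad, int bad_sectors)
{
        if (first_bad > sector) {
                /* stop just before the bad range */
                *max_sync = (int)(first_bad - sector);
                return true;
        }
        /* already inside the bad range */
        bad_sectors -= (int)(sector - first_bad);
        if (*max_sync > bad_sectors)
                *max_sync = bad_sectors;
        return false;
}

int main(void)
{
        int max_sync = 128;

        /* bad range 100..109, window starts at 80 */
        if (clip_max_sync(80, &max_sync, 100, 10))
                printf("source usable, window clipped to %d sectors\n",
                       max_sync);

        max_sync = 128;
        /* same bad range, window starts at 104 (inside it) */
        if (!clip_max_sync(104, &max_sync, 100, 10))
                printf("skip this source, window shrunk to %d sectors\n",
                       max_sync);
        return 0;
}
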
@@ -1916,26 +2500,48 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = r10_bio->devs[k].addr +
+ to_addr = r10_bio->devs[k].addr;
+ bio->bi_sector = to_addr +
conf->mirrors[i].rdev->data_offset;
bio->bi_bdev = conf->mirrors[i].rdev->bdev;
r10_bio->devs[0].devnum = d;
+ r10_bio->devs[0].addr = from_addr;
r10_bio->devs[1].devnum = i;
+ r10_bio->devs[1].addr = to_addr;
break;
}
if (j == conf->copies) {
- /* Cannot recover, so abort the recovery */
+ /* Cannot recover, so abort the recovery or
+ * record a bad block */
put_buf(r10_bio);
if (rb2)
atomic_dec(&rb2->remaining);
r10_bio = rb2;
- if (!test_and_set_bit(MD_RECOVERY_INTR,
- &mddev->recovery))
- printk(KERN_INFO "md/raid10:%s: insufficient "
- "working devices for recovery.\n",
- mdname(mddev));
+ if (any_working) {
+ /* problem is that there are bad blocks
+ * on other device(s)
+ */
+ int k;
+ for (k = 0; k < conf->copies; k++)
+ if (r10_bio->devs[k].devnum == i)
+ break;
+ if (!rdev_set_badblocks(
+ conf->mirrors[i].rdev,
+ r10_bio->devs[k].addr,
+ max_sync, 0))
+ any_working = 0;
+ }
+ if (!any_working) {
+ if (!test_and_set_bit(MD_RECOVERY_INTR,
+ &mddev->recovery))
+ printk(KERN_INFO "md/raid10:%s: insufficient "
+ "working devices for recovery.\n",
+ mdname(mddev));
+ conf->mirrors[i].recovery_disabled
+ = mddev->recovery_disabled;
+ }
break;
}
}
@@ -1979,12 +2585,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
for (i=0; i<conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
+ sector_t first_bad, sector;
+ int bad_sectors;
+
bio = r10_bio->devs[i].bio;
bio->bi_end_io = NULL;
clear_bit(BIO_UPTODATE, &bio->bi_flags);
if (conf->mirrors[d].rdev == NULL ||
test_bit(Faulty, &conf->mirrors[d].rdev->flags))
continue;
+ sector = r10_bio->devs[i].addr;
+ if (is_badblock(conf->mirrors[d].rdev,
+ sector, max_sync,
+ &first_bad, &bad_sectors)) {
+ if (first_bad > sector)
+ max_sync = first_bad - sector;
+ else {
+ bad_sectors -= (sector - first_bad);
+ if (max_sync > bad_sectors)
+ max_sync = bad_sectors;
+ continue;
+ }
+ }
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
bio->bi_next = biolist;
@@ -1992,7 +2614,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
- bio->bi_sector = r10_bio->devs[i].addr +
+ bio->bi_sector = sector +
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
count++;
@@ -2079,7 +2701,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
return sectors_skipped + nr_sectors;
giveup:
/* There is nowhere to write, so all non-sync
- * drives must be failed, so try the next chunk...
+ * drives must be failed or in resync, or all drives
+ * have a bad block, so try the next chunk...
*/
if (sector_nr + max_sync < max_sector)
max_sector = sector_nr + max_sync;
@@ -2249,6 +2872,7 @@ static int run(mddev_t *mddev)
(conf->raid_disks / conf->near_copies));
list_for_each_entry(rdev, &mddev->disks, same_set) {
+
disk_idx = rdev->raid_disk;
if (disk_idx >= conf->raid_disks
|| disk_idx < 0)
@@ -2271,7 +2895,7 @@ static int run(mddev_t *mddev)
disk->head_position = 0;
}
/* need to check that every block has at least one working mirror */
- if (!enough(conf)) {
+ if (!enough(conf, -1)) {
printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev));
goto out_free_conf;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 944b110..79cb52a 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -6,6 +6,11 @@ typedef struct mirror_info mirror_info_t;
struct mirror_info {
mdk_rdev_t *rdev;
sector_t head_position;
+ int recovery_disabled; /* matches
+ * mddev->recovery_disabled
+ * when we shouldn't try
+ * recovering this device.
+ */
};
typedef struct r10bio_s r10bio_t;
@@ -113,10 +118,26 @@ struct r10bio_s {
* level, we store IO_BLOCKED in the appropriate 'bios' pointer
*/
#define IO_BLOCKED ((struct bio*)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context. So we record
+ * the success by setting devs[n].bio to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
/* bits for r10bio.state */
#define R10BIO_Uptodate 0
#define R10BIO_IsSync 1
#define R10BIO_IsRecover 2
#define R10BIO_Degraded 3
+/* Set ReadError on bios that experience a read error
+ * so that raid10d knows what to do with them.
+ */
+#define R10BIO_ReadError 4
+/* If a write for this request means we can clear some
+ * known-bad-block records, we set this flag.
+ */
+#define R10BIO_MadeGood 5
+#define R10BIO_WriteError 6
#endif
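
The raid10.h hunk above adds a second sentinel "pointer" value to devs[n].bio: IO_BLOCKED ((struct bio *)1) and IO_MADE_GOOD ((struct bio *)2), with BIO_SPECIAL() telling both apart from real bio pointers. A minimal illustration of that sentinel-pointer pattern, with struct bio reduced to an opaque stand-in and the sentinel values matching the header above:

#include <stdio.h>
#include <stdlib.h>

struct bio { int dummy; };      /* opaque stand-in */

#define IO_BLOCKED   ((struct bio *)1)
#define IO_MADE_GOOD ((struct bio *)2)
#define BIO_SPECIAL(bio) ((unsigned long)(bio) <= 2)

static void complete_slot(struct bio *bio)
{
        if (bio == NULL)
                printf("slot unused\n");
        else if (bio == IO_BLOCKED)
                printf("slot blocked: do not retry reads here\n");
        else if (bio == IO_MADE_GOOD)
                printf("write over a known-bad block worked: clear it\n");
        else
                printf("real bio at %p: complete normally\n", (void *)bio);
}

int main(void)
{
        struct bio *real = malloc(sizeof(*real));

        complete_slot(NULL);
        complete_slot(IO_BLOCKED);
        complete_slot(IO_MADE_GOOD);
        complete_slot(real);

        /* BIO_SPECIAL() lets completion paths skip bio_put() and friends. */
        printf("BIO_SPECIAL(real) = %d\n", BIO_SPECIAL(real));

        free(real);
        return 0;
}
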
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b72edf3..43709fa 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -51,6 +51,7 @@
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
+#include <linux/ratelimit.h>
#include "md.h"
#include "raid5.h"
#include "raid0.h"
@@ -96,8 +97,6 @@
#define __inline__
#endif
-#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
-
/*
* We maintain a biased count of active stripes in the bottom 16 bits of
* bi_phys_segments, and a count of processed stripes in the upper 16 bits
@@ -341,7 +340,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
(unsigned long long)sh->sector, i, dev->toread,
dev->read, dev->towrite, dev->written,
test_bit(R5_LOCKED, &dev->flags));
- BUG();
+ WARN_ON(1);
}
dev->flags = 0;
raid5_build_block(sh, i, previous);
@@ -527,6 +526,36 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
+ /* We have already checked bad blocks for reads. Now
+ * need to check for writes.
+ */
+ while ((rw & WRITE) && rdev &&
+ test_bit(WriteErrorSeen, &rdev->flags)) {
+ sector_t first_bad;
+ int bad_sectors;
+ int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+ &first_bad, &bad_sectors);
+ if (!bad)
+ break;
+
+ if (bad < 0) {
+ set_bit(BlockedBadBlocks, &rdev->flags);
+ if (!conf->mddev->external &&
+ conf->mddev->flags) {
+ /* It is very unlikely, but we might
+ * still need to write out the
+ * bad block log - better give it
+ * a chance */
+ md_check_recovery(conf->mddev);
+ }
+ md_wait_for_blocked_rdev(rdev, conf->mddev);
+ } else {
+ /* Acknowledged bad block - skip the write */
+ rdev_dec_pending(rdev, conf->mddev);
+ rdev = NULL;
+ }
+ }
+
if (rdev) {
if (s->syncing || s->expanding || s->expanded)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
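
The new loop in ops_run_io() above distinguishes two kinds of bad-block hit before issuing a write: a negative is_badblock() result means an unacknowledged bad block whose log entry has not reached disk yet, so the rdev is marked BlockedBadBlocks and the stripe waits; a positive result means the bad block is already acknowledged and the write for that device can simply be skipped. A hedged sketch of that decision, with the kernel calls replaced by illustrative stand-ins:

#include <stdio.h>

/* Illustrative is_badblock() outcome, mirroring the convention visible
 * in the hunk above:
 *   0   no known bad blocks in the range
 *   1   bad blocks present and already acknowledged (logged on disk)
 *  -1   bad blocks present but not yet acknowledged
 */
enum bb_state { BB_NONE = 0, BB_ACKED = 1, BB_UNACKED = -1 };

enum write_action { ISSUE_WRITE, SKIP_DEVICE, WAIT_FOR_LOG };

static enum write_action decide_write(enum bb_state bad)
{
        switch (bad) {
        case BB_NONE:
                return ISSUE_WRITE;     /* nothing in the way */
        case BB_ACKED:
                return SKIP_DEVICE;     /* drop this device's write */
        case BB_UNACKED:
        default:
                return WAIT_FOR_LOG;    /* block until the bad-block log
                                         * has been written out */
        }
}

int main(void)
{
        static const char *names[] = {
                "issue write", "skip device", "wait for bad-block log"
        };

        printf("no bad blocks    -> %s\n", names[decide_write(BB_NONE)]);
        printf("acked bad block  -> %s\n", names[decide_write(BB_ACKED)]);
        printf("unacked bad block-> %s\n", names[decide_write(BB_UNACKED)]);
        return 0;
}
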
@@ -548,10 +577,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
- if ((rw & WRITE) &&
- test_bit(R5_ReWrite, &sh->dev[i].flags))
- atomic_add(STRIPE_SECTORS,
- &rdev->corrected_errors);
generic_make_request(bi);
} else {
if (rw & WRITE)
@@ -1020,12 +1045,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
struct bio *wbi;
- spin_lock(&sh->lock);
+ spin_lock_irq(&sh->raid_conf->device_lock);
chosen = dev->towrite;
dev->towrite = NULL;
BUG_ON(dev->written);
wbi = dev->written = chosen;
- spin_unlock(&sh->lock);
+ spin_unlock_irq(&sh->raid_conf->device_lock);
while (wbi && wbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
@@ -1315,12 +1340,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
static int grow_one_stripe(raid5_conf_t *conf)
{
struct stripe_head *sh;
- sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
+ sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
if (!sh)
return 0;
- memset(sh, 0, sizeof(*sh) + (conf->pool_size-1)*sizeof(struct r5dev));
+
sh->raid_conf = conf;
- spin_lock_init(&sh->lock);
#ifdef CONFIG_MULTICORE_RAID456
init_waitqueue_head(&sh->ops.wait_for_ops);
#endif
@@ -1435,14 +1459,11 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
return -ENOMEM;
for (i = conf->max_nr_stripes; i; i--) {
- nsh = kmem_cache_alloc(sc, GFP_KERNEL);
+ nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
if (!nsh)
break;
- memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
-
nsh->raid_conf = conf;
- spin_lock_init(&nsh->lock);
#ifdef CONFIG_MULTICORE_RAID456
init_waitqueue_head(&nsh->ops.wait_for_ops);
#endif
@@ -1587,12 +1608,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
rdev = conf->disks[i].rdev;
- printk_rl(KERN_INFO "md/raid:%s: read error corrected"
- " (%lu sectors at %llu on %s)\n",
- mdname(conf->mddev), STRIPE_SECTORS,
- (unsigned long long)(sh->sector
- + rdev->data_offset),
- bdevname(rdev->bdev, b));
+ printk_ratelimited(
+ KERN_INFO
+ "md/raid:%s: read error corrected"
+ " (%lu sectors at %llu on %s)\n",
+ mdname(conf->mddev), STRIPE_SECTORS,
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
}
@@ -1606,22 +1630,24 @@ static void raid5_end_read_request(struct bio * bi, int error)
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
if (conf->mddev->degraded >= conf->max_degraded)
- printk_rl(KERN_WARNING
- "md/raid:%s: read error not correctable "
- "(sector %llu on %s).\n",
- mdname(conf->mddev),
- (unsigned long long)(sh->sector
- + rdev->data_offset),
- bdn);
+ printk_ratelimited(
+ KERN_WARNING
+ "md/raid:%s: read error not correctable "
+ "(sector %llu on %s).\n",
+ mdname(conf->mddev),
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdn);
else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
/* Oh, no!!! */
- printk_rl(KERN_WARNING
- "md/raid:%s: read error NOT corrected!! "
- "(sector %llu on %s).\n",
- mdname(conf->mddev),
- (unsigned long long)(sh->sector
- + rdev->data_offset),
- bdn);
+ printk_ratelimited(
+ KERN_WARNING
+ "md/raid:%s: read error NOT corrected!! "
+ "(sector %llu on %s).\n",
+ mdname(conf->mddev),
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdn);
else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
printk(KERN_WARNING
@@ -1649,6 +1675,8 @@ static void raid5_end_write_request(struct bio *bi, int error)
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
+ sector_t first_bad;
+ int bad_sectors;
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
@@ -1662,8 +1690,12 @@ static void raid5_end_write_request(struct bio *bi, int error)
return;
}
- if (!uptodate)
- md_error(conf->mddev, conf->disks[i].rdev);
+ if (!uptodate) {
+ set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
+ set_bit(R5_WriteError, &sh->dev[i].flags);
+ } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_MadeGood, &sh->dev[i].flags);
rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
@@ -1710,6 +1742,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
}
+ set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
printk(KERN_ALERT
@@ -1760,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
/*
* Select the parity disk based on the user selected algorithm.
*/
- pd_idx = qd_idx = ~0;
+ pd_idx = qd_idx = -1;
switch(conf->level) {
case 4:
pd_idx = data_disks;
@@ -2143,12 +2176,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
raid5_conf_t *conf = sh->raid_conf;
int firstwrite=0;
- pr_debug("adding bh b#%llu to stripe s#%llu\n",
+ pr_debug("adding bi b#%llu to stripe s#%llu\n",
(unsigned long long)bi->bi_sector,
(unsigned long long)sh->sector);
- spin_lock(&sh->lock);
spin_lock_irq(&conf->device_lock);
if (forwrite) {
bip = &sh->dev[dd_idx].towrite;
@@ -2169,19 +2201,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
bi->bi_next = *bip;
*bip = bi;
bi->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
- spin_unlock(&sh->lock);
-
- pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
- (unsigned long long)bi->bi_sector,
- (unsigned long long)sh->sector, dd_idx);
-
- if (conf->mddev->bitmap && firstwrite) {
- bitmap_startwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0);
- sh->bm_seq = conf->seq_flush+1;
- set_bit(STRIPE_BIT_DELAY, &sh->state);
- }
if (forwrite) {
/* check if page is covered */
@@ -2196,12 +2215,23 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
}
+ spin_unlock_irq(&conf->device_lock);
+
+ pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
+ (unsigned long long)(*bip)->bi_sector,
+ (unsigned long long)sh->sector, dd_idx);
+
+ if (conf->mddev->bitmap && firstwrite) {
+ bitmap_startwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS, 0);
+ sh->bm_seq = conf->seq_flush+1;
+ set_bit(STRIPE_BIT_DELAY, &sh->state);
+ }
return 1;
overlap:
set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
spin_unlock_irq(&conf->device_lock);
- spin_unlock(&sh->lock);
return 0;
}
@@ -2238,9 +2268,18 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
rcu_read_lock();
rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && test_bit(In_sync, &rdev->flags))
- /* multiple read failures in one stripe */
- md_error(conf->mddev, rdev);
+ atomic_inc(&rdev->nr_pending);
+ else
+ rdev = NULL;
rcu_read_unlock();
+ if (rdev) {
+ if (!rdev_set_badblocks(
+ rdev,
+ sh->sector,
+ STRIPE_SECTORS, 0))
+ md_error(conf->mddev, rdev);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
}
spin_lock_irq(&conf->device_lock);
/* fail all writes first */
@@ -2308,6 +2347,10 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
if (bitmap_end)
bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0, 0);
+ /* If we were in the middle of a write the parity block might
+ * still be locked - so just clear all R5_LOCKED flags
+ */
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
}
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
@@ -2315,109 +2358,73 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
md_wakeup_thread(conf->mddev->thread);
}
-/* fetch_block5 - checks the given member device to see if its data needs
- * to be read or computed to satisfy a request.
- *
- * Returns 1 when no more member devices need to be checked, otherwise returns
- * 0 to tell the loop in handle_stripe_fill5 to continue
- */
-static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
- int disk_idx, int disks)
-{
- struct r5dev *dev = &sh->dev[disk_idx];
- struct r5dev *failed_dev = &sh->dev[s->failed_num];
-
- /* is the data in this block needed, and can we get it? */
- if (!test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags) &&
- (dev->toread ||
- (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
- s->syncing || s->expanding ||
- (s->failed &&
- (failed_dev->toread ||
- (failed_dev->towrite &&
- !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
- /* We would like to get this block, possibly by computing it,
- * otherwise read it if the backing disk is insync
- */
- if ((s->uptodate == disks - 1) &&
- (s->failed && disk_idx == s->failed_num)) {
- set_bit(STRIPE_COMPUTE_RUN, &sh->state);
- set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
- set_bit(R5_Wantcompute, &dev->flags);
- sh->ops.target = disk_idx;
- sh->ops.target2 = -1;
- s->req_compute = 1;
- /* Careful: from this point on 'uptodate' is in the eye
- * of raid_run_ops which services 'compute' operations
- * before writes. R5_Wantcompute flags a block that will
- * be R5_UPTODATE by the time it is needed for a
- * subsequent operation.
- */
- s->uptodate++;
- return 1; /* uptodate + compute == disks */
- } else if (test_bit(R5_Insync, &dev->flags)) {
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- s->locked++;
- pr_debug("Reading block %d (sync=%d)\n", disk_idx,
- s->syncing);
- }
- }
-
- return 0;
-}
-
-/**
- * handle_stripe_fill5 - read or compute data to satisfy pending requests.
- */
-static void handle_stripe_fill5(struct stripe_head *sh,
- struct stripe_head_state *s, int disks)
+static void
+handle_failed_sync(raid5_conf_t *conf, struct stripe_head *sh,
+ struct stripe_head_state *s)
{
+ int abort = 0;
int i;
- /* look for blocks to read/compute, skip this if a compute
- * is already in flight, or if the stripe contents are in the
- * midst of changing due to a write
+ md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
+ clear_bit(STRIPE_SYNCING, &sh->state);
+ s->syncing = 0;
+ /* There is nothing more to do for sync/check/repair.
+ * For recover we need to record a bad block on all
+ * non-sync devices, or abort the recovery
*/
- if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
- !sh->reconstruct_state)
- for (i = disks; i--; )
- if (fetch_block5(sh, s, i, disks))
- break;
- set_bit(STRIPE_HANDLE, &sh->state);
+ if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
+ return;
+ /* During recovery devices cannot be removed, so locking and
+ * refcounting of rdevs is not needed
+ */
+ for (i = 0; i < conf->raid_disks; i++) {
+ mdk_rdev_t *rdev = conf->disks[i].rdev;
+ if (!rdev
+ || test_bit(Faulty, &rdev->flags)
+ || test_bit(In_sync, &rdev->flags))
+ continue;
+ if (!rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
+ abort = 1;
+ }
+ if (abort) {
+ conf->recovery_disabled = conf->mddev->recovery_disabled;
+ set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery);
+ }
}
-/* fetch_block6 - checks the given member device to see if its data needs
+/* fetch_block - checks the given member device to see if its data needs
* to be read or computed to satisfy a request.
*
* Returns 1 when no more member devices need to be checked, otherwise returns
- * 0 to tell the loop in handle_stripe_fill6 to continue
+ * 0 to tell the loop in handle_stripe_fill to continue
*/
-static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
- struct r6_state *r6s, int disk_idx, int disks)
+static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
+ int disk_idx, int disks)
{
struct r5dev *dev = &sh->dev[disk_idx];
- struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
- &sh->dev[r6s->failed_num[1]] };
+ struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
+ &sh->dev[s->failed_num[1]] };
+ /* is the data in this block needed, and can we get it? */
if (!test_bit(R5_LOCKED, &dev->flags) &&
!test_bit(R5_UPTODATE, &dev->flags) &&
(dev->toread ||
(dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
s->syncing || s->expanding ||
- (s->failed >= 1 &&
- (fdev[0]->toread || s->to_write)) ||
- (s->failed >= 2 &&
- (fdev[1]->toread || s->to_write)))) {
+ (s->failed >= 1 && fdev[0]->toread) ||
+ (s->failed >= 2 && fdev[1]->toread) ||
+ (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
+ !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
+ (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
/* we would like to get this block, possibly by computing it,
* otherwise read it if the backing disk is insync
*/
BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
BUG_ON(test_bit(R5_Wantread, &dev->flags));
if ((s->uptodate == disks - 1) &&
- (s->failed && (disk_idx == r6s->failed_num[0] ||
- disk_idx == r6s->failed_num[1]))) {
+ (s->failed && (disk_idx == s->failed_num[0] ||
+ disk_idx == s->failed_num[1]))) {
/* have disk failed, and we're requested to fetch it;
* do compute it
*/
@@ -2429,6 +2436,12 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
sh->ops.target = disk_idx;
sh->ops.target2 = -1; /* no 2nd target */
s->req_compute = 1;
+ /* Careful: from this point on 'uptodate' is in the eye
+ * of raid_run_ops which services 'compute' operations
+ * before writes. R5_Wantcompute flags a block that will
+ * be R5_UPTODATE by the time it is needed for a
+ * subsequent operation.
+ */
s->uptodate++;
return 1;
} else if (s->uptodate == disks-2 && s->failed >= 2) {
@@ -2469,11 +2482,11 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
}
/**
- * handle_stripe_fill6 - read or compute data to satisfy pending requests.
+ * handle_stripe_fill - read or compute data to satisfy pending requests.
*/
-static void handle_stripe_fill6(struct stripe_head *sh,
- struct stripe_head_state *s, struct r6_state *r6s,
- int disks)
+static void handle_stripe_fill(struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
{
int i;
@@ -2484,7 +2497,7 @@ static void handle_stripe_fill6(struct stripe_head *sh,
if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
!sh->reconstruct_state)
for (i = disks; i--; )
- if (fetch_block6(sh, s, r6s, i, disks))
+ if (fetch_block(sh, s, i, disks))
break;
set_bit(STRIPE_HANDLE, &sh->state);
}
@@ -2540,11 +2553,19 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
md_wakeup_thread(conf->mddev->thread);
}
-static void handle_stripe_dirtying5(raid5_conf_t *conf,
- struct stripe_head *sh, struct stripe_head_state *s, int disks)
+static void handle_stripe_dirtying(raid5_conf_t *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
{
int rmw = 0, rcw = 0, i;
- for (i = disks; i--; ) {
+ if (conf->max_degraded == 2) {
+ /* RAID6 requires 'rcw' in current implementation
+ * Calculate the real rcw later - for now fake it
+ * look like rcw is cheaper
+ */
+ rcw = 1; rmw = 2;
+ } else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
if ((dev->towrite || i == sh->pd_idx) &&
@@ -2591,16 +2612,19 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
}
}
}
- if (rcw <= rmw && rcw > 0)
+ if (rcw <= rmw && rcw > 0) {
/* want reconstruct write, but need to get some data */
+ rcw = 0;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (!test_bit(R5_OVERWRITE, &dev->flags) &&
- i != sh->pd_idx &&
+ i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags)) &&
- test_bit(R5_Insync, &dev->flags)) {
+ test_bit(R5_Wantcompute, &dev->flags))) {
+ rcw++;
+ if (!test_bit(R5_Insync, &dev->flags))
+ continue; /* it's a failed drive */
if (
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
pr_debug("Read_old block "
@@ -2614,6 +2638,7 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
}
}
}
+ }
/* now if nothing is locked, and if we have enough data,
* we can start a write request
*/
@@ -2630,53 +2655,6 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
schedule_reconstruction(sh, s, rcw == 0, 0);
}
-static void handle_stripe_dirtying6(raid5_conf_t *conf,
- struct stripe_head *sh, struct stripe_head_state *s,
- struct r6_state *r6s, int disks)
-{
- int rcw = 0, pd_idx = sh->pd_idx, i;
- int qd_idx = sh->qd_idx;
-
- set_bit(STRIPE_HANDLE, &sh->state);
- for (i = disks; i--; ) {
- struct r5dev *dev = &sh->dev[i];
- /* check if we haven't enough data */
- if (!test_bit(R5_OVERWRITE, &dev->flags) &&
- i != pd_idx && i != qd_idx &&
- !test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags))) {
- rcw++;
- if (!test_bit(R5_Insync, &dev->flags))
- continue; /* it's a failed drive */
-
- if (
- test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- pr_debug("Read_old stripe %llu "
- "block %d for Reconstruct\n",
- (unsigned long long)sh->sector, i);
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- s->locked++;
- } else {
- pr_debug("Request delayed stripe %llu "
- "block %d for Reconstruct\n",
- (unsigned long long)sh->sector, i);
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
- }
- /* now if nothing is locked, and if we have enough data, we can start a
- * write request
- */
- if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
- s->locked == 0 && rcw == 0 &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
- schedule_reconstruction(sh, s, 1, 0);
- }
-}
-
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
struct stripe_head_state *s, int disks)
{
@@ -2695,7 +2673,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
s->uptodate--;
break;
}
- dev = &sh->dev[s->failed_num];
+ dev = &sh->dev[s->failed_num[0]];
/* fall through */
case check_state_compute_result:
sh->check_state = check_state_idle;
@@ -2767,7 +2745,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
struct stripe_head_state *s,
- struct r6_state *r6s, int disks)
+ int disks)
{
int pd_idx = sh->pd_idx;
int qd_idx = sh->qd_idx;
@@ -2786,14 +2764,14 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
switch (sh->check_state) {
case check_state_idle:
/* start a new check operation if there are < 2 failures */
- if (s->failed == r6s->q_failed) {
+ if (s->failed == s->q_failed) {
/* The only possible failed device holds Q, so it
* makes sense to check P (If anything else were failed,
* we would have used P to recreate it).
*/
sh->check_state = check_state_run;
}
- if (!r6s->q_failed && s->failed < 2) {
+ if (!s->q_failed && s->failed < 2) {
/* Q is not failed, and we didn't use it to generate
* anything, so it makes sense to check it
*/
@@ -2835,13 +2813,13 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
*/
BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
if (s->failed == 2) {
- dev = &sh->dev[r6s->failed_num[1]];
+ dev = &sh->dev[s->failed_num[1]];
s->locked++;
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantwrite, &dev->flags);
}
if (s->failed >= 1) {
- dev = &sh->dev[r6s->failed_num[0]];
+ dev = &sh->dev[s->failed_num[0]];
s->locked++;
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantwrite, &dev->flags);
@@ -2928,8 +2906,7 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
}
}
-static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
- struct r6_state *r6s)
+static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh)
{
int i;
@@ -2971,7 +2948,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
for (j = 0; j < conf->raid_disks; j++)
if (j != sh2->pd_idx &&
- (!r6s || j != sh2->qd_idx) &&
+ j != sh2->qd_idx &&
!test_bit(R5_Expanded, &sh2->dev[j].flags))
break;
if (j == conf->raid_disks) {
@@ -3006,43 +2983,35 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
*
*/
-static void handle_stripe5(struct stripe_head *sh)
+static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
{
raid5_conf_t *conf = sh->raid_conf;
- int disks = sh->disks, i;
- struct bio *return_bi = NULL;
- struct stripe_head_state s;
+ int disks = sh->disks;
struct r5dev *dev;
- mdk_rdev_t *blocked_rdev = NULL;
- int prexor;
- int dec_preread_active = 0;
+ int i;
- memset(&s, 0, sizeof(s));
- pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
- "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
- atomic_read(&sh->count), sh->pd_idx, sh->check_state,
- sh->reconstruct_state);
+ memset(s, 0, sizeof(*s));
- spin_lock(&sh->lock);
- clear_bit(STRIPE_HANDLE, &sh->state);
- clear_bit(STRIPE_DELAYED, &sh->state);
-
- s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
- s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
+ s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
+ s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
+ s->failed_num[0] = -1;
+ s->failed_num[1] = -1;
/* Now to look around and see what can be done */
rcu_read_lock();
+ spin_lock_irq(&conf->device_lock);
for (i=disks; i--; ) {
mdk_rdev_t *rdev;
+ sector_t first_bad;
+ int bad_sectors;
+ int is_bad = 0;
dev = &sh->dev[i];
- pr_debug("check %d: state 0x%lx toread %p read %p write %p "
- "written %p\n", i, dev->flags, dev->toread, dev->read,
- dev->towrite, dev->written);
-
- /* maybe we can request a biofill operation
+ pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
+ i, dev->flags, dev->toread, dev->towrite, dev->written);
+ /* maybe we can reply to a read
*
* new wantfill requests are only permitted while
* ops_complete_biofill is guaranteed to be inactive
@@ -3052,37 +3021,74 @@ static void handle_stripe5(struct stripe_head *sh)
set_bit(R5_Wantfill, &dev->flags);
/* now count some things */
- if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
- if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
- if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
+ if (test_bit(R5_LOCKED, &dev->flags))
+ s->locked++;
+ if (test_bit(R5_UPTODATE, &dev->flags))
+ s->uptodate++;
+ if (test_bit(R5_Wantcompute, &dev->flags)) {
+ s->compute++;
+ BUG_ON(s->compute > 2);
+ }
if (test_bit(R5_Wantfill, &dev->flags))
- s.to_fill++;
+ s->to_fill++;
else if (dev->toread)
- s.to_read++;
+ s->to_read++;
if (dev->towrite) {
- s.to_write++;
+ s->to_write++;
if (!test_bit(R5_OVERWRITE, &dev->flags))
- s.non_overwrite++;
+ s->non_overwrite++;
}
if (dev->written)
- s.written++;
+ s->written++;
rdev = rcu_dereference(conf->disks[i].rdev);
- if (blocked_rdev == NULL &&
- rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
- blocked_rdev = rdev;
- atomic_inc(&rdev->nr_pending);
+ if (rdev) {
+ is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+ &first_bad, &bad_sectors);
+ if (s->blocked_rdev == NULL
+ && (test_bit(Blocked, &rdev->flags)
+ || is_bad < 0)) {
+ if (is_bad < 0)
+ set_bit(BlockedBadBlocks,
+ &rdev->flags);
+ s->blocked_rdev = rdev;
+ atomic_inc(&rdev->nr_pending);
+ }
}
clear_bit(R5_Insync, &dev->flags);
if (!rdev)
/* Not in-sync */;
- else if (test_bit(In_sync, &rdev->flags))
+ else if (is_bad) {
+ /* also not in-sync */
+ if (!test_bit(WriteErrorSeen, &rdev->flags)) {
+ /* treat as in-sync, but with a read error
+ * which we can now try to correct
+ */
+ set_bit(R5_Insync, &dev->flags);
+ set_bit(R5_ReadError, &dev->flags);
+ }
+ } else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
else {
- /* could be in-sync depending on recovery/reshape status */
+ /* in sync if before recovery_offset */
if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
set_bit(R5_Insync, &dev->flags);
}
+ if (test_bit(R5_WriteError, &dev->flags)) {
+ clear_bit(R5_Insync, &dev->flags);
+ if (!test_bit(Faulty, &rdev->flags)) {
+ s->handle_bad_blocks = 1;
+ atomic_inc(&rdev->nr_pending);
+ } else
+ clear_bit(R5_WriteError, &dev->flags);
+ }
+ if (test_bit(R5_MadeGood, &dev->flags)) {
+ if (!test_bit(Faulty, &rdev->flags)) {
+ s->handle_bad_blocks = 1;
+ atomic_inc(&rdev->nr_pending);
+ } else
+ clear_bit(R5_MadeGood, &dev->flags);
+ }
if (!test_bit(R5_Insync, &dev->flags)) {
/* The ReadError flag will just be confusing now */
clear_bit(R5_ReadError, &dev->flags);
@@ -3091,313 +3097,60 @@ static void handle_stripe5(struct stripe_head *sh)
if (test_bit(R5_ReadError, &dev->flags))
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(R5_Insync, &dev->flags)) {
- s.failed++;
- s.failed_num = i;
+ if (s->failed < 2)
+ s->failed_num[s->failed] = i;
+ s->failed++;
}
}
+ spin_unlock_irq(&conf->device_lock);
rcu_read_unlock();
-
- if (unlikely(blocked_rdev)) {
- if (s.syncing || s.expanding || s.expanded ||
- s.to_write || s.written) {
- set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
- }
- /* There is nothing for the blocked_rdev to block */
- rdev_dec_pending(blocked_rdev, conf->mddev);
- blocked_rdev = NULL;
- }
-
- if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
- set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
- set_bit(STRIPE_BIOFILL_RUN, &sh->state);
- }
-
- pr_debug("locked=%d uptodate=%d to_read=%d"
- " to_write=%d failed=%d failed_num=%d\n",
- s.locked, s.uptodate, s.to_read, s.to_write,
- s.failed, s.failed_num);
- /* check if the array has lost two devices and, if so, some requests might
- * need to be failed
- */
- if (s.failed > 1 && s.to_read+s.to_write+s.written)
- handle_failed_stripe(conf, sh, &s, disks, &return_bi);
- if (s.failed > 1 && s.syncing) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,0);
- clear_bit(STRIPE_SYNCING, &sh->state);
- s.syncing = 0;
- }
-
- /* might be able to return some write requests if the parity block
- * is safe, or on a failed drive
- */
- dev = &sh->dev[sh->pd_idx];
- if ( s.written &&
- ((test_bit(R5_Insync, &dev->flags) &&
- !test_bit(R5_LOCKED, &dev->flags) &&
- test_bit(R5_UPTODATE, &dev->flags)) ||
- (s.failed == 1 && s.failed_num == sh->pd_idx)))
- handle_stripe_clean_event(conf, sh, disks, &return_bi);
-
- /* Now we might consider reading some blocks, either to check/generate
- * parity, or to satisfy requests
- * or to load a block that is being partially written.
- */
- if (s.to_read || s.non_overwrite ||
- (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
- handle_stripe_fill5(sh, &s, disks);
-
- /* Now we check to see if any write operations have recently
- * completed
- */
- prexor = 0;
- if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
- prexor = 1;
- if (sh->reconstruct_state == reconstruct_state_drain_result ||
- sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
- sh->reconstruct_state = reconstruct_state_idle;
-
- /* All the 'written' buffers and the parity block are ready to
- * be written back to disk
- */
- BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
- for (i = disks; i--; ) {
- dev = &sh->dev[i];
- if (test_bit(R5_LOCKED, &dev->flags) &&
- (i == sh->pd_idx || dev->written)) {
- pr_debug("Writing block %d\n", i);
- set_bit(R5_Wantwrite, &dev->flags);
- if (prexor)
- continue;
- if (!test_bit(R5_Insync, &dev->flags) ||
- (i == sh->pd_idx && s.failed == 0))
- set_bit(STRIPE_INSYNC, &sh->state);
- }
- }
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- dec_preread_active = 1;
- }
-
- /* Now to consider new write requests and what else, if anything
- * should be read. We do not handle new writes when:
- * 1/ A 'write' operation (copy+xor) is already in flight.
- * 2/ A 'check' operation is in flight, as it may clobber the parity
- * block.
- */
- if (s.to_write && !sh->reconstruct_state && !sh->check_state)
- handle_stripe_dirtying5(conf, sh, &s, disks);
-
- /* maybe we need to check and possibly fix the parity for this stripe
- * Any reads will already have been scheduled, so we just see if enough
- * data is available. The parity check is held off while parity
- * dependent operations are in flight.
- */
- if (sh->check_state ||
- (s.syncing && s.locked == 0 &&
- !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
- !test_bit(STRIPE_INSYNC, &sh->state)))
- handle_parity_checks5(conf, sh, &s, disks);
-
- if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,1);
- clear_bit(STRIPE_SYNCING, &sh->state);
- }
-
- /* If the failed drive is just a ReadError, then we might need to progress
- * the repair/check process
- */
- if (s.failed == 1 && !conf->mddev->ro &&
- test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
- && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
- && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
- ) {
- dev = &sh->dev[s.failed_num];
- if (!test_bit(R5_ReWrite, &dev->flags)) {
- set_bit(R5_Wantwrite, &dev->flags);
- set_bit(R5_ReWrite, &dev->flags);
- set_bit(R5_LOCKED, &dev->flags);
- s.locked++;
- } else {
- /* let's read it back */
- set_bit(R5_Wantread, &dev->flags);
- set_bit(R5_LOCKED, &dev->flags);
- s.locked++;
- }
- }
-
- /* Finish reconstruct operations initiated by the expansion process */
- if (sh->reconstruct_state == reconstruct_state_result) {
- struct stripe_head *sh2
- = get_active_stripe(conf, sh->sector, 1, 1, 1);
- if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
- /* sh cannot be written until sh2 has been read.
- * so arrange for sh to be delayed a little
- */
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
- &sh2->state))
- atomic_inc(&conf->preread_active_stripes);
- release_stripe(sh2);
- goto unlock;
- }
- if (sh2)
- release_stripe(sh2);
-
- sh->reconstruct_state = reconstruct_state_idle;
- clear_bit(STRIPE_EXPANDING, &sh->state);
- for (i = conf->raid_disks; i--; ) {
- set_bit(R5_Wantwrite, &sh->dev[i].flags);
- set_bit(R5_LOCKED, &sh->dev[i].flags);
- s.locked++;
- }
- }
-
- if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
- !sh->reconstruct_state) {
- /* Need to write out all blocks after computing parity */
- sh->disks = conf->raid_disks;
- stripe_set_idx(sh->sector, conf, 0, sh);
- schedule_reconstruction(sh, &s, 1, 1);
- } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
- clear_bit(STRIPE_EXPAND_READY, &sh->state);
- atomic_dec(&conf->reshape_stripes);
- wake_up(&conf->wait_for_overlap);
- md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
- }
-
- if (s.expanding && s.locked == 0 &&
- !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
- handle_stripe_expansion(conf, sh, NULL);
-
- unlock:
- spin_unlock(&sh->lock);
-
- /* wait for this device to become unblocked */
- if (unlikely(blocked_rdev))
- md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
-
- if (s.ops_request)
- raid_run_ops(sh, s.ops_request);
-
- ops_run_io(sh, &s);
-
- if (dec_preread_active) {
- /* We delay this until after ops_run_io so that if make_request
- * is waiting on a flush, it won't continue until the writes
- * have actually been submitted.
- */
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) <
- IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
- return_io(return_bi);
}
-static void handle_stripe6(struct stripe_head *sh)
+static void handle_stripe(struct stripe_head *sh)
{
+ struct stripe_head_state s;
raid5_conf_t *conf = sh->raid_conf;
+ int i;
+ int prexor;
int disks = sh->disks;
- struct bio *return_bi = NULL;
- int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
- struct stripe_head_state s;
- struct r6_state r6s;
- struct r5dev *dev, *pdev, *qdev;
- mdk_rdev_t *blocked_rdev = NULL;
- int dec_preread_active = 0;
+ struct r5dev *pdev, *qdev;
+
+ clear_bit(STRIPE_HANDLE, &sh->state);
+ if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
+ /* already being handled, ensure it gets handled
+ * again when current action finishes */
+ set_bit(STRIPE_HANDLE, &sh->state);
+ return;
+ }
+
+ if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+ set_bit(STRIPE_SYNCING, &sh->state);
+ clear_bit(STRIPE_INSYNC, &sh->state);
+ }
+ clear_bit(STRIPE_DELAYED, &sh->state);
pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
"pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
(unsigned long long)sh->sector, sh->state,
- atomic_read(&sh->count), pd_idx, qd_idx,
+ atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
sh->check_state, sh->reconstruct_state);
- memset(&s, 0, sizeof(s));
-
- spin_lock(&sh->lock);
- clear_bit(STRIPE_HANDLE, &sh->state);
- clear_bit(STRIPE_DELAYED, &sh->state);
-
- s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
- s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
- /* Now to look around and see what can be done */
-
- rcu_read_lock();
- for (i=disks; i--; ) {
- mdk_rdev_t *rdev;
- dev = &sh->dev[i];
- pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
- i, dev->flags, dev->toread, dev->towrite, dev->written);
- /* maybe we can reply to a read
- *
- * new wantfill requests are only permitted while
- * ops_complete_biofill is guaranteed to be inactive
- */
- if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
- !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
- set_bit(R5_Wantfill, &dev->flags);
+ analyse_stripe(sh, &s);
- /* now count some things */
- if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
- if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
- if (test_bit(R5_Wantcompute, &dev->flags)) {
- s.compute++;
- BUG_ON(s.compute > 2);
- }
-
- if (test_bit(R5_Wantfill, &dev->flags)) {
- s.to_fill++;
- } else if (dev->toread)
- s.to_read++;
- if (dev->towrite) {
- s.to_write++;
- if (!test_bit(R5_OVERWRITE, &dev->flags))
- s.non_overwrite++;
- }
- if (dev->written)
- s.written++;
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (blocked_rdev == NULL &&
- rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
- blocked_rdev = rdev;
- atomic_inc(&rdev->nr_pending);
- }
- clear_bit(R5_Insync, &dev->flags);
- if (!rdev)
- /* Not in-sync */;
- else if (test_bit(In_sync, &rdev->flags))
- set_bit(R5_Insync, &dev->flags);
- else {
- /* in sync if before recovery_offset */
- if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
- set_bit(R5_Insync, &dev->flags);
- }
- if (!test_bit(R5_Insync, &dev->flags)) {
- /* The ReadError flag will just be confusing now */
- clear_bit(R5_ReadError, &dev->flags);
- clear_bit(R5_ReWrite, &dev->flags);
- }
- if (test_bit(R5_ReadError, &dev->flags))
- clear_bit(R5_Insync, &dev->flags);
- if (!test_bit(R5_Insync, &dev->flags)) {
- if (s.failed < 2)
- r6s.failed_num[s.failed] = i;
- s.failed++;
- }
+ if (s.handle_bad_blocks) {
+ set_bit(STRIPE_HANDLE, &sh->state);
+ goto finish;
}
- rcu_read_unlock();
- if (unlikely(blocked_rdev)) {
+ if (unlikely(s.blocked_rdev)) {
if (s.syncing || s.expanding || s.expanded ||
s.to_write || s.written) {
set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
+ goto finish;
}
/* There is nothing for the blocked_rdev to block */
- rdev_dec_pending(blocked_rdev, conf->mddev);
- blocked_rdev = NULL;
+ rdev_dec_pending(s.blocked_rdev, conf->mddev);
+ s.blocked_rdev = NULL;
}
if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
@@ -3408,83 +3161,88 @@ static void handle_stripe6(struct stripe_head *sh)
pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d,%d\n",
s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
- r6s.failed_num[0], r6s.failed_num[1]);
- /* check if the array has lost >2 devices and, if so, some requests
- * might need to be failed
+ s.failed_num[0], s.failed_num[1]);
+ /* check if the array has lost more than max_degraded devices and,
+ * if so, some requests might need to be failed.
*/
- if (s.failed > 2 && s.to_read+s.to_write+s.written)
- handle_failed_stripe(conf, sh, &s, disks, &return_bi);
- if (s.failed > 2 && s.syncing) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,0);
- clear_bit(STRIPE_SYNCING, &sh->state);
- s.syncing = 0;
- }
+ if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
+ handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
+ if (s.failed > conf->max_degraded && s.syncing)
+ handle_failed_sync(conf, sh, &s);
/*
* might be able to return some write requests if the parity blocks
* are safe, or on a failed drive
*/
- pdev = &sh->dev[pd_idx];
- r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
- || (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
- qdev = &sh->dev[qd_idx];
- r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
- || (s.failed >= 2 && r6s.failed_num[1] == qd_idx);
-
- if ( s.written &&
- ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
+ pdev = &sh->dev[sh->pd_idx];
+ s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
+ || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
+ qdev = &sh->dev[sh->qd_idx];
+ s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
+ || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
+ || conf->level < 6;
+
+ if (s.written &&
+ (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
&& !test_bit(R5_LOCKED, &pdev->flags)
&& test_bit(R5_UPTODATE, &pdev->flags)))) &&
- ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
+ (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
&& !test_bit(R5_LOCKED, &qdev->flags)
&& test_bit(R5_UPTODATE, &qdev->flags)))))
- handle_stripe_clean_event(conf, sh, disks, &return_bi);
+ handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
/* Now we might consider reading some blocks, either to check/generate
* parity, or to satisfy requests
* or to load a block that is being partially written.
*/
- if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
- (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
- handle_stripe_fill6(sh, &s, &r6s, disks);
+ if (s.to_read || s.non_overwrite
+ || (conf->level == 6 && s.to_write && s.failed)
+ || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
+ handle_stripe_fill(sh, &s, disks);
/* Now we check to see if any write operations have recently
* completed
*/
- if (sh->reconstruct_state == reconstruct_state_drain_result) {
-
+ prexor = 0;
+ if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
+ prexor = 1;
+ if (sh->reconstruct_state == reconstruct_state_drain_result ||
+ sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
sh->reconstruct_state = reconstruct_state_idle;
- /* All the 'written' buffers and the parity blocks are ready to
+
+ /* All the 'written' buffers and the parity block are ready to
* be written back to disk
*/
BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
- BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
+ BUG_ON(sh->qd_idx >= 0 &&
+ !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
for (i = disks; i--; ) {
- dev = &sh->dev[i];
+ struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_LOCKED, &dev->flags) &&
- (i == sh->pd_idx || i == qd_idx ||
- dev->written)) {
+ (i == sh->pd_idx || i == sh->qd_idx ||
+ dev->written)) {
pr_debug("Writing block %d\n", i);
- BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
set_bit(R5_Wantwrite, &dev->flags);
+ if (prexor)
+ continue;
if (!test_bit(R5_Insync, &dev->flags) ||
- ((i == sh->pd_idx || i == qd_idx) &&
- s.failed == 0))
+ ((i == sh->pd_idx || i == sh->qd_idx) &&
+ s.failed == 0))
set_bit(STRIPE_INSYNC, &sh->state);
}
}
if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- dec_preread_active = 1;
+ s.dec_preread_active = 1;
}
/* Now to consider new write requests and what else, if anything
* should be read. We do not handle new writes when:
- * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
+ * 1/ A 'write' operation (copy+xor) is already in flight.
* 2/ A 'check' operation is in flight, as it may clobber the parity
* block.
*/
if (s.to_write && !sh->reconstruct_state && !sh->check_state)
- handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
+ handle_stripe_dirtying(conf, sh, &s, disks);
/* maybe we need to check and possibly fix the parity for this stripe
* Any reads will already have been scheduled, so we just see if enough
@@ -3494,20 +3252,24 @@ static void handle_stripe6(struct stripe_head *sh)
if (sh->check_state ||
(s.syncing && s.locked == 0 &&
!test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
- !test_bit(STRIPE_INSYNC, &sh->state)))
- handle_parity_checks6(conf, sh, &s, &r6s, disks);
+ !test_bit(STRIPE_INSYNC, &sh->state))) {
+ if (conf->level == 6)
+ handle_parity_checks6(conf, sh, &s, disks);
+ else
+ handle_parity_checks5(conf, sh, &s, disks);
+ }
if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,1);
+ md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
clear_bit(STRIPE_SYNCING, &sh->state);
}
/* If the failed drives are just a ReadError, then we might need
* to progress the repair/check process
*/
- if (s.failed <= 2 && !conf->mddev->ro)
+ if (s.failed <= conf->max_degraded && !conf->mddev->ro)
for (i = 0; i < s.failed; i++) {
- dev = &sh->dev[r6s.failed_num[i]];
+ struct r5dev *dev = &sh->dev[s.failed_num[i]];
if (test_bit(R5_ReadError, &dev->flags)
&& !test_bit(R5_LOCKED, &dev->flags)
&& test_bit(R5_UPTODATE, &dev->flags)
@@ -3526,8 +3288,26 @@ static void handle_stripe6(struct stripe_head *sh)
}
}
+
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
+ struct stripe_head *sh_src
+ = get_active_stripe(conf, sh->sector, 1, 1, 1);
+ if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
+ /* sh cannot be written until sh_src has been read.
+ * so arrange for sh to be delayed a little
+ */
+ set_bit(STRIPE_DELAYED, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
+ &sh_src->state))
+ atomic_inc(&conf->preread_active_stripes);
+ release_stripe(sh_src);
+ goto finish;
+ }
+ if (sh_src)
+ release_stripe(sh_src);
+
sh->reconstruct_state = reconstruct_state_idle;
clear_bit(STRIPE_EXPANDING, &sh->state);
for (i = conf->raid_disks; i--; ) {
@@ -3539,24 +3319,7 @@ static void handle_stripe6(struct stripe_head *sh)
if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
!sh->reconstruct_state) {
- struct stripe_head *sh2
- = get_active_stripe(conf, sh->sector, 1, 1, 1);
- if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
- /* sh cannot be written until sh2 has been read.
- * so arrange for sh to be delayed a little
- */
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
- &sh2->state))
- atomic_inc(&conf->preread_active_stripes);
- release_stripe(sh2);
- goto unlock;
- }
- if (sh2)
- release_stripe(sh2);
-
- /* Need to write out all blocks after computing P&Q */
+ /* Need to write out all blocks after computing parity */
sh->disks = conf->raid_disks;
stripe_set_idx(sh->sector, conf, 0, sh);
schedule_reconstruction(sh, &s, 1, 1);
@@ -3569,22 +3332,39 @@ static void handle_stripe6(struct stripe_head *sh)
if (s.expanding && s.locked == 0 &&
!test_bit(STRIPE_COMPUTE_RUN, &sh->state))
- handle_stripe_expansion(conf, sh, &r6s);
-
- unlock:
- spin_unlock(&sh->lock);
+ handle_stripe_expansion(conf, sh);
+finish:
/* wait for this device to become unblocked */
- if (unlikely(blocked_rdev))
- md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
+ if (conf->mddev->external && unlikely(s.blocked_rdev))
+ md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+
+ if (s.handle_bad_blocks)
+ for (i = disks; i--; ) {
+ mdk_rdev_t *rdev;
+ struct r5dev *dev = &sh->dev[i];
+ if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
+ /* We own a safe reference to the rdev */
+ rdev = conf->disks[i].rdev;
+ if (!rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
+ md_error(conf->mddev, rdev);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
+ if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
+ rdev = conf->disks[i].rdev;
+ rdev_clear_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
+ }
if (s.ops_request)
raid_run_ops(sh, s.ops_request);
ops_run_io(sh, &s);
-
- if (dec_preread_active) {
+ if (s.dec_preread_active) {
/* We delay this until after ops_run_io so that if make_request
* is waiting on a flush, it won't continue until the writes
* have actually been submitted.
@@ -3595,15 +3375,9 @@ static void handle_stripe6(struct stripe_head *sh)
md_wakeup_thread(conf->mddev->thread);
}
- return_io(return_bi);
-}
+ return_io(s.return_bi);
-static void handle_stripe(struct stripe_head *sh)
-{
- if (sh->raid_conf->level == 6)
- handle_stripe6(sh);
- else
- handle_stripe5(sh);
+ clear_bit(STRIPE_ACTIVE, &sh->state);
}
static void raid5_activate_delayed(raid5_conf_t *conf)
@@ -3833,6 +3607,9 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
rcu_read_lock();
rdev = rcu_dereference(conf->disks[dd_idx].rdev);
if (rdev && test_bit(In_sync, &rdev->flags)) {
+ sector_t first_bad;
+ int bad_sectors;
+
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
@@ -3840,8 +3617,10 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
align_bi->bi_sector += rdev->data_offset;
- if (!bio_fits_rdev(align_bi)) {
- /* too big in some way */
+ if (!bio_fits_rdev(align_bi) ||
+ is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+ &first_bad, &bad_sectors)) {
+ /* too big in some way, or has a known bad block */
bio_put(align_bi);
rdev_dec_pending(rdev, mddev);
return 0;
@@ -4016,7 +3795,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
}
}
- if (bio_data_dir(bi) == WRITE &&
+ if (rw == WRITE &&
logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
release_stripe(sh);
@@ -4034,7 +3813,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
}
if (test_bit(STRIPE_EXPANDING, &sh->state) ||
- !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
+ !add_stripe_bio(sh, bi, dd_idx, rw)) {
/* Stripe is busy expanding or
* add failed due to overlap. Flush everything
* and wait a while
@@ -4375,10 +4154,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
- spin_lock(&sh->lock);
- set_bit(STRIPE_SYNCING, &sh->state);
- clear_bit(STRIPE_INSYNC, &sh->state);
- spin_unlock(&sh->lock);
+ set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
handle_stripe(sh);
release_stripe(sh);
@@ -4509,6 +4285,9 @@ static void raid5d(mddev_t *mddev)
release_stripe(sh);
cond_resched();
+ if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ md_check_recovery(mddev);
+
spin_lock_irq(&conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);
@@ -5313,6 +5092,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
* isn't possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
+ mddev->recovery_disabled != conf->recovery_disabled &&
!has_failed(conf) &&
number < conf->raid_disks) {
err = -EBUSY;
@@ -5341,6 +5121,9 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
int first = 0;
int last = conf->raid_disks - 1;
+ if (mddev->recovery_disabled == conf->recovery_disabled)
+ return -EBUSY;
+
if (has_failed(conf))
/* no point adding a device */
return -EINVAL;
@@ -5519,16 +5302,14 @@ static int raid5_start_reshape(mddev_t *mddev)
if (rdev->raid_disk < 0 &&
!test_bit(Faulty, &rdev->flags)) {
if (raid5_add_disk(mddev, rdev) == 0) {
- char nm[20];
if (rdev->raid_disk
>= conf->previous_raid_disks) {
set_bit(In_sync, &rdev->flags);
added_devices++;
} else
rdev->recovery_offset = 0;
- sprintf(nm, "rd%d", rdev->raid_disk);
- if (sysfs_create_link(&mddev->kobj,
- &rdev->kobj, nm))
+
+ if (sysfs_link_rdev(mddev, rdev))
/* Failure here is OK */;
}
} else if (rdev->raid_disk >= conf->previous_raid_disks
@@ -5624,9 +5405,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
d++) {
mdk_rdev_t *rdev = conf->disks[d].rdev;
if (rdev && raid5_remove_disk(mddev, d) == 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 3ca77a2..11b9566 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -6,11 +6,11 @@
/*
*
- * Each stripe contains one buffer per disc. Each buffer can be in
+ * Each stripe contains one buffer per device. Each buffer can be in
* one of a number of states stored in "flags". Changes between
- * these states happen *almost* exclusively under a per-stripe
- * spinlock. Some very specific changes can happen in bi_end_io, and
- * these are not protected by the spin lock.
+ * these states happen *almost* exclusively under the protection of the
+ * STRIPE_ACTIVE flag. Some very specific changes can happen in bi_end_io, and
+ * these are not protected by STRIPE_ACTIVE.
*
* The flag bits that are used to represent these states are:
* R5_UPTODATE and R5_LOCKED
@@ -76,12 +76,10 @@
* block and the cached buffer are successfully written, any buffer on
* a written list can be returned with b_end_io.
*
- * The write list and read list both act as fifos. The read list is
- * protected by the device_lock. The write and written lists are
- * protected by the stripe lock. The device_lock, which can be
- * claimed while the stipe lock is held, is only for list
- * manipulations and will only be held for a very short time. It can
- * be claimed from interrupts.
+ * The write list and read list both act as fifos. The read list,
+ * write list and written list are protected by the device_lock.
+ * The device_lock is only for list manipulations and will only be
+ * held for a very short time. It can be claimed from interrupts.
*
*
* Stripes in the stripe cache can be on one of two lists (or on
@@ -96,7 +94,6 @@
*
* The inactive_list, handle_list and hash bucket lists are all protected by the
* device_lock.
- * - stripes on the inactive_list never have their stripe_lock held.
* - stripes have a reference counter. If count==0, they are on a list.
* - If a stripe might need handling, STRIPE_HANDLE is set.
* - When refcount reaches zero, then if STRIPE_HANDLE it is put on
@@ -116,10 +113,10 @@
* attach a request to an active stripe (add_stripe_bh())
* lockdev attach-buffer unlockdev
* handle a stripe (handle_stripe())
- * lockstripe clrSTRIPE_HANDLE ...
+ * setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
* (lockdev check-buffers unlockdev) ..
* change-state ..
- * record io/ops needed unlockstripe schedule io/ops
+ * record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
* release an active stripe (release_stripe())
* lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
*
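/*
 * Editorial sketch, not part of this patch: the handle_stripe() flow described
 * above now relies on an atomic STRIPE_ACTIVE bit instead of the old per-stripe
 * spinlock.  A minimal userspace illustration of that handshake follows; the
 * demo_* names are invented, and GCC __atomic builtins stand in for the
 * kernel's test_and_set_bit()/clear_bit().
 */
#include <stdio.h>

enum { DEMO_ACTIVE, DEMO_HANDLE };	/* stand-ins for STRIPE_ACTIVE / STRIPE_HANDLE */

static unsigned long demo_state;

static int demo_test_and_set_bit(int nr, unsigned long *addr)
{
	return (__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_SEQ_CST) >> nr) & 1;
}

static void demo_clear_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_SEQ_CST);
}

static void demo_handle_stripe(void)
{
	if (demo_test_and_set_bit(DEMO_ACTIVE, &demo_state)) {
		/* someone else owns the stripe: re-arm HANDLE and back off */
		__atomic_fetch_or(&demo_state, 1UL << DEMO_HANDLE, __ATOMIC_SEQ_CST);
		return;
	}
	printf("handling stripe; state transitions happen under STRIPE_ACTIVE\n");
	demo_clear_bit(DEMO_ACTIVE, &demo_state);
}

int main(void)
{
	demo_handle_stripe();
	return 0;
}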
@@ -128,8 +125,7 @@
* on a cached buffer, and plus one if the stripe is undergoing stripe
* operations.
*
- * Stripe operations are performed outside the stripe lock,
- * the stripe operations are:
+ * The stripe operations are:
* -copying data between the stripe cache and user application buffers
* -computing blocks to save a disk access, or to recover a missing block
* -updating the parity on a write operation (reconstruct write and
@@ -159,7 +155,8 @@
*/
/*
- * Operations state - intermediate states that are visible outside of sh->lock
+ * Operations state - intermediate states that are visible outside of
+ * STRIPE_ACTIVE.
* In general _idle indicates nothing is running, _run indicates a data
* processing operation is active, and _result means the data processing result
* is stable and can be acted upon. For simple operations like biofill and
@@ -209,7 +206,6 @@ struct stripe_head {
short ddf_layout;/* use DDF ordering to calculate Q */
unsigned long state; /* state flags */
atomic_t count; /* nr of active thread/requests */
- spinlock_t lock;
int bm_seq; /* sequence number for bitmap flushes */
int disks; /* disks in stripe */
enum check_states check_state;
@@ -240,19 +236,20 @@ struct stripe_head {
};
/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
- * for handle_stripe. It is only valid under spin_lock(sh->lock);
+ * for handle_stripe.
*/
struct stripe_head_state {
int syncing, expanding, expanded;
int locked, uptodate, to_read, to_write, failed, written;
int to_fill, compute, req_compute, non_overwrite;
- int failed_num;
+ int failed_num[2];
+ int p_failed, q_failed;
+ int dec_preread_active;
unsigned long ops_request;
-};
-/* r6_state - extra state data only relevant to r6 */
-struct r6_state {
- int p_failed, q_failed, failed_num[2];
+ struct bio *return_bi;
+ mdk_rdev_t *blocked_rdev;
+ int handle_bad_blocks;
};
/* Flags */
@@ -268,14 +265,16 @@ struct r6_state {
#define R5_ReWrite 9 /* have tried to over-write the readerror */
#define R5_Expanded 10 /* This block now has post-expand data */
-#define R5_Wantcompute 11 /* compute_block in progress treat as
- * uptodate
- */
-#define R5_Wantfill 12 /* dev->toread contains a bio that needs
- * filling
- */
-#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
-#define R5_WantFUA 14 /* Write should be FUA */
+#define R5_Wantcompute 11 /* compute_block in progress treat as
+ * uptodate
+ */
+#define R5_Wantfill 12 /* dev->toread contains a bio that needs
+ * filling
+ */
+#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
+#define R5_WantFUA 14 /* Write should be FUA */
+#define R5_WriteError 15 /* got a write error - need to record it */
+#define R5_MadeGood 16 /* A bad block has been fixed by writing to it*/
/*
* Write method
*/
@@ -289,21 +288,25 @@ struct r6_state {
/*
* Stripe state
*/
-#define STRIPE_HANDLE 2
-#define STRIPE_SYNCING 3
-#define STRIPE_INSYNC 4
-#define STRIPE_PREREAD_ACTIVE 5
-#define STRIPE_DELAYED 6
-#define STRIPE_DEGRADED 7
-#define STRIPE_BIT_DELAY 8
-#define STRIPE_EXPANDING 9
-#define STRIPE_EXPAND_SOURCE 10
-#define STRIPE_EXPAND_READY 11
-#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
-#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
-#define STRIPE_BIOFILL_RUN 14
-#define STRIPE_COMPUTE_RUN 15
-#define STRIPE_OPS_REQ_PENDING 16
+enum {
+ STRIPE_ACTIVE,
+ STRIPE_HANDLE,
+ STRIPE_SYNC_REQUESTED,
+ STRIPE_SYNCING,
+ STRIPE_INSYNC,
+ STRIPE_PREREAD_ACTIVE,
+ STRIPE_DELAYED,
+ STRIPE_DEGRADED,
+ STRIPE_BIT_DELAY,
+ STRIPE_EXPANDING,
+ STRIPE_EXPAND_SOURCE,
+ STRIPE_EXPAND_READY,
+ STRIPE_IO_STARTED, /* do not count towards 'bypass_count' */
+ STRIPE_FULL_WRITE, /* all blocks are set to be overwritten */
+ STRIPE_BIOFILL_RUN,
+ STRIPE_COMPUTE_RUN,
+ STRIPE_OPS_REQ_PENDING,
+};
/*
* Operation request flags
@@ -336,7 +339,7 @@ struct r6_state {
* PREREAD_ACTIVE.
* In stripe_handle, if we find pre-reading is necessary, we do it if
* PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
- * HANDLE gets cleared if stripe_handle leave nothing locked.
+ * HANDLE gets cleared if stripe_handle leaves nothing locked.
*/
@@ -399,7 +402,7 @@ struct raid5_private_data {
* (fresh device added).
* Cleared when a sync completes.
*/
-
+ int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 6995940..9575db4 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -68,7 +68,6 @@ config VIDEO_V4L2_SUBDEV_API
config DVB_CORE
tristate "DVB for Linux"
- depends on NET && INET
select CRC32
help
DVB core utility functions for device handling, software fallbacks etc.
@@ -85,6 +84,19 @@ config DVB_CORE
If unsure say N.
+config DVB_NET
+ bool "DVB Network Support"
+ default (NET && INET)
+ depends on NET && INET && DVB_CORE
+ help
+ This option enables DVB Network Support which is a part of the DVB
+ standard. It is used, for example, by automatic firmware updates used
+ on Set-Top-Boxes. It can also be used to access the Internet via the
+ DVB card, if the network provider supports it.
+
+ You may want to disable the network support on embedded devices. If
+ unsure say Y.
+
config VIDEO_MEDIA
tristate
default (DVB_CORE && (VIDEO_DEV = n)) || (VIDEO_DEV && (DVB_CORE = n)) || (DVB_CORE && VIDEO_DEV)
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 22d3ca3..996302a 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -23,6 +23,7 @@ config MEDIA_TUNER
depends on VIDEO_MEDIA && I2C
select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_XC4000 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MT20XX if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMISE
@@ -152,6 +153,15 @@ config MEDIA_TUNER_XC5000
This device is only used inside a SiP called together with a
demodulator for now.
+config MEDIA_TUNER_XC4000
+ tristate "Xceive XC4000 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ A driver for the silicon tuner XC4000 from Xceive.
+ This device is only used inside a SiP called together with a
+ demodulator for now.
+
config MEDIA_TUNER_MXL5005S
tristate "MaxLinear MSL5005S silicon tuner"
depends on VIDEO_MEDIA && I2C
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
index 2cb4f53..20d24fc 100644
--- a/drivers/media/common/tuners/Makefile
+++ b/drivers/media/common/tuners/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MEDIA_TUNER_TDA9887) += tda9887.o
obj-$(CONFIG_MEDIA_TUNER_TDA827X) += tda827x.o
obj-$(CONFIG_MEDIA_TUNER_TDA18271) += tda18271.o
obj-$(CONFIG_MEDIA_TUNER_XC5000) += xc5000.o
+obj-$(CONFIG_MEDIA_TUNER_XC4000) += xc4000.o
obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o
obj-$(CONFIG_MEDIA_TUNER_MT2266) += mt2266.o
obj-$(CONFIG_MEDIA_TUNER_QT1010) += qt1010.o
diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c
index afba6dc..94a603a 100644
--- a/drivers/media/common/tuners/tuner-types.c
+++ b/drivers/media/common/tuners/tuner-types.c
@@ -1805,6 +1805,10 @@ struct tunertype tuners[] = {
.name = "Xceive 5000 tuner",
/* see xc5000.c for details */
},
+ [TUNER_XC4000] = { /* Xceive 4000 */
+ .name = "Xceive 4000 tuner",
+ /* see xc4000.c for details */
+ },
[TUNER_TCL_MF02GIP_5N] = { /* TCL tuner MF02GIP-5N-E */
.name = "TCL tuner MF02GIP-5N-E",
.params = tuner_tcl_mf02gip_5n_params,
diff --git a/drivers/media/common/tuners/xc4000.c b/drivers/media/common/tuners/xc4000.c
new file mode 100644
index 0000000..634f4d9
--- /dev/null
+++ b/drivers/media/common/tuners/xc4000.c
@@ -0,0 +1,1691 @@
+/*
+ * Driver for Xceive XC4000 "QAM/8VSB single chip tuner"
+ *
+ * Copyright (c) 2007 Xceive Corporation
+ * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
+ * Copyright (c) 2009 Devin Heitmueller <dheitmueller@kernellabs.com>
+ * Copyright (c) 2009 Davide Ferri <d.ferri@zero11.it>
+ * Copyright (c) 2010 Istvan Varga <istvan_v@mailbox.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/videodev2.h>
+#include <linux/delay.h>
+#include <linux/dvb/frontend.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <asm/unaligned.h>
+
+#include "dvb_frontend.h"
+
+#include "xc4000.h"
+#include "tuner-i2c.h"
+#include "tuner-xc2028-types.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debugging level (0 to 2, default: 0 (off)).");
+
+static int no_poweroff;
+module_param(no_poweroff, int, 0644);
+MODULE_PARM_DESC(no_poweroff, "Power management (1: disabled, 2: enabled, "
+ "0 (default): use device-specific default mode).");
+
+static int audio_std;
+module_param(audio_std, int, 0644);
+MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly "
+ "needs to know what audio standard is needed for some video standards "
+ "with audio A2 or NICAM. The valid settings are a sum of:\n"
+ " 1: use NICAM/B or A2/B instead of NICAM/A or A2/A\n"
+ " 2: use A2 instead of NICAM or BTSC\n"
+ " 4: use SECAM/K3 instead of K1\n"
+ " 8: use PAL-D/K audio for SECAM-D/K\n"
+ "16: use FM radio input 1 instead of input 2\n"
+ "32: use mono audio (the lower three bits are ignored)");
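/*
 * Editorial illustration, not part of this patch: audio_std is a bitwise sum
 * of the XC4000_AUDIO_STD_* flags defined further down in this file.  The
 * standalone snippet below (flag values copied from those defines) decodes an
 * example value; e.g. audio_std=34 selects A2 (2) plus forced mono (32).
 */
#include <stdio.h>

int main(void)
{
	unsigned int audio_std = 34;	/* example: 32 (MONO) + 2 (A2) */

	printf("B=%u A2=%u K3=%u L=%u INPUT1=%u MONO=%u\n",
	       audio_std & 1 ? 1u : 0u, audio_std & 2 ? 1u : 0u,
	       audio_std & 4 ? 1u : 0u, audio_std & 8 ? 1u : 0u,
	       audio_std & 16 ? 1u : 0u, audio_std & 32 ? 1u : 0u);
	return 0;
}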
+
+static char firmware_name[30];
+module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
+MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
+ "default firmware name.");
+
+static DEFINE_MUTEX(xc4000_list_mutex);
+static LIST_HEAD(hybrid_tuner_instance_list);
+
+#define dprintk(level, fmt, arg...) if (debug >= level) \
+ printk(KERN_INFO "%s: " fmt, "xc4000", ## arg)
+
+/* struct for storing firmware table */
+struct firmware_description {
+ unsigned int type;
+ v4l2_std_id id;
+ __u16 int_freq;
+ unsigned char *ptr;
+ unsigned int size;
+};
+
+struct firmware_properties {
+ unsigned int type;
+ v4l2_std_id id;
+ v4l2_std_id std_req;
+ __u16 int_freq;
+ unsigned int scode_table;
+ int scode_nr;
+};
+
+struct xc4000_priv {
+ struct tuner_i2c_props i2c_props;
+ struct list_head hybrid_tuner_instance_list;
+ struct firmware_description *firm;
+ int firm_size;
+ u32 if_khz;
+ u32 freq_hz;
+ u32 bandwidth;
+ u8 video_standard;
+ u8 rf_mode;
+ u8 default_pm;
+ u8 dvb_amplitude;
+ u8 set_smoothedcvbs;
+ u8 ignore_i2c_write_errors;
+ __u16 firm_version;
+ struct firmware_properties cur_fw;
+ __u16 hwmodel;
+ __u16 hwvers;
+ struct mutex lock;
+};
+
+#define XC4000_AUDIO_STD_B 1
+#define XC4000_AUDIO_STD_A2 2
+#define XC4000_AUDIO_STD_K3 4
+#define XC4000_AUDIO_STD_L 8
+#define XC4000_AUDIO_STD_INPUT1 16
+#define XC4000_AUDIO_STD_MONO 32
+
+#define XC4000_DEFAULT_FIRMWARE "dvb-fe-xc4000-1.4.fw"
+
+/* Misc Defines */
+#define MAX_TV_STANDARD 24
+#define XC_MAX_I2C_WRITE_LENGTH 64
+#define XC_POWERED_DOWN 0x80000000U
+
+/* Signal Types */
+#define XC_RF_MODE_AIR 0
+#define XC_RF_MODE_CABLE 1
+
+/* Product id */
+#define XC_PRODUCT_ID_FW_NOT_LOADED 0x2000
+#define XC_PRODUCT_ID_XC4000 0x0FA0
+#define XC_PRODUCT_ID_XC4100 0x1004
+
+/* Registers (Write-only) */
+#define XREG_INIT 0x00
+#define XREG_VIDEO_MODE 0x01
+#define XREG_AUDIO_MODE 0x02
+#define XREG_RF_FREQ 0x03
+#define XREG_D_CODE 0x04
+#define XREG_DIRECTSITTING_MODE 0x05
+#define XREG_SEEK_MODE 0x06
+#define XREG_POWER_DOWN 0x08
+#define XREG_SIGNALSOURCE 0x0A
+#define XREG_SMOOTHEDCVBS 0x0E
+#define XREG_AMPLITUDE 0x10
+
+/* Registers (Read-only) */
+#define XREG_ADC_ENV 0x00
+#define XREG_QUALITY 0x01
+#define XREG_FRAME_LINES 0x02
+#define XREG_HSYNC_FREQ 0x03
+#define XREG_LOCK 0x04
+#define XREG_FREQ_ERROR 0x05
+#define XREG_SNR 0x06
+#define XREG_VERSION 0x07
+#define XREG_PRODUCT_ID 0x08
+
+/*
+ Basic firmware description. This will remain with
+ the driver for documentation purposes.
+
+ This represents an I2C firmware file encoded as a
+ string of unsigned char. Format is as follows:
+
+ char[0 ]=len0_MSB -> len = len_MSB * 256 + len_LSB
+ char[1 ]=len0_LSB -> length of first write transaction
+ char[2 ]=data0 -> first byte to be sent
+ char[3 ]=data1
+ char[4 ]=data2
+ char[ ]=...
+ char[M ]=dataN -> last byte to be sent
+ char[M+1]=len1_MSB -> len = len_MSB * 256 + len_LSB
+ char[M+2]=len1_LSB -> length of second write transaction
+ char[M+3]=data0
+ char[M+4]=data1
+ ...
+ etc.
+
+ The [len] value should be interpreted as follows:
+
+ len= len_MSB _ len_LSB
+ len=1111_1111_1111_1111 : End of I2C_SEQUENCE
+ len=0000_0000_0000_0000 : Reset command: Do hardware reset
+ len=0NNN_NNNN_NNNN_NNNN : Normal transaction: number of bytes = {1:32767}
+ len=1WWW_WWWW_WWWW_WWWW : Wait command: wait for {1:32767} ms
+
+ For the RESET and WAIT commands, the two bytes that immediately follow
+ contain the length of the next transaction.
+*/
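/*
 * Editorial sketch, not part of this patch: a minimal userspace walker for the
 * length-prefixed format described above.  It only classifies each record; the
 * in-kernel consumer is xc_load_i2c_sequence() further down.  The sample bytes
 * are invented for illustration.
 */
#include <stdio.h>

static void walk_i2c_sequence(const unsigned char *seq)
{
	unsigned int pos = 0;

	for (;;) {
		unsigned int len = seq[pos] * 256 + seq[pos + 1];

		pos += 2;
		if (len == 0xFFFF) {		/* end of I2C_SEQUENCE */
			printf("END\n");
			return;
		} else if (len == 0x0000) {	/* hardware reset request */
			printf("RESET\n");
		} else if (len & 0x8000) {	/* wait for (len & 0x7FFF) ms */
			printf("WAIT %u ms\n", len & 0x7FFF);
		} else {			/* len data bytes follow */
			printf("DATA, %u bytes\n", len);
			pos += len;
		}
	}
}

int main(void)
{
	/* reset, wait 100 ms, one 2-byte write, end of sequence */
	static const unsigned char sample[] = {
		0x00, 0x00,
		0x80, 0x64,
		0x00, 0x02, 0xAB, 0xCD,
		0xFF, 0xFF,
	};

	walk_i2c_sequence(sample);
	return 0;
}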
+
+struct XC_TV_STANDARD {
+ const char *Name;
+ u16 audio_mode;
+ u16 video_mode;
+ u16 int_freq;
+};
+
+/* Tuner standards */
+#define XC4000_MN_NTSC_PAL_BTSC 0
+#define XC4000_MN_NTSC_PAL_A2 1
+#define XC4000_MN_NTSC_PAL_EIAJ 2
+#define XC4000_MN_NTSC_PAL_Mono 3
+#define XC4000_BG_PAL_A2 4
+#define XC4000_BG_PAL_NICAM 5
+#define XC4000_BG_PAL_MONO 6
+#define XC4000_I_PAL_NICAM 7
+#define XC4000_I_PAL_NICAM_MONO 8
+#define XC4000_DK_PAL_A2 9
+#define XC4000_DK_PAL_NICAM 10
+#define XC4000_DK_PAL_MONO 11
+#define XC4000_DK_SECAM_A2DK1 12
+#define XC4000_DK_SECAM_A2LDK3 13
+#define XC4000_DK_SECAM_A2MONO 14
+#define XC4000_DK_SECAM_NICAM 15
+#define XC4000_L_SECAM_NICAM 16
+#define XC4000_LC_SECAM_NICAM 17
+#define XC4000_DTV6 18
+#define XC4000_DTV8 19
+#define XC4000_DTV7_8 20
+#define XC4000_DTV7 21
+#define XC4000_FM_Radio_INPUT2 22
+#define XC4000_FM_Radio_INPUT1 23
+
+static struct XC_TV_STANDARD xc4000_standard[MAX_TV_STANDARD] = {
+ {"M/N-NTSC/PAL-BTSC", 0x0000, 0x80A0, 4500},
+ {"M/N-NTSC/PAL-A2", 0x0000, 0x80A0, 4600},
+ {"M/N-NTSC/PAL-EIAJ", 0x0040, 0x80A0, 4500},
+ {"M/N-NTSC/PAL-Mono", 0x0078, 0x80A0, 4500},
+ {"B/G-PAL-A2", 0x0000, 0x8159, 5640},
+ {"B/G-PAL-NICAM", 0x0004, 0x8159, 5740},
+ {"B/G-PAL-MONO", 0x0078, 0x8159, 5500},
+ {"I-PAL-NICAM", 0x0080, 0x8049, 6240},
+ {"I-PAL-NICAM-MONO", 0x0078, 0x8049, 6000},
+ {"D/K-PAL-A2", 0x0000, 0x8049, 6380},
+ {"D/K-PAL-NICAM", 0x0080, 0x8049, 6200},
+ {"D/K-PAL-MONO", 0x0078, 0x8049, 6500},
+ {"D/K-SECAM-A2 DK1", 0x0000, 0x8049, 6340},
+ {"D/K-SECAM-A2 L/DK3", 0x0000, 0x8049, 6000},
+ {"D/K-SECAM-A2 MONO", 0x0078, 0x8049, 6500},
+ {"D/K-SECAM-NICAM", 0x0080, 0x8049, 6200},
+ {"L-SECAM-NICAM", 0x8080, 0x0009, 6200},
+ {"L'-SECAM-NICAM", 0x8080, 0x4009, 6200},
+ {"DTV6", 0x00C0, 0x8002, 0},
+ {"DTV8", 0x00C0, 0x800B, 0},
+ {"DTV7/8", 0x00C0, 0x801B, 0},
+ {"DTV7", 0x00C0, 0x8007, 0},
+ {"FM Radio-INPUT2", 0x0008, 0x9800, 10700},
+ {"FM Radio-INPUT1", 0x0008, 0x9000, 10700}
+};
+
+static int xc4000_readreg(struct xc4000_priv *priv, u16 reg, u16 *val);
+static int xc4000_tuner_reset(struct dvb_frontend *fe);
+static void xc_debug_dump(struct xc4000_priv *priv);
+
+static int xc_send_i2c_data(struct xc4000_priv *priv, u8 *buf, int len)
+{
+ struct i2c_msg msg = { .addr = priv->i2c_props.addr,
+ .flags = 0, .buf = buf, .len = len };
+ if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
+ if (priv->ignore_i2c_write_errors == 0) {
+ printk(KERN_ERR "xc4000: I2C write failed (len=%i)\n",
+ len);
+ if (len == 4) {
+ printk(KERN_ERR "bytes %02x %02x %02x %02x\n", buf[0],
+ buf[1], buf[2], buf[3]);
+ }
+ return -EREMOTEIO;
+ }
+ }
+ return 0;
+}
+
+static int xc4000_tuner_reset(struct dvb_frontend *fe)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ int ret;
+
+ dprintk(1, "%s()\n", __func__);
+
+ if (fe->callback) {
+ ret = fe->callback(((fe->dvb) && (fe->dvb->priv)) ?
+ fe->dvb->priv :
+ priv->i2c_props.adap->algo_data,
+ DVB_FRONTEND_COMPONENT_TUNER,
+ XC4000_TUNER_RESET, 0);
+ if (ret) {
+ printk(KERN_ERR "xc4000: reset failed\n");
+ return -EREMOTEIO;
+ }
+ } else {
+ printk(KERN_ERR "xc4000: no tuner reset callback function, "
+ "fatal\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int xc_write_reg(struct xc4000_priv *priv, u16 regAddr, u16 i2cData)
+{
+ u8 buf[4];
+ int result;
+
+ buf[0] = (regAddr >> 8) & 0xFF;
+ buf[1] = regAddr & 0xFF;
+ buf[2] = (i2cData >> 8) & 0xFF;
+ buf[3] = i2cData & 0xFF;
+ result = xc_send_i2c_data(priv, buf, 4);
+
+ return result;
+}
+
+static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+
+ int i, nbytes_to_send, result;
+ unsigned int len, pos, index;
+ u8 buf[XC_MAX_I2C_WRITE_LENGTH];
+
+ index = 0;
+ while ((i2c_sequence[index] != 0xFF) ||
+ (i2c_sequence[index + 1] != 0xFF)) {
+ len = i2c_sequence[index] * 256 + i2c_sequence[index+1];
+ if (len == 0x0000) {
+ /* RESET command */
+ /* NOTE: this is ignored, as the reset callback was */
+ /* already called by check_firmware() */
+ index += 2;
+ } else if (len & 0x8000) {
+ /* WAIT command */
+ msleep(len & 0x7FFF);
+ index += 2;
+ } else {
+ /* Send i2c data whilst ensuring individual transactions
+ * do not exceed XC_MAX_I2C_WRITE_LENGTH bytes.
+ */
+ index += 2;
+ buf[0] = i2c_sequence[index];
+ buf[1] = i2c_sequence[index + 1];
+ pos = 2;
+ while (pos < len) {
+ if ((len - pos) > XC_MAX_I2C_WRITE_LENGTH - 2)
+ nbytes_to_send =
+ XC_MAX_I2C_WRITE_LENGTH;
+ else
+ nbytes_to_send = (len - pos + 2);
+ for (i = 2; i < nbytes_to_send; i++) {
+ buf[i] = i2c_sequence[index + pos +
+ i - 2];
+ }
+ result = xc_send_i2c_data(priv, buf,
+ nbytes_to_send);
+
+ if (result != 0)
+ return result;
+
+ pos += nbytes_to_send - 2;
+ }
+ index += len;
+ }
+ }
+ return 0;
+}
+
+static int xc_set_tv_standard(struct xc4000_priv *priv,
+ u16 video_mode, u16 audio_mode)
+{
+ int ret;
+ dprintk(1, "%s(0x%04x,0x%04x)\n", __func__, video_mode, audio_mode);
+ dprintk(1, "%s() Standard = %s\n",
+ __func__,
+ xc4000_standard[priv->video_standard].Name);
+
+ /* Don't complain when the request fails because of i2c stretching */
+ priv->ignore_i2c_write_errors = 1;
+
+ ret = xc_write_reg(priv, XREG_VIDEO_MODE, video_mode);
+ if (ret == 0)
+ ret = xc_write_reg(priv, XREG_AUDIO_MODE, audio_mode);
+
+ priv->ignore_i2c_write_errors = 0;
+
+ return ret;
+}
+
+static int xc_set_signal_source(struct xc4000_priv *priv, u16 rf_mode)
+{
+ dprintk(1, "%s(%d) Source = %s\n", __func__, rf_mode,
+ rf_mode == XC_RF_MODE_AIR ? "ANTENNA" : "CABLE");
+
+ if ((rf_mode != XC_RF_MODE_AIR) && (rf_mode != XC_RF_MODE_CABLE)) {
+ rf_mode = XC_RF_MODE_CABLE;
+ printk(KERN_ERR
+ "%s(), Invalid mode, defaulting to CABLE",
+ __func__);
+ }
+ return xc_write_reg(priv, XREG_SIGNALSOURCE, rf_mode);
+}
+
+static const struct dvb_tuner_ops xc4000_tuner_ops;
+
+static int xc_set_rf_frequency(struct xc4000_priv *priv, u32 freq_hz)
+{
+ u16 freq_code;
+
+ dprintk(1, "%s(%u)\n", __func__, freq_hz);
+
+ if ((freq_hz > xc4000_tuner_ops.info.frequency_max) ||
+ (freq_hz < xc4000_tuner_ops.info.frequency_min))
+ return -EINVAL;
+
+ freq_code = (u16)(freq_hz / 15625);
+
+ /* WAS: Starting in firmware version 1.1.44, Xceive recommends using the
+ FINERFREQ for all normal tuning (the doc indicates reg 0x03 should
+ only be used for fast scanning for channel lock) */
+ /* WAS: XREG_FINERFREQ */
+ return xc_write_reg(priv, XREG_RF_FREQ, freq_code);
+}
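/*
 * Editorial note, not part of this patch: XREG_RF_FREQ is programmed in
 * 15.625 kHz steps, so e.g. a 474 MHz request becomes
 * 474000000 / 15625 = 30336; the frequency_min/frequency_max check above
 * keeps freq_code within the tuner's supported range.
 */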
+
+static int xc_get_adc_envelope(struct xc4000_priv *priv, u16 *adc_envelope)
+{
+ return xc4000_readreg(priv, XREG_ADC_ENV, adc_envelope);
+}
+
+static int xc_get_frequency_error(struct xc4000_priv *priv, u32 *freq_error_hz)
+{
+ int result;
+ u16 regData;
+ u32 tmp;
+
+ result = xc4000_readreg(priv, XREG_FREQ_ERROR, &regData);
+ if (result != 0)
+ return result;
+
+ tmp = (u32)regData & 0xFFFFU;
+ tmp = (tmp < 0x8000U ? tmp : 0x10000U - tmp);
+ (*freq_error_hz) = tmp * 15625;
+ return result;
+}
+
+static int xc_get_lock_status(struct xc4000_priv *priv, u16 *lock_status)
+{
+ return xc4000_readreg(priv, XREG_LOCK, lock_status);
+}
+
+static int xc_get_version(struct xc4000_priv *priv,
+ u8 *hw_majorversion, u8 *hw_minorversion,
+ u8 *fw_majorversion, u8 *fw_minorversion)
+{
+ u16 data;
+ int result;
+
+ result = xc4000_readreg(priv, XREG_VERSION, &data);
+ if (result != 0)
+ return result;
+
+ (*hw_majorversion) = (data >> 12) & 0x0F;
+ (*hw_minorversion) = (data >> 8) & 0x0F;
+ (*fw_majorversion) = (data >> 4) & 0x0F;
+ (*fw_minorversion) = data & 0x0F;
+
+ return 0;
+}
+
+static int xc_get_hsync_freq(struct xc4000_priv *priv, u32 *hsync_freq_hz)
+{
+ u16 regData;
+ int result;
+
+ result = xc4000_readreg(priv, XREG_HSYNC_FREQ, &regData);
+ if (result != 0)
+ return result;
+
+ (*hsync_freq_hz) = ((regData & 0x0fff) * 763)/100;
+ return result;
+}
+
+static int xc_get_frame_lines(struct xc4000_priv *priv, u16 *frame_lines)
+{
+ return xc4000_readreg(priv, XREG_FRAME_LINES, frame_lines);
+}
+
+static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
+{
+ return xc4000_readreg(priv, XREG_QUALITY, quality);
+}
+
+static u16 xc_wait_for_lock(struct xc4000_priv *priv)
+{
+ u16 lock_state = 0;
+ int watchdog_count = 40;
+
+ while ((lock_state == 0) && (watchdog_count > 0)) {
+ xc_get_lock_status(priv, &lock_state);
+ if (lock_state != 1) {
+ msleep(5);
+ watchdog_count--;
+ }
+ }
+ return lock_state;
+}
+
+static int xc_tune_channel(struct xc4000_priv *priv, u32 freq_hz)
+{
+ int found = 1;
+ int result;
+
+ dprintk(1, "%s(%u)\n", __func__, freq_hz);
+
+ /* Don't complain when the request fails because of i2c stretching */
+ priv->ignore_i2c_write_errors = 1;
+ result = xc_set_rf_frequency(priv, freq_hz);
+ priv->ignore_i2c_write_errors = 0;
+
+ if (result != 0)
+ return 0;
+
+ /* wait for lock only in analog TV mode */
+ if ((priv->cur_fw.type & (FM | DTV6 | DTV7 | DTV78 | DTV8)) == 0) {
+ if (xc_wait_for_lock(priv) != 1)
+ found = 0;
+ }
+
+ /* Wait for stats to stabilize.
+ * Frame Lines needs two frame times after initial lock
+ * before it is valid.
+ */
+ msleep(debug ? 100 : 10);
+
+ if (debug)
+ xc_debug_dump(priv);
+
+ return found;
+}
+
+static int xc4000_readreg(struct xc4000_priv *priv, u16 reg, u16 *val)
+{
+ u8 buf[2] = { reg >> 8, reg & 0xff };
+ u8 bval[2] = { 0, 0 };
+ struct i2c_msg msg[2] = {
+ { .addr = priv->i2c_props.addr,
+ .flags = 0, .buf = &buf[0], .len = 2 },
+ { .addr = priv->i2c_props.addr,
+ .flags = I2C_M_RD, .buf = &bval[0], .len = 2 },
+ };
+
+ if (i2c_transfer(priv->i2c_props.adap, msg, 2) != 2) {
+ printk(KERN_ERR "xc4000: I2C read failed\n");
+ return -EREMOTEIO;
+ }
+
+ *val = (bval[0] << 8) | bval[1];
+ return 0;
+}
+
+#define dump_firm_type(t) dump_firm_type_and_int_freq(t, 0)
+static void dump_firm_type_and_int_freq(unsigned int type, u16 int_freq)
+{
+ if (type & BASE)
+ printk(KERN_CONT "BASE ");
+ if (type & INIT1)
+ printk(KERN_CONT "INIT1 ");
+ if (type & F8MHZ)
+ printk(KERN_CONT "F8MHZ ");
+ if (type & MTS)
+ printk(KERN_CONT "MTS ");
+ if (type & D2620)
+ printk(KERN_CONT "D2620 ");
+ if (type & D2633)
+ printk(KERN_CONT "D2633 ");
+ if (type & DTV6)
+ printk(KERN_CONT "DTV6 ");
+ if (type & QAM)
+ printk(KERN_CONT "QAM ");
+ if (type & DTV7)
+ printk(KERN_CONT "DTV7 ");
+ if (type & DTV78)
+ printk(KERN_CONT "DTV78 ");
+ if (type & DTV8)
+ printk(KERN_CONT "DTV8 ");
+ if (type & FM)
+ printk(KERN_CONT "FM ");
+ if (type & INPUT1)
+ printk(KERN_CONT "INPUT1 ");
+ if (type & LCD)
+ printk(KERN_CONT "LCD ");
+ if (type & NOGD)
+ printk(KERN_CONT "NOGD ");
+ if (type & MONO)
+ printk(KERN_CONT "MONO ");
+ if (type & ATSC)
+ printk(KERN_CONT "ATSC ");
+ if (type & IF)
+ printk(KERN_CONT "IF ");
+ if (type & LG60)
+ printk(KERN_CONT "LG60 ");
+ if (type & ATI638)
+ printk(KERN_CONT "ATI638 ");
+ if (type & OREN538)
+ printk(KERN_CONT "OREN538 ");
+ if (type & OREN36)
+ printk(KERN_CONT "OREN36 ");
+ if (type & TOYOTA388)
+ printk(KERN_CONT "TOYOTA388 ");
+ if (type & TOYOTA794)
+ printk(KERN_CONT "TOYOTA794 ");
+ if (type & DIBCOM52)
+ printk(KERN_CONT "DIBCOM52 ");
+ if (type & ZARLINK456)
+ printk(KERN_CONT "ZARLINK456 ");
+ if (type & CHINA)
+ printk(KERN_CONT "CHINA ");
+ if (type & F6MHZ)
+ printk(KERN_CONT "F6MHZ ");
+ if (type & INPUT2)
+ printk(KERN_CONT "INPUT2 ");
+ if (type & SCODE)
+ printk(KERN_CONT "SCODE ");
+ if (type & HAS_IF)
+ printk(KERN_CONT "HAS_IF_%d ", int_freq);
+}
+
+static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
+ v4l2_std_id *id)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ int i, best_i = -1;
+ unsigned int best_nr_diffs = 255U;
+
+ if (!priv->firm) {
+ printk(KERN_ERR "Error! firmware not loaded\n");
+ return -EINVAL;
+ }
+
+ if (((type & ~SCODE) == 0) && (*id == 0))
+ *id = V4L2_STD_PAL;
+
+ /* Seek for generic video standard match */
+ for (i = 0; i < priv->firm_size; i++) {
+ v4l2_std_id id_diff_mask =
+ (priv->firm[i].id ^ (*id)) & (*id);
+ unsigned int type_diff_mask =
+ (priv->firm[i].type ^ type)
+ & (BASE_TYPES | DTV_TYPES | LCD | NOGD | MONO | SCODE);
+ unsigned int nr_diffs;
+
+ if (type_diff_mask
+ & (BASE | INIT1 | FM | DTV6 | DTV7 | DTV78 | DTV8 | SCODE))
+ continue;
+
+ nr_diffs = hweight64(id_diff_mask) + hweight32(type_diff_mask);
+ if (!nr_diffs) /* Supports all the requested standards */
+ goto found;
+
+ if (nr_diffs < best_nr_diffs) {
+ best_nr_diffs = nr_diffs;
+ best_i = i;
+ }
+ }
+
+ /* FIXME: Would make sense to seek for type "hint" match ? */
+ if (best_i < 0) {
+ i = -ENOENT;
+ goto ret;
+ }
+
+ if (best_nr_diffs > 0U) {
+ printk(KERN_WARNING
+ "Selecting best matching firmware (%u bits differ) for "
+ "type=(%x), id %016llx:\n",
+ best_nr_diffs, type, (unsigned long long)*id);
+ i = best_i;
+ }
+
+found:
+ *id = priv->firm[i].id;
+
+ret:
+ if (debug) {
+ printk(KERN_DEBUG "%s firmware for type=",
+ (i < 0) ? "Can't find" : "Found");
+ dump_firm_type(type);
+ printk(KERN_DEBUG "(%x), id %016llx.\n", type, (unsigned long long)*id);
+ }
+ return i;
+}
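/*
 * Editorial sketch, not part of this patch: the selection logic above scores
 * each candidate by counting standard bits that were requested but are absent
 * from the candidate's id, plus mismatching type bits, and keeps the candidate
 * with the fewest differences.  A standalone illustration follows;
 * __builtin_popcountll() stands in for hweight64(), and the ids are invented.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int firmware_distance(uint64_t want_id, uint64_t have_id,
				      uint32_t want_type, uint32_t have_type,
				      uint32_t type_mask)
{
	uint64_t id_diff = (have_id ^ want_id) & want_id;	/* wanted but absent */
	uint32_t type_diff = (have_type ^ want_type) & type_mask;

	return __builtin_popcountll(id_diff) + __builtin_popcount(type_diff);
}

int main(void)
{
	/* hypothetical: two requested standard bits are not covered */
	unsigned int d = firmware_distance(0x0fULL, 0x03ULL, 0x1, 0x1, 0xff);

	printf("differing bits: %u\n", d);	/* prints 2 */
	return 0;
}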
+
+static int load_firmware(struct dvb_frontend *fe, unsigned int type,
+ v4l2_std_id *id)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ int pos, rc;
+ unsigned char *p;
+
+ pos = seek_firmware(fe, type, id);
+ if (pos < 0)
+ return pos;
+
+ p = priv->firm[pos].ptr;
+
+ /* Don't complain when the request fails because of i2c stretching */
+ priv->ignore_i2c_write_errors = 1;
+
+ rc = xc_load_i2c_sequence(fe, p);
+
+ priv->ignore_i2c_write_errors = 0;
+
+ return rc;
+}
+
+static int xc4000_fwupload(struct dvb_frontend *fe)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ const struct firmware *fw = NULL;
+ const unsigned char *p, *endp;
+ int rc = 0;
+ int n, n_array;
+ char name[33];
+ const char *fname;
+
+ if (firmware_name[0] != '\0')
+ fname = firmware_name;
+ else
+ fname = XC4000_DEFAULT_FIRMWARE;
+
+ dprintk(1, "Reading firmware %s\n", fname);
+ rc = request_firmware(&fw, fname, priv->i2c_props.adap->dev.parent);
+ if (rc < 0) {
+ if (rc == -ENOENT)
+ printk(KERN_ERR "Error: firmware %s not found.\n", fname);
+ else
+ printk(KERN_ERR "Error %d while requesting firmware %s\n",
+ rc, fname);
+
+ return rc;
+ }
+ p = fw->data;
+ endp = p + fw->size;
+
+ if (fw->size < sizeof(name) - 1 + 2 + 2) {
+ printk(KERN_ERR "Error: firmware file %s has invalid size!\n",
+ fname);
+ goto corrupt;
+ }
+
+ memcpy(name, p, sizeof(name) - 1);
+ name[sizeof(name) - 1] = '\0';
+ p += sizeof(name) - 1;
+
+ priv->firm_version = get_unaligned_le16(p);
+ p += 2;
+
+ n_array = get_unaligned_le16(p);
+ p += 2;
+
+ dprintk(1, "Loading %d firmware images from %s, type: %s, ver %d.%d\n",
+ n_array, fname, name,
+ priv->firm_version >> 8, priv->firm_version & 0xff);
+
+ priv->firm = kzalloc(sizeof(*priv->firm) * n_array, GFP_KERNEL);
+ if (priv->firm == NULL) {
+ printk(KERN_ERR "Not enough memory to load firmware file.\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+ priv->firm_size = n_array;
+
+ n = -1;
+ while (p < endp) {
+ __u32 type, size;
+ v4l2_std_id id;
+ __u16 int_freq = 0;
+
+ n++;
+ if (n >= n_array) {
+ printk(KERN_ERR "More firmware images in file than "
+ "were expected!\n");
+ goto corrupt;
+ }
+
+ /* Checks if there's enough bytes to read */
+ if (endp - p < sizeof(type) + sizeof(id) + sizeof(size))
+ goto header;
+
+ type = get_unaligned_le32(p);
+ p += sizeof(type);
+
+ id = get_unaligned_le64(p);
+ p += sizeof(id);
+
+ if (type & HAS_IF) {
+ int_freq = get_unaligned_le16(p);
+ p += sizeof(int_freq);
+ if (endp - p < sizeof(size))
+ goto header;
+ }
+
+ size = get_unaligned_le32(p);
+ p += sizeof(size);
+
+ if (!size || size > endp - p) {
+ printk(KERN_ERR "Firmware type (%x), id %llx is corrupted (size=%d, expected %d)\n",
+ type, (unsigned long long)id,
+ (unsigned)(endp - p), size);
+ goto corrupt;
+ }
+
+ priv->firm[n].ptr = kzalloc(size, GFP_KERNEL);
+ if (priv->firm[n].ptr == NULL) {
+ printk(KERN_ERR "Not enough memory to load firmware file.\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ if (debug) {
+ printk(KERN_DEBUG "Reading firmware type ");
+ dump_firm_type_and_int_freq(type, int_freq);
+ printk(KERN_DEBUG "(%x), id %llx, size=%d.\n",
+ type, (unsigned long long)id, size);
+ }
+
+ memcpy(priv->firm[n].ptr, p, size);
+ priv->firm[n].type = type;
+ priv->firm[n].id = id;
+ priv->firm[n].size = size;
+ priv->firm[n].int_freq = int_freq;
+
+ p += size;
+ }
+
+ if (n + 1 != priv->firm_size) {
+ printk(KERN_ERR "Firmware file is incomplete!\n");
+ goto corrupt;
+ }
+
+ goto done;
+
+header:
+ printk(KERN_ERR "Firmware header is incomplete!\n");
+corrupt:
+ rc = -EINVAL;
+ printk(KERN_ERR "Error: firmware file is corrupted!\n");
+
+done:
+ release_firmware(fw);
+ if (rc == 0)
+ dprintk(1, "Firmware files loaded.\n");
+
+ return rc;
+}
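/*
 * Editorial summary, not part of this patch, of the firmware file layout as
 * parsed by xc4000_fwupload() above (all multi-byte fields little-endian):
 *
 *   char   name[32];      human-readable firmware set name (NUL-padded)
 *   __le16 version;       (major << 8) | minor
 *   __le16 n_array;       number of firmware images that follow
 *   then, repeated n_array times:
 *     __le32 type;        BASE/INIT1/SCODE/DTV... flag bits
 *     __le64 id;          v4l2_std_id the image applies to
 *     __le16 int_freq;    present only when (type & HAS_IF)
 *     __le32 size;        payload length in bytes
 *     u8     data[size];  the I2C sequence or SCODE payload
 */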
+
+static int load_scode(struct dvb_frontend *fe, unsigned int type,
+ v4l2_std_id *id, __u16 int_freq, int scode)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ int pos, rc;
+ unsigned char *p;
+ u8 scode_buf[13];
+ u8 indirect_mode[5];
+
+ dprintk(1, "%s called int_freq=%d\n", __func__, int_freq);
+
+ if (!int_freq) {
+ pos = seek_firmware(fe, type, id);
+ if (pos < 0)
+ return pos;
+ } else {
+ for (pos = 0; pos < priv->firm_size; pos++) {
+ if ((priv->firm[pos].int_freq == int_freq) &&
+ (priv->firm[pos].type & HAS_IF))
+ break;
+ }
+ if (pos == priv->firm_size)
+ return -ENOENT;
+ }
+
+ p = priv->firm[pos].ptr;
+
+ if (priv->firm[pos].size != 12 * 16 || scode >= 16)
+ return -EINVAL;
+ p += 12 * scode;
+
+ if (debug) {
+ tuner_info("Loading SCODE for type=");
+ dump_firm_type_and_int_freq(priv->firm[pos].type,
+ priv->firm[pos].int_freq);
+ printk(KERN_CONT "(%x), id %016llx.\n", priv->firm[pos].type,
+ (unsigned long long)*id);
+ }
+
+ scode_buf[0] = 0x00;
+ memcpy(&scode_buf[1], p, 12);
+
+ /* Enter direct-mode */
+ rc = xc_write_reg(priv, XREG_DIRECTSITTING_MODE, 0);
+ if (rc < 0) {
+ printk(KERN_ERR "failed to put device into direct mode!\n");
+ return -EIO;
+ }
+
+ rc = xc_send_i2c_data(priv, scode_buf, 13);
+ if (rc != 0) {
+ /* Even if the send failed, make sure we set back to indirect
+ mode */
+ printk(KERN_ERR "Failed to set scode %d\n", rc);
+ }
+
+ /* Switch back to indirect-mode */
+ memset(indirect_mode, 0, sizeof(indirect_mode));
+ indirect_mode[4] = 0x88;
+ xc_send_i2c_data(priv, indirect_mode, sizeof(indirect_mode));
+ msleep(10);
+
+ return 0;
+}
+
+static int check_firmware(struct dvb_frontend *fe, unsigned int type,
+ v4l2_std_id std, __u16 int_freq)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ struct firmware_properties new_fw;
+ int rc = 0, is_retry = 0;
+ u16 hwmodel;
+ v4l2_std_id std0;
+ u8 hw_major, hw_minor, fw_major, fw_minor;
+
+ dprintk(1, "%s called\n", __func__);
+
+ if (!priv->firm) {
+ rc = xc4000_fwupload(fe);
+ if (rc < 0)
+ return rc;
+ }
+
+retry:
+ new_fw.type = type;
+ new_fw.id = std;
+ new_fw.std_req = std;
+ new_fw.scode_table = SCODE;
+ new_fw.scode_nr = 0;
+ new_fw.int_freq = int_freq;
+
+ dprintk(1, "checking firmware, user requested type=");
+ if (debug) {
+ dump_firm_type(new_fw.type);
+ printk(KERN_CONT "(%x), id %016llx, ", new_fw.type,
+ (unsigned long long)new_fw.std_req);
+ if (!int_freq)
+ printk(KERN_CONT "scode_tbl ");
+ else
+ printk(KERN_CONT "int_freq %d, ", new_fw.int_freq);
+ printk(KERN_CONT "scode_nr %d\n", new_fw.scode_nr);
+ }
+
+ /* No need to reload base firmware if it matches */
+ if (priv->cur_fw.type & BASE) {
+ dprintk(1, "BASE firmware not changed.\n");
+ goto skip_base;
+ }
+
+ /* Updating BASE - forget about all currently loaded firmware */
+ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+
+ /* Reset is needed before loading firmware */
+ rc = xc4000_tuner_reset(fe);
+ if (rc < 0)
+ goto fail;
+
+ /* BASE firmwares are all std0 */
+ std0 = 0;
+ rc = load_firmware(fe, BASE, &std0);
+ if (rc < 0) {
+ printk(KERN_ERR "Error %d while loading base firmware\n", rc);
+ goto fail;
+ }
+
+ /* Load INIT1, if needed */
+ dprintk(1, "Load init1 firmware, if exists\n");
+
+ rc = load_firmware(fe, BASE | INIT1, &std0);
+ if (rc == -ENOENT)
+ rc = load_firmware(fe, BASE | INIT1, &std0);
+ if (rc < 0 && rc != -ENOENT) {
+ tuner_err("Error %d while loading init1 firmware\n",
+ rc);
+ goto fail;
+ }
+
+skip_base:
+ /*
+ * No need to reload standard specific firmware if base firmware
+ * was not reloaded and requested video standards have not changed.
+ */
+ if (priv->cur_fw.type == (BASE | new_fw.type) &&
+ priv->cur_fw.std_req == std) {
+ dprintk(1, "Std-specific firmware already loaded.\n");
+ goto skip_std_specific;
+ }
+
+ /* Reloading std-specific firmware forces a SCODE update */
+ priv->cur_fw.scode_table = 0;
+
+ /* Load the standard firmware */
+ rc = load_firmware(fe, new_fw.type, &new_fw.id);
+
+ if (rc < 0)
+ goto fail;
+
+skip_std_specific:
+ if (priv->cur_fw.scode_table == new_fw.scode_table &&
+ priv->cur_fw.scode_nr == new_fw.scode_nr) {
+ dprintk(1, "SCODE firmware already loaded.\n");
+ goto check_device;
+ }
+
+ /* Load SCODE firmware, if exists */
+ rc = load_scode(fe, new_fw.type | new_fw.scode_table, &new_fw.id,
+ new_fw.int_freq, new_fw.scode_nr);
+ if (rc != 0)
+ dprintk(1, "load scode failed %d\n", rc);
+
+check_device:
+ rc = xc4000_readreg(priv, XREG_PRODUCT_ID, &hwmodel);
+
+ if (xc_get_version(priv, &hw_major, &hw_minor, &fw_major,
+ &fw_minor) != 0) {
+ printk(KERN_ERR "Unable to read tuner registers.\n");
+ goto fail;
+ }
+
+ dprintk(1, "Device is Xceive %d version %d.%d, "
+ "firmware version %d.%d\n",
+ hwmodel, hw_major, hw_minor, fw_major, fw_minor);
+
+ /* Check firmware version against what we downloaded. */
+ if (priv->firm_version != ((fw_major << 8) | fw_minor)) {
+ printk(KERN_WARNING
+ "Incorrect readback of firmware version %d.%d.\n",
+ fw_major, fw_minor);
+ goto fail;
+ }
+
+ /* Check that the tuner hardware model remains consistent over time. */
+ if (priv->hwmodel == 0 &&
+ (hwmodel == XC_PRODUCT_ID_XC4000 ||
+ hwmodel == XC_PRODUCT_ID_XC4100)) {
+ priv->hwmodel = hwmodel;
+ priv->hwvers = (hw_major << 8) | hw_minor;
+ } else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel ||
+ priv->hwvers != ((hw_major << 8) | hw_minor)) {
+ printk(KERN_WARNING
+ "Read invalid device hardware information - tuner "
+ "hung?\n");
+ goto fail;
+ }
+
+ memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));
+
+ /*
+ * By setting BASE in cur_fw.type only after successfully loading all
+ * firmwares, we can:
+ * 1. Identify that BASE firmware with type=0 has been loaded;
+ * 2. Tell whether BASE firmware was just changed the next time through.
+ */
+ priv->cur_fw.type |= BASE;
+
+ return 0;
+
+fail:
+ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+ if (!is_retry) {
+ msleep(50);
+ is_retry = 1;
+ dprintk(1, "Retrying firmware load\n");
+ goto retry;
+ }
+
+ if (rc == -ENOENT)
+ rc = -EINVAL;
+ return rc;
+}
+
+static void xc_debug_dump(struct xc4000_priv *priv)
+{
+ u16 adc_envelope;
+ u32 freq_error_hz = 0;
+ u16 lock_status;
+ u32 hsync_freq_hz = 0;
+ u16 frame_lines;
+ u16 quality;
+ u8 hw_majorversion = 0, hw_minorversion = 0;
+ u8 fw_majorversion = 0, fw_minorversion = 0;
+
+ xc_get_adc_envelope(priv, &adc_envelope);
+ dprintk(1, "*** ADC envelope (0-1023) = %d\n", adc_envelope);
+
+ xc_get_frequency_error(priv, &freq_error_hz);
+ dprintk(1, "*** Frequency error = %d Hz\n", freq_error_hz);
+
+ xc_get_lock_status(priv, &lock_status);
+ dprintk(1, "*** Lock status (0-Wait, 1-Locked, 2-No-signal) = %d\n",
+ lock_status);
+
+ xc_get_version(priv, &hw_majorversion, &hw_minorversion,
+ &fw_majorversion, &fw_minorversion);
+ dprintk(1, "*** HW: V%02x.%02x, FW: V%02x.%02x\n",
+ hw_majorversion, hw_minorversion,
+ fw_majorversion, fw_minorversion);
+
+ if (priv->video_standard < XC4000_DTV6) {
+ xc_get_hsync_freq(priv, &hsync_freq_hz);
+ dprintk(1, "*** Horizontal sync frequency = %d Hz\n",
+ hsync_freq_hz);
+
+ xc_get_frame_lines(priv, &frame_lines);
+ dprintk(1, "*** Frame lines = %d\n", frame_lines);
+ }
+
+ xc_get_quality(priv, &quality);
+ dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
+}
+
+static int xc4000_set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ unsigned int type;
+ int ret = -EREMOTEIO;
+
+ dprintk(1, "%s() frequency=%d (Hz)\n", __func__, params->frequency);
+
+ mutex_lock(&priv->lock);
+
+ if (fe->ops.info.type == FE_ATSC) {
+ dprintk(1, "%s() ATSC\n", __func__);
+ switch (params->u.vsb.modulation) {
+ case VSB_8:
+ case VSB_16:
+ dprintk(1, "%s() VSB modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_AIR;
+ priv->freq_hz = params->frequency - 1750000;
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->video_standard = XC4000_DTV6;
+ type = DTV6;
+ break;
+ case QAM_64:
+ case QAM_256:
+ case QAM_AUTO:
+ dprintk(1, "%s() QAM modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_CABLE;
+ priv->freq_hz = params->frequency - 1750000;
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->video_standard = XC4000_DTV6;
+ type = DTV6;
+ break;
+ default:
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else if (fe->ops.info.type == FE_OFDM) {
+ dprintk(1, "%s() OFDM\n", __func__);
+ switch (params->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->video_standard = XC4000_DTV6;
+ priv->freq_hz = params->frequency - 1750000;
+ type = DTV6;
+ break;
+ case BANDWIDTH_7_MHZ:
+ priv->bandwidth = BANDWIDTH_7_MHZ;
+ priv->video_standard = XC4000_DTV7;
+ priv->freq_hz = params->frequency - 2250000;
+ type = DTV7;
+ break;
+ case BANDWIDTH_8_MHZ:
+ priv->bandwidth = BANDWIDTH_8_MHZ;
+ priv->video_standard = XC4000_DTV8;
+ priv->freq_hz = params->frequency - 2750000;
+ type = DTV8;
+ break;
+ case BANDWIDTH_AUTO:
+ if (params->frequency < 400000000) {
+ priv->bandwidth = BANDWIDTH_7_MHZ;
+ priv->freq_hz = params->frequency - 2250000;
+ } else {
+ priv->bandwidth = BANDWIDTH_8_MHZ;
+ priv->freq_hz = params->frequency - 2750000;
+ }
+ priv->video_standard = XC4000_DTV7_8;
+ type = DTV78;
+ break;
+ default:
+ printk(KERN_ERR "xc4000 bandwidth not set!\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ priv->rf_mode = XC_RF_MODE_AIR;
+ } else {
+ printk(KERN_ERR "xc4000 modulation type not supported!\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dprintk(1, "%s() frequency=%d (compensated)\n",
+ __func__, priv->freq_hz);
+
+ /* Make sure the correct firmware type is loaded */
+ if (check_firmware(fe, type, 0, priv->if_khz) != 0)
+ goto fail;
+
+ ret = xc_set_signal_source(priv, priv->rf_mode);
+ if (ret != 0) {
+ printk(KERN_ERR "xc4000: xc_set_signal_source(%d) failed\n",
+ priv->rf_mode);
+ goto fail;
+ } else {
+ u16 video_mode, audio_mode;
+ video_mode = xc4000_standard[priv->video_standard].video_mode;
+ audio_mode = xc4000_standard[priv->video_standard].audio_mode;
+ if (type == DTV6 && priv->firm_version != 0x0102)
+ video_mode |= 0x0001;
+ ret = xc_set_tv_standard(priv, video_mode, audio_mode);
+ if (ret != 0) {
+ printk(KERN_ERR "xc4000: xc_set_tv_standard failed\n");
+ /* DJH - do not return when it fails... */
+ /* goto fail; */
+ }
+ }
+
+ if (xc_write_reg(priv, XREG_D_CODE, 0) == 0)
+ ret = 0;
+ if (priv->dvb_amplitude != 0) {
+ if (xc_write_reg(priv, XREG_AMPLITUDE,
+ (priv->firm_version != 0x0102 ||
+ priv->dvb_amplitude != 134 ?
+ priv->dvb_amplitude : 132)) != 0)
+ ret = -EREMOTEIO;
+ }
+ if (priv->set_smoothedcvbs != 0) {
+ if (xc_write_reg(priv, XREG_SMOOTHEDCVBS, 1) != 0)
+ ret = -EREMOTEIO;
+ }
+ if (ret != 0) {
+ printk(KERN_ERR "xc4000: setting registers failed\n");
+ /* goto fail; */
+ }
+
+ xc_tune_channel(priv, priv->freq_hz);
+
+ ret = 0;
+
+fail:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int xc4000_set_analog_params(struct dvb_frontend *fe,
+ struct analog_parameters *params)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ unsigned int type = 0;
+ int ret = -EREMOTEIO;
+
+ if (params->mode == V4L2_TUNER_RADIO) {
+ dprintk(1, "%s() frequency=%d (in units of 62.5Hz)\n",
+ __func__, params->frequency);
+
+ mutex_lock(&priv->lock);
+
+ params->std = 0;
+ priv->freq_hz = params->frequency * 125L / 2;
+
+ if (audio_std & XC4000_AUDIO_STD_INPUT1) {
+ priv->video_standard = XC4000_FM_Radio_INPUT1;
+ type = FM | INPUT1;
+ } else {
+ priv->video_standard = XC4000_FM_Radio_INPUT2;
+ type = FM | INPUT2;
+ }
+
+ goto tune_channel;
+ }
+
+ dprintk(1, "%s() frequency=%d (in units of 62.5khz)\n",
+ __func__, params->frequency);
+
+ mutex_lock(&priv->lock);
+
+ /* params->frequency is in units of 62.5 kHz */
+ priv->freq_hz = params->frequency * 62500;
+
+ params->std &= V4L2_STD_ALL;
+ /* if std is not defined, choose one */
+ if (!params->std)
+ params->std = V4L2_STD_PAL_BG;
+
+ if (audio_std & XC4000_AUDIO_STD_MONO)
+ type = MONO;
+
+ if (params->std & V4L2_STD_MN) {
+ params->std = V4L2_STD_MN;
+ if (audio_std & XC4000_AUDIO_STD_MONO) {
+ priv->video_standard = XC4000_MN_NTSC_PAL_Mono;
+ } else if (audio_std & XC4000_AUDIO_STD_A2) {
+ params->std |= V4L2_STD_A2;
+ priv->video_standard = XC4000_MN_NTSC_PAL_A2;
+ } else {
+ params->std |= V4L2_STD_BTSC;
+ priv->video_standard = XC4000_MN_NTSC_PAL_BTSC;
+ }
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_PAL_BG) {
+ params->std = V4L2_STD_PAL_BG;
+ if (audio_std & XC4000_AUDIO_STD_MONO) {
+ priv->video_standard = XC4000_BG_PAL_MONO;
+ } else if (!(audio_std & XC4000_AUDIO_STD_A2)) {
+ if (!(audio_std & XC4000_AUDIO_STD_B)) {
+ params->std |= V4L2_STD_NICAM_A;
+ priv->video_standard = XC4000_BG_PAL_NICAM;
+ } else {
+ params->std |= V4L2_STD_NICAM_B;
+ priv->video_standard = XC4000_BG_PAL_NICAM;
+ }
+ } else {
+ if (!(audio_std & XC4000_AUDIO_STD_B)) {
+ params->std |= V4L2_STD_A2_A;
+ priv->video_standard = XC4000_BG_PAL_A2;
+ } else {
+ params->std |= V4L2_STD_A2_B;
+ priv->video_standard = XC4000_BG_PAL_A2;
+ }
+ }
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_PAL_I) {
+ /* default to NICAM audio standard */
+ params->std = V4L2_STD_PAL_I | V4L2_STD_NICAM;
+ if (audio_std & XC4000_AUDIO_STD_MONO)
+ priv->video_standard = XC4000_I_PAL_NICAM_MONO;
+ else
+ priv->video_standard = XC4000_I_PAL_NICAM;
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_PAL_DK) {
+ params->std = V4L2_STD_PAL_DK;
+ if (audio_std & XC4000_AUDIO_STD_MONO) {
+ priv->video_standard = XC4000_DK_PAL_MONO;
+ } else if (audio_std & XC4000_AUDIO_STD_A2) {
+ params->std |= V4L2_STD_A2;
+ priv->video_standard = XC4000_DK_PAL_A2;
+ } else {
+ params->std |= V4L2_STD_NICAM;
+ priv->video_standard = XC4000_DK_PAL_NICAM;
+ }
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_SECAM_DK) {
+ /* default to A2 audio standard */
+ params->std = V4L2_STD_SECAM_DK | V4L2_STD_A2;
+ if (audio_std & XC4000_AUDIO_STD_L) {
+ type = 0;
+ priv->video_standard = XC4000_DK_SECAM_NICAM;
+ } else if (audio_std & XC4000_AUDIO_STD_MONO) {
+ priv->video_standard = XC4000_DK_SECAM_A2MONO;
+ } else if (audio_std & XC4000_AUDIO_STD_K3) {
+ params->std |= V4L2_STD_SECAM_K3;
+ priv->video_standard = XC4000_DK_SECAM_A2LDK3;
+ } else {
+ priv->video_standard = XC4000_DK_SECAM_A2DK1;
+ }
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_SECAM_L) {
+ /* default to NICAM audio standard */
+ type = 0;
+ params->std = V4L2_STD_SECAM_L | V4L2_STD_NICAM;
+ priv->video_standard = XC4000_L_SECAM_NICAM;
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_SECAM_LC) {
+ /* default to NICAM audio standard */
+ type = 0;
+ params->std = V4L2_STD_SECAM_LC | V4L2_STD_NICAM;
+ priv->video_standard = XC4000_LC_SECAM_NICAM;
+ goto tune_channel;
+ }
+
+tune_channel:
+ /* FIXME: it could be air. */
+ priv->rf_mode = XC_RF_MODE_CABLE;
+
+ if (check_firmware(fe, type, params->std,
+ xc4000_standard[priv->video_standard].int_freq) != 0)
+ goto fail;
+
+ ret = xc_set_signal_source(priv, priv->rf_mode);
+ if (ret != 0) {
+ printk(KERN_ERR
+ "xc4000: xc_set_signal_source(%d) failed\n",
+ priv->rf_mode);
+ goto fail;
+ } else {
+ u16 video_mode, audio_mode;
+ video_mode = xc4000_standard[priv->video_standard].video_mode;
+ audio_mode = xc4000_standard[priv->video_standard].audio_mode;
+ if (priv->video_standard < XC4000_BG_PAL_A2) {
+ if (type & NOGD)
+ video_mode &= 0xFF7F;
+ } else if (priv->video_standard < XC4000_I_PAL_NICAM) {
+ if (priv->firm_version == 0x0102)
+ video_mode &= 0xFEFF;
+ if (audio_std & XC4000_AUDIO_STD_B)
+ video_mode |= 0x0080;
+ }
+ ret = xc_set_tv_standard(priv, video_mode, audio_mode);
+ if (ret != 0) {
+ printk(KERN_ERR "xc4000: xc_set_tv_standard failed\n");
+ goto fail;
+ }
+ }
+
+ if (xc_write_reg(priv, XREG_D_CODE, 0) == 0)
+ ret = 0;
+ if (xc_write_reg(priv, XREG_AMPLITUDE, 1) != 0)
+ ret = -EREMOTEIO;
+ if (priv->set_smoothedcvbs != 0) {
+ if (xc_write_reg(priv, XREG_SMOOTHEDCVBS, 1) != 0)
+ ret = -EREMOTEIO;
+ }
+ if (ret != 0) {
+ printk(KERN_ERR "xc4000: setting registers failed\n");
+ goto fail;
+ }
+
+ xc_tune_channel(priv, priv->freq_hz);
+
+ ret = 0;
+
+fail:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+
+ *freq = priv->freq_hz;
+
+ if (debug) {
+ mutex_lock(&priv->lock);
+ if ((priv->cur_fw.type
+ & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) {
+ u16 snr = 0;
+ if (xc4000_readreg(priv, XREG_SNR, &snr) == 0) {
+ mutex_unlock(&priv->lock);
+ dprintk(1, "%s() freq = %u, SNR = %d\n",
+ __func__, *freq, snr);
+ return 0;
+ }
+ }
+ mutex_unlock(&priv->lock);
+ }
+
+ dprintk(1, "%s()\n", __func__);
+
+ return 0;
+}
+
+static int xc4000_get_bandwidth(struct dvb_frontend *fe, u32 *bw)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ dprintk(1, "%s()\n", __func__);
+
+ *bw = priv->bandwidth;
+ return 0;
+}
+
+static int xc4000_get_status(struct dvb_frontend *fe, u32 *status)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ u16 lock_status = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->cur_fw.type & BASE)
+ xc_get_lock_status(priv, &lock_status);
+
+ *status = (lock_status == 1 ?
+ TUNER_STATUS_LOCKED | TUNER_STATUS_STEREO : 0);
+ if (priv->cur_fw.type & (DTV6 | DTV7 | DTV78 | DTV8))
+ *status &= (~TUNER_STATUS_STEREO);
+
+ mutex_unlock(&priv->lock);
+
+ dprintk(2, "%s() lock_status = %d\n", __func__, lock_status);
+
+ return 0;
+}
+
+static int xc4000_sleep(struct dvb_frontend *fe)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ int ret = 0;
+
+ dprintk(1, "%s()\n", __func__);
+
+ mutex_lock(&priv->lock);
+
+ /* Avoid firmware reload on slow devices */
+ if ((no_poweroff == 2 ||
+ (no_poweroff == 0 && priv->default_pm != 0)) &&
+ (priv->cur_fw.type & BASE) != 0) {
+ /* force reset and firmware reload */
+ priv->cur_fw.type = XC_POWERED_DOWN;
+
+ if (xc_write_reg(priv, XREG_POWER_DOWN, 0) != 0) {
+ printk(KERN_ERR
+ "xc4000: %s() unable to shutdown tuner\n",
+ __func__);
+ ret = -EREMOTEIO;
+ }
+ msleep(20);
+ }
+
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int xc4000_init(struct dvb_frontend *fe)
+{
+ dprintk(1, "%s()\n", __func__);
+
+ return 0;
+}
+
+static int xc4000_release(struct dvb_frontend *fe)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+
+ dprintk(1, "%s()\n", __func__);
+
+ mutex_lock(&xc4000_list_mutex);
+
+ if (priv)
+ hybrid_tuner_release_state(priv);
+
+ mutex_unlock(&xc4000_list_mutex);
+
+ fe->tuner_priv = NULL;
+
+ return 0;
+}
+
+static const struct dvb_tuner_ops xc4000_tuner_ops = {
+ .info = {
+ .name = "Xceive XC4000",
+ .frequency_min = 1000000,
+ .frequency_max = 1023000000,
+ .frequency_step = 50000,
+ },
+
+ .release = xc4000_release,
+ .init = xc4000_init,
+ .sleep = xc4000_sleep,
+
+ .set_params = xc4000_set_params,
+ .set_analog_params = xc4000_set_analog_params,
+ .get_frequency = xc4000_get_frequency,
+ .get_bandwidth = xc4000_get_bandwidth,
+ .get_status = xc4000_get_status
+};
+
+struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct xc4000_config *cfg)
+{
+ struct xc4000_priv *priv = NULL;
+ int instance;
+ u16 id = 0;
+
+ dprintk(1, "%s(%d-%04x)\n", __func__,
+ i2c ? i2c_adapter_id(i2c) : -1,
+ cfg ? cfg->i2c_address : -1);
+
+ mutex_lock(&xc4000_list_mutex);
+
+ instance = hybrid_tuner_request_state(struct xc4000_priv, priv,
+ hybrid_tuner_instance_list,
+ i2c, cfg->i2c_address, "xc4000");
+ switch (instance) {
+ case 0:
+ goto fail;
+ break;
+ case 1:
+ /* new tuner instance */
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ /* set default configuration */
+ priv->if_khz = 4560;
+ priv->default_pm = 0;
+ priv->dvb_amplitude = 134;
+ priv->set_smoothedcvbs = 1;
+ mutex_init(&priv->lock);
+ fe->tuner_priv = priv;
+ break;
+ default:
+ /* existing tuner instance */
+ fe->tuner_priv = priv;
+ break;
+ }
+
+ if (cfg->if_khz != 0) {
+ /* copy configuration if provided by the caller */
+ priv->if_khz = cfg->if_khz;
+ priv->default_pm = cfg->default_pm;
+ priv->dvb_amplitude = cfg->dvb_amplitude;
+ priv->set_smoothedcvbs = cfg->set_smoothedcvbs;
+ }
+
+ /* Check if firmware has been loaded. It is possible that another
+ * instance of the driver has already loaded the firmware.
+ */
+
+ if (instance == 1) {
+ if (xc4000_readreg(priv, XREG_PRODUCT_ID, &id) != 0)
+ goto fail;
+ } else {
+ id = ((priv->cur_fw.type & BASE) != 0 ?
+ priv->hwmodel : XC_PRODUCT_ID_FW_NOT_LOADED);
+ }
+
+ switch (id) {
+ case XC_PRODUCT_ID_XC4000:
+ case XC_PRODUCT_ID_XC4100:
+ printk(KERN_INFO
+ "xc4000: Successfully identified at address 0x%02x\n",
+ cfg->i2c_address);
+ printk(KERN_INFO
+ "xc4000: Firmware has been loaded previously\n");
+ break;
+ case XC_PRODUCT_ID_FW_NOT_LOADED:
+ printk(KERN_INFO
+ "xc4000: Successfully identified at address 0x%02x\n",
+ cfg->i2c_address);
+ printk(KERN_INFO
+ "xc4000: Firmware has not been loaded previously\n");
+ break;
+ default:
+ printk(KERN_ERR
+ "xc4000: Device not found at addr 0x%02x (0x%x)\n",
+ cfg->i2c_address, id);
+ goto fail;
+ }
+
+ mutex_unlock(&xc4000_list_mutex);
+
+ memcpy(&fe->ops.tuner_ops, &xc4000_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ if (instance == 1) {
+ int ret;
+ mutex_lock(&priv->lock);
+ ret = xc4000_fwupload(fe);
+ mutex_unlock(&priv->lock);
+ if (ret != 0)
+ goto fail2;
+ }
+
+ return fe;
+fail:
+ mutex_unlock(&xc4000_list_mutex);
+fail2:
+ xc4000_release(fe);
+ return NULL;
+}
+EXPORT_SYMBOL(xc4000_attach);
+
+MODULE_AUTHOR("Steven Toth, Davide Ferri");
+MODULE_DESCRIPTION("Xceive xc4000 silicon tuner driver");
+MODULE_LICENSE("GPL");
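
Illustration (editorial, not part of the patch): the firmware bookkeeping above
ORs a BASE flag into priv->cur_fw.type so that later calls can tell both that a
base image is present and which standard-specific image sits on top of it. A
minimal, self-contained sketch of that idiom follows; the bit values and the
want_reload() helper are invented for illustration only.

        #include <stdio.h>

        /* Illustrative bit values only; the real constants live in xc4000.c. */
        #define BASE  (1u << 0)
        #define FM    (1u << 1)
        #define DTV6  (1u << 2)

        /* Hypothetical helper: a reload is needed when no base image is present,
         * or when the standard-specific bits differ from what the caller wants. */
        static int want_reload(unsigned int cur_type, unsigned int wanted)
        {
                if (!(cur_type & BASE))
                        return 1;                    /* no base firmware at all */
                return (cur_type & ~BASE) != wanted; /* wrong image on top */
        }

        int main(void)
        {
                unsigned int cur_type = 0;

                cur_type |= BASE;   /* base image loaded, as in the code above */
                cur_type |= DTV6;   /* DTV6 image loaded on top of it */

                printf("reload for DTV6: %d\n", want_reload(cur_type, DTV6)); /* 0 */
                printf("reload for FM:   %d\n", want_reload(cur_type, FM));   /* 1 */
                return 0;
        }
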
diff --git a/drivers/media/common/tuners/xc4000.h b/drivers/media/common/tuners/xc4000.h
new file mode 100644
index 0000000..e6a44d1
--- /dev/null
+++ b/drivers/media/common/tuners/xc4000.h
@@ -0,0 +1,67 @@
+/*
+ * Driver for Xceive XC4000 "QAM/8VSB single chip tuner"
+ *
+ * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __XC4000_H__
+#define __XC4000_H__
+
+#include <linux/firmware.h>
+
+struct dvb_frontend;
+struct i2c_adapter;
+
+struct xc4000_config {
+ u8 i2c_address;
+ /* if non-zero, power management is enabled by default */
+ u8 default_pm;
+ /* value to be written to XREG_AMPLITUDE in DVB-T mode (0: no write) */
+ u8 dvb_amplitude;
+ /* if non-zero, register 0x0E is set to filter analog TV video output */
+ u8 set_smoothedcvbs;
+ /* IF for DVB-T */
+ u32 if_khz;
+};
+
+/* xc4000 callback command */
+#define XC4000_TUNER_RESET 0
+
+/* When a bridge framework attaches either the analog or the digital side,
+ * it has to store a reference back to its _core equivalent structure,
+ * so that it can service the hardware by steering GPIOs etc.
+ * Each bridge implementation is different, so cast devptr accordingly.
+ * The xc4000 driver does not use this value itself; it only makes sure
+ * it is passed back to the bridge during tuner_callback().
+ */
+
+#if defined(CONFIG_MEDIA_TUNER_XC4000) || (defined(CONFIG_MEDIA_TUNER_XC4000_MODULE) && defined(MODULE))
+extern struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct xc4000_config *cfg);
+#else
+static inline struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct xc4000_config *cfg)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
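
Illustration (editorial, not part of the patch): a bridge or card driver
normally fills in struct xc4000_config and hands it to xc4000_attach(), usually
through the dvb_attach() helper. The sketch below shows the shape of such a
call; the I2C address and the surrounding function are placeholders, and the
numeric defaults simply mirror the ones xc4000_attach() itself falls back to.

        #include <linux/errno.h>
        #include "dvb_frontend.h"
        #include "xc4000.h"

        /* Example values only; real cards take these from their board data.
         * The bridge's tuner callback is expected to handle XC4000_TUNER_RESET. */
        static struct xc4000_config my_xc4000_cfg = {
                .i2c_address      = 0x61,  /* placeholder bus address */
                .default_pm       = 0,     /* keep the tuner powered between tunings */
                .dvb_amplitude    = 134,   /* same default the driver sets itself */
                .set_smoothedcvbs = 1,
                .if_khz           = 4560,  /* DVB-T IF, matches the driver default */
        };

        /* 'fe' is the already-attached demodulator frontend and 'i2c' the bus
         * the tuner sits on; both come from the bridge driver. */
        static int my_card_attach_tuner(struct dvb_frontend *fe,
                                        struct i2c_adapter *i2c)
        {
                if (!dvb_attach(xc4000_attach, fe, i2c, &my_xc4000_cfg))
                        return -ENODEV;
                return 0;
        }
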
diff --git a/drivers/media/dvb/Kconfig b/drivers/media/dvb/Kconfig
index ee214c3..f6e40b3 100644
--- a/drivers/media/dvb/Kconfig
+++ b/drivers/media/dvb/Kconfig
@@ -80,6 +80,10 @@ comment "Supported nGene Adapters"
depends on DVB_CORE && PCI && I2C
source "drivers/media/dvb/ngene/Kconfig"
+comment "Supported ddbridge ('Octopus') Adapters"
+ depends on DVB_CORE && PCI && I2C
+ source "drivers/media/dvb/ddbridge/Kconfig"
+
comment "Supported DVB Frontends"
depends on DVB_CORE
source "drivers/media/dvb/frontends/Kconfig"
diff --git a/drivers/media/dvb/Makefile b/drivers/media/dvb/Makefile
index a1a0875..b2cefe6 100644
--- a/drivers/media/dvb/Makefile
+++ b/drivers/media/dvb/Makefile
@@ -15,6 +15,7 @@ obj-y := dvb-core/ \
dm1105/ \
pt1/ \
mantis/ \
- ngene/
+ ngene/ \
+ ddbridge/
obj-$(CONFIG_DVB_FIREDTV) += firewire/
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index 1e1106d..521d691 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -892,7 +892,7 @@ static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
if (!(bttv_pci_dev = bttv_get_pcidev(card->bttv_nr))) {
printk("dvb_bt8xx: no pci device for card %d\n", card->bttv_nr);
kfree(card);
- return -EFAULT;
+ return -ENODEV;
}
if (!(card->bt = dvb_bt8xx_878_match(card->bttv_nr, bttv_pci_dev))) {
@@ -902,7 +902,7 @@ static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
"installed, try removing it.\n");
kfree(card);
- return -EFAULT;
+ return -ENODEV;
}
mutex_init(&card->bt->gpio_lock);
diff --git a/drivers/media/dvb/ddbridge/Kconfig b/drivers/media/dvb/ddbridge/Kconfig
new file mode 100644
index 0000000..d099e1a
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/Kconfig
@@ -0,0 +1,18 @@
+config DVB_DDBRIDGE
+ tristate "Digital Devices bridge support"
+ depends on DVB_CORE && PCI && I2C
+ select DVB_LNBP21 if !DVB_FE_CUSTOMISE
+ select DVB_STV6110x if !DVB_FE_CUSTOMISE
+ select DVB_STV090x if !DVB_FE_CUSTOMISE
+ select DVB_DRXK if !DVB_FE_CUSTOMISE
+ select DVB_TDA18271C2DD if !DVB_FE_CUSTOMISE
+ ---help---
+ Support for cards with the Digital Devices PCI express bridge:
+ - Octopus PCIe Bridge
+ - Octopus mini PCIe Bridge
+ - Octopus LE
+ - DuoFlex S2 Octopus
+ - DuoFlex CT Octopus
+ - cineS2(v6)
+
+ Say Y if you own such a card and want to use it.
diff --git a/drivers/media/dvb/ddbridge/Makefile b/drivers/media/dvb/ddbridge/Makefile
new file mode 100644
index 0000000..de4fe19
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the ddbridge device driver
+#
+
+ddbridge-objs := ddbridge-core.o
+
+obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o
+
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
+EXTRA_CFLAGS += -Idrivers/media/dvb/frontends/
+EXTRA_CFLAGS += -Idrivers/media/common/tuners/
+
+# For the staging CI driver cxd2099
+EXTRA_CFLAGS += -Idrivers/staging/cxd2099/
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
new file mode 100644
index 0000000..573d540
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -0,0 +1,1719 @@
+/*
+ * ddbridge.c: Digital Devices PCIe bridge driver
+ *
+ * Copyright (C) 2010-2011 Digital Devices GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+#include <linux/i2c.h>
+#include <linux/swab.h>
+#include <linux/vmalloc.h>
+#include "ddbridge.h"
+
+#include "ddbridge-regs.h"
+
+#include "tda18271c2dd.h"
+#include "stv6110x.h"
+#include "stv090x.h"
+#include "lnbh24.h"
+#include "drxk.h"
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+/* MSI had problems with lost interrupts, fixed but needs testing */
+#undef CONFIG_PCI_MSI
+
+/******************************************************************************/
+
+static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
+{
+ struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1 } };
+ return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
+}
+
+static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr, u8 reg, u8 *val)
+{
+ struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
+ .buf = &reg, .len = 1 },
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1 } };
+ return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
+}
+
+static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
+ u16 reg, u8 *val)
+{
+ u8 msg[2] = {reg>>8, reg&0xff};
+ struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
+ .buf = msg, .len = 2},
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1} };
+ return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
+}
+
+static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
+{
+ struct ddb *dev = i2c->dev;
+ int stat;
+ u32 val;
+
+ i2c->done = 0;
+ ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND);
+ stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ);
+ if (stat <= 0) {
+ printk(KERN_ERR "I2C timeout\n");
+ { /* MSI debugging */
+ u32 istat = ddbreadl(INTERRUPT_STATUS);
+ printk(KERN_ERR "IRS %08x\n", istat);
+ ddbwritel(istat, INTERRUPT_ACK);
+ }
+ return -EIO;
+ }
+ val = ddbreadl(i2c->regs+I2C_COMMAND);
+ if (val & 0x70000)
+ return -EIO;
+ return 0;
+}
+
+static int ddb_i2c_master_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg msg[], int num)
+{
+ struct ddb_i2c *i2c = (struct ddb_i2c *)i2c_get_adapdata(adapter);
+ struct ddb *dev = i2c->dev;
+ u8 addr = 0;
+
+ if (num)
+ addr = msg[0].addr;
+
+ if (num == 2 && msg[1].flags & I2C_M_RD &&
+ !(msg[0].flags & I2C_M_RD)) {
+ memcpy_toio(dev->regs + I2C_TASKMEM_BASE + i2c->wbuf,
+ msg[0].buf, msg[0].len);
+ ddbwritel(msg[0].len|(msg[1].len << 16),
+ i2c->regs+I2C_TASKLENGTH);
+ if (!ddb_i2c_cmd(i2c, addr, 1)) {
+ memcpy_fromio(msg[1].buf,
+ dev->regs + I2C_TASKMEM_BASE + i2c->rbuf,
+ msg[1].len);
+ return num;
+ }
+ }
+
+ if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
+ ddbcpyto(I2C_TASKMEM_BASE + i2c->wbuf, msg[0].buf, msg[0].len);
+ ddbwritel(msg[0].len, i2c->regs + I2C_TASKLENGTH);
+ if (!ddb_i2c_cmd(i2c, addr, 2))
+ return num;
+ }
+ if (num == 1 && (msg[0].flags & I2C_M_RD)) {
+ ddbwritel(msg[0].len << 16, i2c->regs + I2C_TASKLENGTH);
+ if (!ddb_i2c_cmd(i2c, addr, 3)) {
+ ddbcpyfrom(msg[0].buf,
+ I2C_TASKMEM_BASE + i2c->rbuf, msg[0].len);
+ return num;
+ }
+ }
+ return -EIO;
+}
+
+
+static u32 ddb_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_EMUL;
+}
+
+struct i2c_algorithm ddb_i2c_algo = {
+ .master_xfer = ddb_i2c_master_xfer,
+ .functionality = ddb_i2c_functionality,
+};
+
+static void ddb_i2c_release(struct ddb *dev)
+{
+ int i;
+ struct ddb_i2c *i2c;
+ struct i2c_adapter *adap;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ i2c = &dev->i2c[i];
+ adap = &i2c->adap;
+ i2c_del_adapter(adap);
+ }
+}
+
+static int ddb_i2c_init(struct ddb *dev)
+{
+ int i, j, stat = 0;
+ struct ddb_i2c *i2c;
+ struct i2c_adapter *adap;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ i2c = &dev->i2c[i];
+ i2c->dev = dev;
+ i2c->nr = i;
+ i2c->wbuf = i * (I2C_TASKMEM_SIZE / 4);
+ i2c->rbuf = i2c->wbuf + (I2C_TASKMEM_SIZE / 8);
+ i2c->regs = 0x80 + i * 0x20;
+ ddbwritel(I2C_SPEED_100, i2c->regs + I2C_TIMING);
+ ddbwritel((i2c->rbuf << 16) | i2c->wbuf,
+ i2c->regs + I2C_TASKADDRESS);
+ init_waitqueue_head(&i2c->wq);
+
+ adap = &i2c->adap;
+ i2c_set_adapdata(adap, i2c);
+#ifdef I2C_ADAP_CLASS_TV_DIGITAL
+ adap->class = I2C_ADAP_CLASS_TV_DIGITAL|I2C_CLASS_TV_ANALOG;
+#else
+#ifdef I2C_CLASS_TV_ANALOG
+ adap->class = I2C_CLASS_TV_ANALOG;
+#endif
+#endif
+ strcpy(adap->name, "ddbridge");
+ adap->algo = &ddb_i2c_algo;
+ adap->algo_data = (void *)i2c;
+ adap->dev.parent = &dev->pdev->dev;
+ stat = i2c_add_adapter(adap);
+ if (stat)
+ break;
+ }
+ if (stat)
+ for (j = 0; j < i; j++) {
+ i2c = &dev->i2c[j];
+ adap = &i2c->adap;
+ i2c_del_adapter(adap);
+ }
+ return stat;
+}
+
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
+#if 0
+static void set_table(struct ddb *dev, u32 off,
+ dma_addr_t *pbuf, u32 num)
+{
+ u32 i, base;
+ u64 mem;
+
+ base = DMA_BASE_ADDRESS_TABLE + off;
+ for (i = 0; i < num; i++) {
+ mem = pbuf[i];
+ ddbwritel(mem & 0xffffffff, base + i * 8);
+ ddbwritel(mem >> 32, base + i * 8 + 4);
+ }
+}
+#endif
+
+static void ddb_address_table(struct ddb *dev)
+{
+ u32 i, j, base;
+ u64 mem;
+ dma_addr_t *pbuf;
+
+ for (i = 0; i < dev->info->port_num * 2; i++) {
+ base = DMA_BASE_ADDRESS_TABLE + i * 0x100;
+ pbuf = dev->input[i].pbuf;
+ for (j = 0; j < dev->input[i].dma_buf_num; j++) {
+ mem = pbuf[j];
+ ddbwritel(mem & 0xffffffff, base + j * 8);
+ ddbwritel(mem >> 32, base + j * 8 + 4);
+ }
+ }
+ for (i = 0; i < dev->info->port_num; i++) {
+ base = DMA_BASE_ADDRESS_TABLE + 0x800 + i * 0x100;
+ pbuf = dev->output[i].pbuf;
+ for (j = 0; j < dev->output[i].dma_buf_num; j++) {
+ mem = pbuf[j];
+ ddbwritel(mem & 0xffffffff, base + j * 8);
+ ddbwritel(mem >> 32, base + j * 8 + 4);
+ }
+ }
+}
+
+static void io_free(struct pci_dev *pdev, u8 **vbuf,
+ dma_addr_t *pbuf, u32 size, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ if (vbuf[i]) {
+ pci_free_consistent(pdev, size, vbuf[i], pbuf[i]);
+ vbuf[i] = 0;
+ }
+ }
+}
+
+static int io_alloc(struct pci_dev *pdev, u8 **vbuf,
+ dma_addr_t *pbuf, u32 size, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ vbuf[i] = pci_alloc_consistent(pdev, size, &pbuf[i]);
+ if (!vbuf[i])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int ddb_buffers_alloc(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ switch (port->class) {
+ case DDB_PORT_TUNER:
+ if (io_alloc(dev->pdev, port->input[0]->vbuf,
+ port->input[0]->pbuf,
+ port->input[0]->dma_buf_size,
+ port->input[0]->dma_buf_num) < 0)
+ return -1;
+ if (io_alloc(dev->pdev, port->input[1]->vbuf,
+ port->input[1]->pbuf,
+ port->input[1]->dma_buf_size,
+ port->input[1]->dma_buf_num) < 0)
+ return -1;
+ break;
+ case DDB_PORT_CI:
+ if (io_alloc(dev->pdev, port->input[0]->vbuf,
+ port->input[0]->pbuf,
+ port->input[0]->dma_buf_size,
+ port->input[0]->dma_buf_num) < 0)
+ return -1;
+ if (io_alloc(dev->pdev, port->output->vbuf,
+ port->output->pbuf,
+ port->output->dma_buf_size,
+ port->output->dma_buf_num) < 0)
+ return -1;
+ break;
+ default:
+ break;
+ }
+ }
+ ddb_address_table(dev);
+ return 0;
+}
+
+static void ddb_buffers_free(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ io_free(dev->pdev, port->input[0]->vbuf,
+ port->input[0]->pbuf,
+ port->input[0]->dma_buf_size,
+ port->input[0]->dma_buf_num);
+ io_free(dev->pdev, port->input[1]->vbuf,
+ port->input[1]->pbuf,
+ port->input[1]->dma_buf_size,
+ port->input[1]->dma_buf_num);
+ io_free(dev->pdev, port->output->vbuf,
+ port->output->pbuf,
+ port->output->dma_buf_size,
+ port->output->dma_buf_num);
+ }
+}
+
+static void ddb_input_start(struct ddb_input *input)
+{
+ struct ddb *dev = input->port->dev;
+
+ spin_lock_irq(&input->lock);
+ input->cbuf = 0;
+ input->coff = 0;
+
+ /* reset */
+ ddbwritel(0, TS_INPUT_CONTROL(input->nr));
+ ddbwritel(2, TS_INPUT_CONTROL(input->nr));
+ ddbwritel(0, TS_INPUT_CONTROL(input->nr));
+
+ ddbwritel((1 << 16) |
+ (input->dma_buf_num << 11) |
+ (input->dma_buf_size >> 7),
+ DMA_BUFFER_SIZE(input->nr));
+ ddbwritel(0, DMA_BUFFER_ACK(input->nr));
+
+ ddbwritel(1, DMA_BASE_WRITE);
+ ddbwritel(3, DMA_BUFFER_CONTROL(input->nr));
+ ddbwritel(9, TS_INPUT_CONTROL(input->nr));
+ input->running = 1;
+ spin_unlock_irq(&input->lock);
+}
+
+static void ddb_input_stop(struct ddb_input *input)
+{
+ struct ddb *dev = input->port->dev;
+
+ spin_lock_irq(&input->lock);
+ ddbwritel(0, TS_INPUT_CONTROL(input->nr));
+ ddbwritel(0, DMA_BUFFER_CONTROL(input->nr));
+ input->running = 0;
+ spin_unlock_irq(&input->lock);
+}
+
+static void ddb_output_start(struct ddb_output *output)
+{
+ struct ddb *dev = output->port->dev;
+
+ spin_lock_irq(&output->lock);
+ output->cbuf = 0;
+ output->coff = 0;
+ ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
+ ddbwritel(2, TS_OUTPUT_CONTROL(output->nr));
+ ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
+ ddbwritel(0x3c, TS_OUTPUT_CONTROL(output->nr));
+ ddbwritel((1 << 16) |
+ (output->dma_buf_num << 11) |
+ (output->dma_buf_size >> 7),
+ DMA_BUFFER_SIZE(output->nr + 8));
+ ddbwritel(0, DMA_BUFFER_ACK(output->nr + 8));
+
+ ddbwritel(1, DMA_BASE_READ);
+ ddbwritel(3, DMA_BUFFER_CONTROL(output->nr + 8));
+ /* ddbwritel(0xbd, TS_OUTPUT_CONTROL(output->nr)); */
+ ddbwritel(0x1d, TS_OUTPUT_CONTROL(output->nr));
+ output->running = 1;
+ spin_unlock_irq(&output->lock);
+}
+
+static void ddb_output_stop(struct ddb_output *output)
+{
+ struct ddb *dev = output->port->dev;
+
+ spin_lock_irq(&output->lock);
+ ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
+ ddbwritel(0, DMA_BUFFER_CONTROL(output->nr + 8));
+ output->running = 0;
+ spin_unlock_irq(&output->lock);
+}
+
+static u32 ddb_output_free(struct ddb_output *output)
+{
+ u32 idx, off, stat = output->stat;
+ s32 diff;
+
+ idx = (stat >> 11) & 0x1f;
+ off = (stat & 0x7ff) << 7;
+
+ if (output->cbuf != idx) {
+ if ((((output->cbuf + 1) % output->dma_buf_num) == idx) &&
+ (output->dma_buf_size - output->coff <= 188))
+ return 0;
+ return 188;
+ }
+ diff = off - output->coff;
+ if (diff <= 0 || diff > 188)
+ return 188;
+ return 0;
+}
+
+static ssize_t ddb_output_write(struct ddb_output *output,
+ const u8 *buf, size_t count)
+{
+ struct ddb *dev = output->port->dev;
+ u32 idx, off, stat = output->stat;
+ u32 left = count, len;
+
+ idx = (stat >> 11) & 0x1f;
+ off = (stat & 0x7ff) << 7;
+
+ while (left) {
+ len = output->dma_buf_size - output->coff;
+ if ((((output->cbuf + 1) % output->dma_buf_num) == idx) &&
+ (off == 0)) {
+ if (len <= 188)
+ break;
+ len -= 188;
+ }
+ if (output->cbuf == idx) {
+ if (off > output->coff) {
+ len = off - output->coff;
+ len -= (len % 188);
+ if (len <= 188)
+ break;
+ len -= 188;
+ }
+ }
+ if (len > left)
+ len = left;
+ if (copy_from_user(output->vbuf[output->cbuf] + output->coff,
+ buf, len))
+ return -EIO;
+ left -= len;
+ buf += len;
+ output->coff += len;
+ if (output->coff == output->dma_buf_size) {
+ output->coff = 0;
+ output->cbuf = ((output->cbuf + 1) % output->dma_buf_num);
+ }
+ ddbwritel((output->cbuf << 11) | (output->coff >> 7),
+ DMA_BUFFER_ACK(output->nr + 8));
+ }
+ return count - left;
+}
+
+static u32 ddb_input_avail(struct ddb_input *input)
+{
+ struct ddb *dev = input->port->dev;
+ u32 idx, off, stat = input->stat;
+ u32 ctrl = ddbreadl(DMA_BUFFER_CONTROL(input->nr));
+
+ idx = (stat >> 11) & 0x1f;
+ off = (stat & 0x7ff) << 7;
+
+ if (ctrl & 4) {
+ printk(KERN_ERR "IA %d %d %08x\n", idx, off, ctrl);
+ ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr));
+ return 0;
+ }
+ if (input->cbuf != idx)
+ return 188;
+ return 0;
+}
+
+static size_t ddb_input_read(struct ddb_input *input, u8 *buf, size_t count)
+{
+ struct ddb *dev = input->port->dev;
+ u32 left = count;
+ u32 idx, off, free, stat = input->stat;
+ int ret;
+
+ idx = (stat >> 11) & 0x1f;
+ off = (stat & 0x7ff) << 7;
+
+ while (left) {
+ if (input->cbuf == idx)
+ return count - left;
+ free = input->dma_buf_size - input->coff;
+ if (free > left)
+ free = left;
+ ret = copy_to_user(buf, input->vbuf[input->cbuf] +
+ input->coff, free);
+ input->coff += free;
+ if (input->coff == input->dma_buf_size) {
+ input->coff = 0;
+ input->cbuf = (input->cbuf+1) % input->dma_buf_num;
+ }
+ left -= free;
+ ddbwritel((input->cbuf << 11) | (input->coff >> 7),
+ DMA_BUFFER_ACK(input->nr));
+ }
+ return count;
+}
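
/* Editorial illustration, not part of the patch: the ring-buffer helpers
 * above all decode the hardware status word the same way -- bits 11..15
 * carry the current DMA buffer index and bits 0..10 the offset within that
 * buffer in 128-byte units. A standalone sketch of the decoding (field
 * widths taken from the expressions above, sample value arbitrary):
 */

        #include <stdio.h>
        #include <stdint.h>

        struct dma_pos {
                uint32_t buf_index;    /* which DMA buffer the hardware is in */
                uint32_t byte_offset;  /* offset inside that buffer, in bytes */
        };

        /* Same decoding as ddb_output_free()/ddb_input_read() above:
         * index = (stat >> 11) & 0x1f, offset = (stat & 0x7ff) << 7. */
        static struct dma_pos decode_stat(uint32_t stat)
        {
                struct dma_pos p = {
                        .buf_index   = (stat >> 11) & 0x1f,
                        .byte_offset = (stat & 0x7ff) << 7,
                };
                return p;
        }

        int main(void)
        {
                struct dma_pos p = decode_stat(0x1a41);  /* arbitrary sample */

                printf("buffer %u, offset %u bytes\n", p.buf_index, p.byte_offset);
                return 0;
        }
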
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
+#if 0
+static struct ddb_input *fe2input(struct ddb *dev, struct dvb_frontend *fe)
+{
+ int i;
+
+ for (i = 0; i < dev->info->port_num * 2; i++) {
+ if (dev->input[i].fe == fe)
+ return &dev->input[i];
+ }
+ return NULL;
+}
+#endif
+
+static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct ddb_input *input = fe->sec_priv;
+ struct ddb_port *port = input->port;
+ int status;
+
+ if (enable) {
+ mutex_lock(&port->i2c_gate_lock);
+ status = input->gate_ctrl(fe, 1);
+ } else {
+ status = input->gate_ctrl(fe, 0);
+ mutex_unlock(&port->i2c_gate_lock);
+ }
+ return status;
+}
+
+static int demod_attach_drxk(struct ddb_input *input)
+{
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct dvb_frontend *fe;
+ struct drxk_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.adr = 0x29 + (input->nr & 1);
+
+ fe = input->fe = dvb_attach(drxk_attach, &config, i2c, &input->fe2);
+ if (!input->fe) {
+ printk(KERN_ERR "No DRXK found!\n");
+ return -ENODEV;
+ }
+ fe->sec_priv = input;
+ input->gate_ctrl = fe->ops.i2c_gate_ctrl;
+ fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+ return 0;
+}
+
+static int tuner_attach_tda18271(struct ddb_input *input)
+{
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct dvb_frontend *fe;
+
+ if (input->fe->ops.i2c_gate_ctrl)
+ input->fe->ops.i2c_gate_ctrl(input->fe, 1);
+ fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60);
+ if (!fe) {
+ printk(KERN_ERR "No TDA18271 found!\n");
+ return -ENODEV;
+ }
+ if (input->fe->ops.i2c_gate_ctrl)
+ input->fe->ops.i2c_gate_ctrl(input->fe, 0);
+ return 0;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
+static struct stv090x_config stv0900 = {
+ .device = STV0900,
+ .demod_mode = STV090x_DUAL,
+ .clk_mode = STV090x_CLK_EXT,
+
+ .xtal = 27000000,
+ .address = 0x69,
+
+ .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+
+ .repeater_level = STV090x_RPTLEVEL_16,
+
+ .adc1_range = STV090x_ADC_1Vpp,
+ .adc2_range = STV090x_ADC_1Vpp,
+
+ .diseqc_envelope_mode = true,
+};
+
+static struct stv090x_config stv0900_aa = {
+ .device = STV0900,
+ .demod_mode = STV090x_DUAL,
+ .clk_mode = STV090x_CLK_EXT,
+
+ .xtal = 27000000,
+ .address = 0x68,
+
+ .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+
+ .repeater_level = STV090x_RPTLEVEL_16,
+
+ .adc1_range = STV090x_ADC_1Vpp,
+ .adc2_range = STV090x_ADC_1Vpp,
+
+ .diseqc_envelope_mode = true,
+};
+
+static struct stv6110x_config stv6110a = {
+ .addr = 0x60,
+ .refclk = 27000000,
+ .clk_div = 1,
+};
+
+static struct stv6110x_config stv6110b = {
+ .addr = 0x63,
+ .refclk = 27000000,
+ .clk_div = 1,
+};
+
+static int demod_attach_stv0900(struct ddb_input *input, int type)
+{
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
+
+ input->fe = dvb_attach(stv090x_attach, feconf, i2c,
+ (input->nr & 1) ? STV090x_DEMODULATOR_1
+ : STV090x_DEMODULATOR_0);
+ if (!input->fe) {
+ printk(KERN_ERR "No STV0900 found!\n");
+ return -ENODEV;
+ }
+ if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0,
+ 0, (input->nr & 1) ?
+ (0x09 - type) : (0x0b - type))) {
+ printk(KERN_ERR "No LNBH24 found!\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int tuner_attach_stv6110(struct ddb_input *input, int type)
+{
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
+ struct stv6110x_config *tunerconf = (input->nr & 1) ?
+ &stv6110b : &stv6110a;
+ struct stv6110x_devctl *ctl;
+
+ ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c);
+ if (!ctl) {
+ printk(KERN_ERR "No STV6110X found!\n");
+ return -ENODEV;
+ }
+ printk(KERN_INFO "attach tuner input %d adr %02x\n",
+ input->nr, tunerconf->addr);
+
+ feconf->tuner_init = ctl->tuner_init;
+ feconf->tuner_sleep = ctl->tuner_sleep;
+ feconf->tuner_set_mode = ctl->tuner_set_mode;
+ feconf->tuner_set_frequency = ctl->tuner_set_frequency;
+ feconf->tuner_get_frequency = ctl->tuner_get_frequency;
+ feconf->tuner_set_bandwidth = ctl->tuner_set_bandwidth;
+ feconf->tuner_get_bandwidth = ctl->tuner_get_bandwidth;
+ feconf->tuner_set_bbgain = ctl->tuner_set_bbgain;
+ feconf->tuner_get_bbgain = ctl->tuner_get_bbgain;
+ feconf->tuner_set_refclk = ctl->tuner_set_refclk;
+ feconf->tuner_get_status = ctl->tuner_get_status;
+
+ return 0;
+}
+
+static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
+ int (*start_feed)(struct dvb_demux_feed *),
+ int (*stop_feed)(struct dvb_demux_feed *),
+ void *priv)
+{
+ dvbdemux->priv = priv;
+
+ dvbdemux->filternum = 256;
+ dvbdemux->feednum = 256;
+ dvbdemux->start_feed = start_feed;
+ dvbdemux->stop_feed = stop_feed;
+ dvbdemux->write_to_decoder = NULL;
+ dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING);
+ return dvb_dmx_init(dvbdemux);
+}
+
+static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
+ struct dvb_demux *dvbdemux,
+ struct dmx_frontend *hw_frontend,
+ struct dmx_frontend *mem_frontend,
+ struct dvb_adapter *dvb_adapter)
+{
+ int ret;
+
+ dmxdev->filternum = 256;
+ dmxdev->demux = &dvbdemux->dmx;
+ dmxdev->capabilities = 0;
+ ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
+ if (ret < 0)
+ return ret;
+
+ hw_frontend->source = DMX_FRONTEND_0;
+ dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
+ mem_frontend->source = DMX_MEMORY_FE;
+ dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
+ return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
+}
+
+static int start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct ddb_input *input = dvbdmx->priv;
+
+ if (!input->users)
+ ddb_input_start(input);
+
+ return ++input->users;
+}
+
+static int stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct ddb_input *input = dvbdmx->priv;
+
+ if (--input->users)
+ return input->users;
+
+ ddb_input_stop(input);
+ return 0;
+}
+
+
+static void dvb_input_detach(struct ddb_input *input)
+{
+ struct dvb_adapter *adap = &input->adap;
+ struct dvb_demux *dvbdemux = &input->demux;
+
+ switch (input->attached) {
+ case 5:
+ if (input->fe2)
+ dvb_unregister_frontend(input->fe2);
+ if (input->fe) {
+ dvb_unregister_frontend(input->fe);
+ dvb_frontend_detach(input->fe);
+ input->fe = NULL;
+ }
+ case 4:
+ dvb_net_release(&input->dvbnet);
+
+ case 3:
+ dvbdemux->dmx.close(&dvbdemux->dmx);
+ dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
+ &input->hw_frontend);
+ dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
+ &input->mem_frontend);
+ dvb_dmxdev_release(&input->dmxdev);
+
+ case 2:
+ dvb_dmx_release(&input->demux);
+
+ case 1:
+ dvb_unregister_adapter(adap);
+ }
+ input->attached = 0;
+}
+
+static int dvb_input_attach(struct ddb_input *input)
+{
+ int ret;
+ struct ddb_port *port = input->port;
+ struct dvb_adapter *adap = &input->adap;
+ struct dvb_demux *dvbdemux = &input->demux;
+
+ ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE,
+ &input->port->dev->pdev->dev,
+ adapter_nr);
+ if (ret < 0) {
+ printk(KERN_ERR "ddbridge: Could not register adapter."
+ "Check if you enabled enough adapters in dvb-core!\n");
+ return ret;
+ }
+ input->attached = 1;
+
+ ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
+ start_feed,
+ stop_feed, input);
+ if (ret < 0)
+ return ret;
+ input->attached = 2;
+
+ ret = my_dvb_dmxdev_ts_card_init(&input->dmxdev, &input->demux,
+ &input->hw_frontend,
+ &input->mem_frontend, adap);
+ if (ret < 0)
+ return ret;
+ input->attached = 3;
+
+ ret = dvb_net_init(adap, &input->dvbnet, input->dmxdev.demux);
+ if (ret < 0)
+ return ret;
+ input->attached = 4;
+
+ input->fe = 0;
+ switch (port->type) {
+ case DDB_TUNER_DVBS_ST:
+ if (demod_attach_stv0900(input, 0) < 0)
+ return -ENODEV;
+ if (tuner_attach_stv6110(input, 0) < 0)
+ return -ENODEV;
+ if (input->fe) {
+ if (dvb_register_frontend(adap, input->fe) < 0)
+ return -ENODEV;
+ }
+ break;
+ case DDB_TUNER_DVBS_ST_AA:
+ if (demod_attach_stv0900(input, 1) < 0)
+ return -ENODEV;
+ if (tuner_attach_stv6110(input, 1) < 0)
+ return -ENODEV;
+ if (input->fe) {
+ if (dvb_register_frontend(adap, input->fe) < 0)
+ return -ENODEV;
+ }
+ break;
+ case DDB_TUNER_DVBCT_TR:
+ if (demod_attach_drxk(input) < 0)
+ return -ENODEV;
+ if (tuner_attach_tda18271(input) < 0)
+ return -ENODEV;
+ if (input->fe) {
+ if (dvb_register_frontend(adap, input->fe) < 0)
+ return -ENODEV;
+ }
+ if (input->fe2) {
+ if (dvb_register_frontend(adap, input->fe2) < 0)
+ return -ENODEV;
+ input->fe2->tuner_priv = input->fe->tuner_priv;
+ memcpy(&input->fe2->ops.tuner_ops,
+ &input->fe->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ }
+ break;
+ }
+ input->attached = 5;
+ return 0;
+}
+
+/****************************************************************************/
+/****************************************************************************/
+
+static ssize_t ts_write(struct file *file, const char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = dvbdev->priv;
+ size_t left = count;
+ int stat;
+
+ while (left) {
+ if (ddb_output_free(output) < 188) {
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ if (wait_event_interruptible(
+ output->wq, ddb_output_free(output) >= 188) < 0)
+ break;
+ }
+ stat = ddb_output_write(output, buf, left);
+ if (stat < 0)
+ break;
+ buf += stat;
+ left -= stat;
+ }
+ return (left == count) ? -EAGAIN : (count - left);
+}
+
+static ssize_t ts_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = dvbdev->priv;
+ struct ddb_input *input = output->port->input[0];
+ int left, read;
+
+ count -= count % 188;
+ left = count;
+ while (left) {
+ if (ddb_input_avail(input) < 188) {
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ if (wait_event_interruptible(
+ input->wq, ddb_input_avail(input) >= 188) < 0)
+ break;
+ }
+ read = ddb_input_read(input, buf, left);
+ left -= read;
+ buf += read;
+ }
+ return (left == count) ? -EAGAIN : (count - left);
+}
+
+static unsigned int ts_poll(struct file *file, poll_table *wait)
+{
+ /*
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = dvbdev->priv;
+ struct ddb_input *input = output->port->input[0];
+ */
+ unsigned int mask = 0;
+
+#if 0
+ if (data_avail_to_read)
+ mask |= POLLIN | POLLRDNORM;
+ if (data_avail_to_write)
+ mask |= POLLOUT | POLLWRNORM;
+
+ poll_wait(file, &read_queue, wait);
+ poll_wait(file, &write_queue, wait);
+#endif
+ return mask;
+}
+
+static const struct file_operations ci_fops = {
+ .owner = THIS_MODULE,
+ .read = ts_read,
+ .write = ts_write,
+ .open = dvb_generic_open,
+ .release = dvb_generic_release,
+ .poll = ts_poll,
+ .mmap = 0,
+};
+
+static struct dvb_device dvbdev_ci = {
+ .priv = 0,
+ .readers = -1,
+ .writers = -1,
+ .users = -1,
+ .fops = &ci_fops,
+};
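
/* Editorial illustration, not part of the patch: dvbdev_ci above is
 * registered further down via dvb_register_device(..., DVB_DEVICE_SEC), so
 * userspace reads the CI transport stream through an ordinary DVB node.
 * A minimal blocking read loop might look like this; the node path is an
 * assumption for the first adapter, and ts_read() rounds requests down to a
 * multiple of 188 bytes.
 */

        #include <stdio.h>
        #include <fcntl.h>
        #include <unistd.h>

        int main(void)
        {
                /* Assumed path; DVB_DEVICE_SEC maps to the "sec" node name. */
                int fd = open("/dev/dvb/adapter0/sec0", O_RDONLY);
                unsigned char buf[188 * 64];   /* whole TS packets */
                ssize_t n;

                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                while ((n = read(fd, buf, sizeof(buf))) > 0)
                        fwrite(buf, 1, n, stdout);   /* hand the TS on, e.g. into a pipe */
                close(fd);
                return 0;
        }
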
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
+static void input_tasklet(unsigned long data)
+{
+ struct ddb_input *input = (struct ddb_input *) data;
+ struct ddb *dev = input->port->dev;
+
+ spin_lock(&input->lock);
+ if (!input->running) {
+ spin_unlock(&input->lock);
+ return;
+ }
+ input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr));
+
+ if (input->port->class == DDB_PORT_TUNER) {
+ if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))
+ printk(KERN_ERR "Overflow input %d\n", input->nr);
+ while (input->cbuf != ((input->stat >> 11) & 0x1f)
+ || (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))) {
+ dvb_dmx_swfilter_packets(&input->demux,
+ input->vbuf[input->cbuf],
+ input->dma_buf_size / 188);
+
+ input->cbuf = (input->cbuf + 1) % input->dma_buf_num;
+ ddbwritel((input->cbuf << 11),
+ DMA_BUFFER_ACK(input->nr));
+ input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr));
+ }
+ }
+ if (input->port->class == DDB_PORT_CI)
+ wake_up(&input->wq);
+ spin_unlock(&input->lock);
+}
+
+static void output_tasklet(unsigned long data)
+{
+ struct ddb_output *output = (struct ddb_output *) data;
+ struct ddb *dev = output->port->dev;
+
+ spin_lock(&output->lock);
+ if (!output->running) {
+ spin_unlock(&output->lock);
+ return;
+ }
+ output->stat = ddbreadl(DMA_BUFFER_CURRENT(output->nr + 8));
+ wake_up(&output->wq);
+ spin_unlock(&output->lock);
+}
+
+
+struct cxd2099_cfg cxd_cfg = {
+ .bitrate = 62000,
+ .adr = 0x40,
+ .polarity = 1,
+ .clock_mode = 1,
+};
+
+static int ddb_ci_attach(struct ddb_port *port)
+{
+ int ret;
+
+ ret = dvb_register_adapter(&port->output->adap,
+ "DDBridge",
+ THIS_MODULE,
+ &port->dev->pdev->dev,
+ adapter_nr);
+ if (ret < 0)
+ return ret;
+ port->en = cxd2099_attach(&cxd_cfg, port, &port->i2c->adap);
+ if (!port->en) {
+ dvb_unregister_adapter(&port->output->adap);
+ return -ENODEV;
+ }
+ ddb_input_start(port->input[0]);
+ ddb_output_start(port->output);
+ dvb_ca_en50221_init(&port->output->adap,
+ port->en, 0, 1);
+ ret = dvb_register_device(&port->output->adap, &port->output->dev,
+ &dvbdev_ci, (void *) port->output,
+ DVB_DEVICE_SEC);
+ return ret;
+}
+
+static int ddb_port_attach(struct ddb_port *port)
+{
+ int ret = 0;
+
+ switch (port->class) {
+ case DDB_PORT_TUNER:
+ ret = dvb_input_attach(port->input[0]);
+ if (ret < 0)
+ break;
+ ret = dvb_input_attach(port->input[1]);
+ break;
+ case DDB_PORT_CI:
+ ret = ddb_ci_attach(port);
+ break;
+ default:
+ break;
+ }
+ if (ret < 0)
+ printk(KERN_ERR "port_attach on port %d failed\n", port->nr);
+ return ret;
+}
+
+static int ddb_ports_attach(struct ddb *dev)
+{
+ int i, ret = 0;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ ret = ddb_port_attach(port);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+
+static void ddb_ports_detach(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ switch (port->class) {
+ case DDB_PORT_TUNER:
+ dvb_input_detach(port->input[0]);
+ dvb_input_detach(port->input[1]);
+ break;
+ case DDB_PORT_CI:
+ if (port->output->dev)
+ dvb_unregister_device(port->output->dev);
+ if (port->en) {
+ ddb_input_stop(port->input[0]);
+ ddb_output_stop(port->output);
+ dvb_ca_en50221_release(port->en);
+ kfree(port->en);
+ port->en = 0;
+ dvb_unregister_adapter(&port->output->adap);
+ }
+ break;
+ }
+ }
+}
+
+/****************************************************************************/
+/****************************************************************************/
+
+static int port_has_ci(struct ddb_port *port)
+{
+ u8 val;
+ return i2c_read_reg(&port->i2c->adap, 0x40, 0, &val) ? 0 : 1;
+}
+
+static int port_has_stv0900(struct ddb_port *port)
+{
+ u8 val;
+ if (i2c_read_reg16(&port->i2c->adap, 0x69, 0xf100, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static int port_has_stv0900_aa(struct ddb_port *port)
+{
+ u8 val;
+ if (i2c_read_reg16(&port->i2c->adap, 0x68, 0xf100, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static int port_has_drxks(struct ddb_port *port)
+{
+ u8 val;
+ if (i2c_read(&port->i2c->adap, 0x29, &val) < 0)
+ return 0;
+ if (i2c_read(&port->i2c->adap, 0x2a, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static void ddb_port_probe(struct ddb_port *port)
+{
+ struct ddb *dev = port->dev;
+ char *modname = "NO MODULE";
+
+ port->class = DDB_PORT_NONE;
+
+ if (port_has_ci(port)) {
+ modname = "CI";
+ port->class = DDB_PORT_CI;
+ ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_stv0900(port)) {
+ modname = "DUAL DVB-S2";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBS_ST;
+ ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_stv0900_aa(port)) {
+ modname = "DUAL DVB-S2";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBS_ST_AA;
+ ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_drxks(port)) {
+ modname = "DUAL DVB-C/T";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBCT_TR;
+ ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+ }
+ printk(KERN_INFO "Port %d (TAB %d): %s\n",
+ port->nr, port->nr+1, modname);
+}
+
+static void ddb_input_init(struct ddb_port *port, int nr)
+{
+ struct ddb *dev = port->dev;
+ struct ddb_input *input = &dev->input[nr];
+
+ input->nr = nr;
+ input->port = port;
+ input->dma_buf_num = INPUT_DMA_BUFS;
+ input->dma_buf_size = INPUT_DMA_SIZE;
+ ddbwritel(0, TS_INPUT_CONTROL(nr));
+ ddbwritel(2, TS_INPUT_CONTROL(nr));
+ ddbwritel(0, TS_INPUT_CONTROL(nr));
+ ddbwritel(0, DMA_BUFFER_ACK(nr));
+ tasklet_init(&input->tasklet, input_tasklet, (unsigned long) input);
+ spin_lock_init(&input->lock);
+ init_waitqueue_head(&input->wq);
+}
+
+static void ddb_output_init(struct ddb_port *port, int nr)
+{
+ struct ddb *dev = port->dev;
+ struct ddb_output *output = &dev->output[nr];
+ output->nr = nr;
+ output->port = port;
+ output->dma_buf_num = OUTPUT_DMA_BUFS;
+ output->dma_buf_size = OUTPUT_DMA_SIZE;
+
+ ddbwritel(0, TS_OUTPUT_CONTROL(nr));
+ ddbwritel(2, TS_OUTPUT_CONTROL(nr));
+ ddbwritel(0, TS_OUTPUT_CONTROL(nr));
+ tasklet_init(&output->tasklet, output_tasklet, (unsigned long) output);
+ init_waitqueue_head(&output->wq);
+}
+
+static void ddb_ports_init(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ port->dev = dev;
+ port->nr = i;
+ port->i2c = &dev->i2c[i];
+ port->input[0] = &dev->input[2 * i];
+ port->input[1] = &dev->input[2 * i + 1];
+ port->output = &dev->output[i];
+
+ mutex_init(&port->i2c_gate_lock);
+ ddb_port_probe(port);
+ ddb_input_init(port, 2 * i);
+ ddb_input_init(port, 2 * i + 1);
+ ddb_output_init(port, i);
+ }
+}
+
+static void ddb_ports_release(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ port->dev = dev;
+ tasklet_kill(&port->input[0]->tasklet);
+ tasklet_kill(&port->input[1]->tasklet);
+ tasklet_kill(&port->output->tasklet);
+ }
+}
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
+static void irq_handle_i2c(struct ddb *dev, int n)
+{
+ struct ddb_i2c *i2c = &dev->i2c[n];
+
+ i2c->done = 1;
+ wake_up(&i2c->wq);
+}
+
+static irqreturn_t irq_handler(int irq, void *dev_id)
+{
+ struct ddb *dev = (struct ddb *) dev_id;
+ u32 s = ddbreadl(INTERRUPT_STATUS);
+
+ if (!s)
+ return IRQ_NONE;
+
+ do {
+ ddbwritel(s, INTERRUPT_ACK);
+
+ if (s & 0x00000001)
+ irq_handle_i2c(dev, 0);
+ if (s & 0x00000002)
+ irq_handle_i2c(dev, 1);
+ if (s & 0x00000004)
+ irq_handle_i2c(dev, 2);
+ if (s & 0x00000008)
+ irq_handle_i2c(dev, 3);
+
+ if (s & 0x00000100)
+ tasklet_schedule(&dev->input[0].tasklet);
+ if (s & 0x00000200)
+ tasklet_schedule(&dev->input[1].tasklet);
+ if (s & 0x00000400)
+ tasklet_schedule(&dev->input[2].tasklet);
+ if (s & 0x00000800)
+ tasklet_schedule(&dev->input[3].tasklet);
+ if (s & 0x00001000)
+ tasklet_schedule(&dev->input[4].tasklet);
+ if (s & 0x00002000)
+ tasklet_schedule(&dev->input[5].tasklet);
+ if (s & 0x00004000)
+ tasklet_schedule(&dev->input[6].tasklet);
+ if (s & 0x00008000)
+ tasklet_schedule(&dev->input[7].tasklet);
+
+ if (s & 0x00010000)
+ tasklet_schedule(&dev->output[0].tasklet);
+ if (s & 0x00020000)
+ tasklet_schedule(&dev->output[1].tasklet);
+ if (s & 0x00040000)
+ tasklet_schedule(&dev->output[2].tasklet);
+ if (s & 0x00080000)
+ tasklet_schedule(&dev->output[3].tasklet);
+
+ /* if (s & 0x000f0000) printk(KERN_DEBUG "%08x\n", istat); */
+ } while ((s = ddbreadl(INTERRUPT_STATUS)));
+
+ return IRQ_HANDLED;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
+static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
+{
+ u32 data, shift;
+
+ if (wlen > 4)
+ ddbwritel(1, SPI_CONTROL);
+ while (wlen > 4) {
+ /* FIXME: check for big-endian */
+ data = swab32(*(u32 *)wbuf);
+ wbuf += 4;
+ wlen -= 4;
+ ddbwritel(data, SPI_DATA);
+ while (ddbreadl(SPI_CONTROL) & 0x0004)
+ ;
+ }
+
+ if (rlen)
+ ddbwritel(0x0001 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL);
+ else
+ ddbwritel(0x0003 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL);
+
+ data = 0;
+ shift = ((4 - wlen) * 8);
+ while (wlen) {
+ data <<= 8;
+ data |= *wbuf;
+ wlen--;
+ wbuf++;
+ }
+ if (shift)
+ data <<= shift;
+ ddbwritel(data, SPI_DATA);
+ while (ddbreadl(SPI_CONTROL) & 0x0004)
+ ;
+
+ if (!rlen) {
+ ddbwritel(0, SPI_CONTROL);
+ return 0;
+ }
+ if (rlen > 4)
+ ddbwritel(1, SPI_CONTROL);
+
+ while (rlen > 4) {
+ ddbwritel(0xffffffff, SPI_DATA);
+ while (ddbreadl(SPI_CONTROL) & 0x0004)
+ ;
+ data = ddbreadl(SPI_DATA);
+ *(u32 *) rbuf = swab32(data);
+ rbuf += 4;
+ rlen -= 4;
+ }
+ ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL);
+ ddbwritel(0xffffffff, SPI_DATA);
+ while (ddbreadl(SPI_CONTROL) & 0x0004)
+ ;
+
+ data = ddbreadl(SPI_DATA);
+ ddbwritel(0, SPI_CONTROL);
+
+ if (rlen < 4)
+ data <<= ((4 - rlen) * 8);
+
+ while (rlen > 0) {
+ *rbuf = ((data >> 24) & 0xff);
+ data <<= 8;
+ rbuf++;
+ rlen--;
+ }
+ return 0;
+}
+
+#define DDB_MAGIC 'd'
+
+struct ddb_flashio {
+ __u8 *write_buf;
+ __u32 write_len;
+ __u8 *read_buf;
+ __u32 read_len;
+};
+
+#define IOCTL_DDB_FLASHIO _IOWR(DDB_MAGIC, 0x00, struct ddb_flashio)
+
+#define DDB_NAME "ddbridge"
+
+static u32 ddb_num;
+static struct ddb *ddbs[32];
+static struct class *ddb_class;
+static int ddb_major;
+
+static int ddb_open(struct inode *inode, struct file *file)
+{
+ struct ddb *dev = ddbs[iminor(inode)];
+
+ file->private_data = dev;
+ return 0;
+}
+
+static long ddb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ddb *dev = file->private_data;
+ void *parg = (void *)arg;
+ int res = -EFAULT;
+
+ switch (cmd) {
+ case IOCTL_DDB_FLASHIO:
+ {
+ struct ddb_flashio fio;
+ u8 *rbuf, *wbuf;
+
+ if (copy_from_user(&fio, parg, sizeof(fio)))
+ break;
+ if (fio.write_len > 1028 || fio.read_len > 1028 ||
+ fio.write_len + fio.read_len > 1028) {
+ printk(KERN_ERR "IOBUF too small\n");
+ return -ENOMEM;
+ }
+ wbuf = &dev->iobuf[0];
+ rbuf = wbuf + fio.write_len;
+ if (copy_from_user(wbuf, fio.write_buf, fio.write_len))
+ break;
+ res = flashio(dev, wbuf, fio.write_len,
+ rbuf, fio.read_len);
+ if (copy_to_user(fio.read_buf, rbuf, fio.read_len))
+ res = -EFAULT;
+ break;
+ }
+ default:
+ break;
+ }
+ return res;
+}
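
/* Editorial illustration, not part of the patch: a userspace flashing tool
 * reaches the SPI flash through IOCTL_DDB_FLASHIO on the character device
 * created below (ddb_devnode() names it "ddbridge/card%d"). The sketch
 * assumes a standard SPI NOR flash that answers the JEDEC RDID command
 * (0x9f); a real tool would include the driver's header instead of
 * redefining the ioctl and structure.
 */

        #include <stdio.h>
        #include <stdint.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>

        /* Mirrors struct ddb_flashio and IOCTL_DDB_FLASHIO above. */
        struct ddb_flashio {
                uint8_t  *write_buf;
                uint32_t  write_len;
                uint8_t  *read_buf;
                uint32_t  read_len;
        };
        #define DDB_MAGIC 'd'
        #define IOCTL_DDB_FLASHIO _IOWR(DDB_MAGIC, 0x00, struct ddb_flashio)

        int main(void)
        {
                int fd = open("/dev/ddbridge/card0", O_RDWR);
                uint8_t cmd = 0x9f;      /* JEDEC RDID, assuming a standard SPI NOR */
                uint8_t id[3] = { 0 };
                struct ddb_flashio fio = {
                        .write_buf = &cmd,
                        .write_len = 1,
                        .read_buf  = id,
                        .read_len  = sizeof(id),
                };

                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                if (ioctl(fd, IOCTL_DDB_FLASHIO, &fio) < 0) {
                        perror("IOCTL_DDB_FLASHIO");
                        return 1;
                }
                printf("flash id: %02x %02x %02x\n", id[0], id[1], id[2]);
                close(fd);
                return 0;
        }
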
+
+static const struct file_operations ddb_fops = {
+ .unlocked_ioctl = ddb_ioctl,
+ .open = ddb_open,
+};
+
+static char *ddb_devnode(struct device *device, mode_t *mode)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+
+ return kasprintf(GFP_KERNEL, "ddbridge/card%d", dev->nr);
+}
+
+static int ddb_class_create(void)
+{
+ ddb_major = register_chrdev(0, DDB_NAME, &ddb_fops);
+ if (ddb_major < 0)
+ return ddb_major;
+
+ ddb_class = class_create(THIS_MODULE, DDB_NAME);
+ if (IS_ERR(ddb_class)) {
+ unregister_chrdev(ddb_major, DDB_NAME);
+ return -1;
+ }
+ ddb_class->devnode = ddb_devnode;
+ return 0;
+}
+
+static void ddb_class_destroy(void)
+{
+ class_destroy(ddb_class);
+ unregister_chrdev(ddb_major, DDB_NAME);
+}
+
+static int ddb_device_create(struct ddb *dev)
+{
+ dev->nr = ddb_num++;
+ dev->ddb_dev = device_create(ddb_class, NULL,
+ MKDEV(ddb_major, dev->nr),
+ dev, "ddbridge%d", dev->nr);
+ ddbs[dev->nr] = dev;
+ if (IS_ERR(dev->ddb_dev))
+ return -1;
+ return 0;
+}
+
+static void ddb_device_destroy(struct ddb *dev)
+{
+ ddb_num--;
+ if (IS_ERR(dev->ddb_dev))
+ return;
+ device_destroy(ddb_class, MKDEV(ddb_major, dev->nr));
+}
+
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
+static void ddb_unmap(struct ddb *dev)
+{
+ if (dev->regs)
+ iounmap(dev->regs);
+ vfree(dev);
+}
+
+
+static void __devexit ddb_remove(struct pci_dev *pdev)
+{
+ struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev);
+
+ ddb_ports_detach(dev);
+ ddb_i2c_release(dev);
+
+ ddbwritel(0, INTERRUPT_ENABLE);
+ free_irq(dev->pdev->irq, dev);
+#ifdef CONFIG_PCI_MSI
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+#endif
+ ddb_ports_release(dev);
+ ddb_buffers_free(dev);
+ ddb_device_destroy(dev);
+
+ ddb_unmap(dev);
+ pci_set_drvdata(pdev, 0);
+ pci_disable_device(pdev);
+}
+
+
+static int __devinit ddb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct ddb *dev;
+ int stat = 0;
+ int irq_flag = IRQF_SHARED;
+
+ if (pci_enable_device(pdev) < 0)
+ return -ENODEV;
+
+ dev = vzalloc(sizeof(struct ddb));
+ if (dev == NULL)
+ return -ENOMEM;
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+ dev->info = (struct ddb_info *) id->driver_data;
+ printk(KERN_INFO "DDBridge driver detected: %s\n", dev->info->name);
+
+ dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+ if (!dev->regs) {
+ stat = -ENOMEM;
+ goto fail;
+ }
+ printk(KERN_INFO "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4));
+
+#ifdef CONFIG_PCI_MSI
+ if (pci_msi_enabled()) {
+ stat = pci_enable_msi(dev->pdev);
+ if (stat) {
+ printk(KERN_INFO "ddbridge: MSI not available.\n");
+ } else {
+ irq_flag = 0;
+ dev->msi = 1;
+ }
+ }
+#endif
+ stat = request_irq(dev->pdev->irq, irq_handler,
+ irq_flag, "DDBridge", (void *) dev);
+ if (stat < 0) {
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ goto fail;
+ }
+ ddbwritel(0, DMA_BASE_WRITE);
+ ddbwritel(0, DMA_BASE_READ);
+ ddbwritel(0xffffffff, INTERRUPT_ACK);
+ ddbwritel(0xfff0f, INTERRUPT_ENABLE);
+ ddbwritel(0, MSI1_ENABLE);
+
+ if (ddb_i2c_init(dev) < 0)
+ goto fail1;
+ ddb_ports_init(dev);
+ if (ddb_buffers_alloc(dev) < 0) {
+ printk(KERN_INFO ": Could not allocate buffer memory\n");
+ goto fail2;
+ }
+ if (ddb_ports_attach(dev) < 0)
+ goto fail3;
+ ddb_device_create(dev);
+ return 0;
+
+fail3:
+ ddb_ports_detach(dev);
+ printk(KERN_ERR "fail3\n");
+ ddb_ports_release(dev);
+fail2:
+ printk(KERN_ERR "fail2\n");
+ ddb_buffers_free(dev);
+fail1:
+ printk(KERN_ERR "fail1\n");
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ free_irq(dev->pdev->irq, dev);
+fail:
+ printk(KERN_ERR "fail\n");
+ ddb_unmap(dev);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ return -1;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
+static struct ddb_info ddb_none = {
+ .type = DDB_NONE,
+ .name = "Digital Devices PCIe bridge",
+};
+
+static struct ddb_info ddb_octopus = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Octopus DVB adapter",
+ .port_num = 4,
+};
+
+static struct ddb_info ddb_octopus_le = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Octopus LE DVB adapter",
+ .port_num = 2,
+};
+
+static struct ddb_info ddb_v6 = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Cine S2 V6 DVB adapter",
+ .port_num = 3,
+};
+
+#define DDVID 0xdd01 /* Digital Devices Vendor ID */
+
+#define DDB_ID(_vend, _dev, _subvend, _subdev, _driverdata) { \
+ .vendor = _vend, .device = _dev, \
+ .subvendor = _subvend, .subdevice = _subdev, \
+ .driver_data = (unsigned long)&_driverdata }
+
+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
+ DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0010, ddb_octopus),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0020, ddb_v6),
+ /* in case sub-ids got deleted in flash */
+ DDB_ID(DDVID, 0x0003, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, ddb_id_tbl);
+
+
+static struct pci_driver ddb_pci_driver = {
+ .name = "DDBridge",
+ .id_table = ddb_id_tbl,
+ .probe = ddb_probe,
+ .remove = ddb_remove,
+};
+
+static __init int module_init_ddbridge(void)
+{
+ int res;
+
+ printk(KERN_INFO "Digital Devices PCIe bridge driver, "
+ "Copyright (C) 2010-2011 Digital Devices GmbH\n");
+ res = ddb_class_create();
+ if (res < 0)
+ return res;
+ res = pci_register_driver(&ddb_pci_driver);
+ if (res < 0)
+ ddb_class_destroy();
+ return res;
+}
+
+static __exit void module_exit_ddbridge(void)
+{
+ pci_unregister_driver(&ddb_pci_driver);
+ ddb_class_destroy();
+}
+
+module_init(module_init_ddbridge);
+module_exit(module_exit_ddbridge);
+
+MODULE_DESCRIPTION("Digital Devices PCIe Bridge");
+MODULE_AUTHOR("Ralph Metzler");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.5");
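For reference, a minimal user-space sketch of the flash ioctl added above (illustrative only, not part of the patch): the device path follows the ddb_devnode() callback ("ddbridge/card%d"), the struct mirrors ddb_flashio, and the 0x9f JEDEC READ-ID command byte is an assumption about the on-board SPI flash.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* User-space mirror of the kernel's struct ddb_flashio (sketch). */
struct ddb_flashio {
        uint8_t  *write_buf;
        uint32_t  write_len;
        uint8_t  *read_buf;
        uint32_t  read_len;
};

#define DDB_MAGIC 'd'
#define IOCTL_DDB_FLASHIO _IOWR(DDB_MAGIC, 0x00, struct ddb_flashio)

int main(void)
{
        uint8_t cmd = 0x9f;              /* JEDEC READ ID (assumed supported by the flash) */
        uint8_t id[3] = { 0, 0, 0 };
        struct ddb_flashio fio = {
                .write_buf = &cmd, .write_len = 1,
                .read_buf  = id,   .read_len  = sizeof(id),
        };
        int fd = open("/dev/ddbridge/card0", O_RDWR);

        if (fd < 0 || ioctl(fd, IOCTL_DDB_FLASHIO, &fio) < 0) {
                perror("ddbridge flashio");
                return 1;
        }
        printf("flash id: %02x %02x %02x\n", id[0], id[1], id[2]);
        close(fd);
        return 0;
}

The combined write and read lengths must fit in the driver's 1028-byte bounce buffer, so larger flash transfers have to be split into several transactions.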
diff --git a/drivers/media/dvb/ddbridge/ddbridge-regs.h b/drivers/media/dvb/ddbridge/ddbridge-regs.h
new file mode 100644
index 0000000..a3ccb31
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/ddbridge-regs.h
@@ -0,0 +1,151 @@
+/*
+ * ddbridge-regs.h: Digital Devices PCIe bridge driver
+ *
+ * Copyright (C) 2010-2011 Digital Devices GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+/* DD-DVBBridgeV1.h 273 2010-09-17 05:03:16Z manfred */
+
+/* Register Definitions */
+
+#define CUR_REGISTERMAP_VERSION 0x10000
+
+#define HARDWARE_VERSION 0x00
+#define REGISTERMAP_VERSION 0x04
+
+/* ------------------------------------------------------------------------- */
+/* SPI Controller */
+
+#define SPI_CONTROL 0x10
+#define SPI_DATA 0x14
+
+/* ------------------------------------------------------------------------- */
+
+/* Interrupt controller */
+/* How many MSIs are available depends on the HW (min 2, max 8). */
+/* How many are usable also depends on the host platform. */
+
+#define INTERRUPT_BASE (0x40)
+
+#define INTERRUPT_ENABLE (INTERRUPT_BASE + 0x00)
+#define MSI0_ENABLE (INTERRUPT_BASE + 0x00)
+#define MSI1_ENABLE (INTERRUPT_BASE + 0x04)
+#define MSI2_ENABLE (INTERRUPT_BASE + 0x08)
+#define MSI3_ENABLE (INTERRUPT_BASE + 0x0C)
+#define MSI4_ENABLE (INTERRUPT_BASE + 0x10)
+#define MSI5_ENABLE (INTERRUPT_BASE + 0x14)
+#define MSI6_ENABLE (INTERRUPT_BASE + 0x18)
+#define MSI7_ENABLE (INTERRUPT_BASE + 0x1C)
+
+#define INTERRUPT_STATUS (INTERRUPT_BASE + 0x20)
+#define INTERRUPT_ACK (INTERRUPT_BASE + 0x20)
+
+#define INTMASK_I2C1 (0x00000001)
+#define INTMASK_I2C2 (0x00000002)
+#define INTMASK_I2C3 (0x00000004)
+#define INTMASK_I2C4 (0x00000008)
+
+#define INTMASK_CIRQ1 (0x00000010)
+#define INTMASK_CIRQ2 (0x00000020)
+#define INTMASK_CIRQ3 (0x00000040)
+#define INTMASK_CIRQ4 (0x00000080)
+
+#define INTMASK_TSINPUT1 (0x00000100)
+#define INTMASK_TSINPUT2 (0x00000200)
+#define INTMASK_TSINPUT3 (0x00000400)
+#define INTMASK_TSINPUT4 (0x00000800)
+#define INTMASK_TSINPUT5 (0x00001000)
+#define INTMASK_TSINPUT6 (0x00002000)
+#define INTMASK_TSINPUT7 (0x00004000)
+#define INTMASK_TSINPUT8 (0x00008000)
+
+#define INTMASK_TSOUTPUT1 (0x00010000)
+#define INTMASK_TSOUTPUT2 (0x00020000)
+#define INTMASK_TSOUTPUT3 (0x00040000)
+#define INTMASK_TSOUTPUT4 (0x00080000)
+
+/* ------------------------------------------------------------------------- */
+/* I2C Master Controller */
+
+#define I2C_BASE (0x80) /* Byte offset */
+
+#define I2C_COMMAND (0x00)
+#define I2C_TIMING (0x04)
+#define I2C_TASKLENGTH (0x08) /* High read, low write */
+#define I2C_TASKADDRESS (0x0C) /* High read, low write */
+
+#define I2C_MONITOR (0x1C)
+
+#define I2C_BASE_1 (I2C_BASE + 0x00)
+#define I2C_BASE_2 (I2C_BASE + 0x20)
+#define I2C_BASE_3 (I2C_BASE + 0x40)
+#define I2C_BASE_4 (I2C_BASE + 0x60)
+
+#define I2C_BASE_N(i) (I2C_BASE + (i) * 0x20)
+
+#define I2C_TASKMEM_BASE (0x1000) /* Byte offset */
+#define I2C_TASKMEM_SIZE (0x1000)
+
+#define I2C_SPEED_400 (0x04030404)
+#define I2C_SPEED_200 (0x09080909)
+#define I2C_SPEED_154 (0x0C0B0C0C)
+#define I2C_SPEED_100 (0x13121313)
+#define I2C_SPEED_77 (0x19181919)
+#define I2C_SPEED_50 (0x27262727)
+
+
+/* ------------------------------------------------------------------------- */
+/* DMA Controller */
+
+#define DMA_BASE_WRITE (0x100)
+#define DMA_BASE_READ (0x140)
+
+#define DMA_CONTROL (0x00) /* 64 */
+#define DMA_ERROR (0x04) /* 65 ( only read instance ) */
+
+#define DMA_DIAG_CONTROL (0x1C) /* 71 */
+#define DMA_DIAG_PACKETCOUNTER_LOW (0x20) /* 72 */
+#define DMA_DIAG_PACKETCOUNTER_HIGH (0x24) /* 73 */
+#define DMA_DIAG_TIMECOUNTER_LOW (0x28) /* 74 */
+#define DMA_DIAG_TIMECOUNTER_HIGH (0x2C) /* 75 */
+#define DMA_DIAG_RECHECKCOUNTER (0x30) /* 76 ( Split completions on read ) */
+#define DMA_DIAG_WAITTIMEOUTINIT (0x34) /* 77 */
+#define DMA_DIAG_WAITOVERFLOWCOUNTER (0x38) /* 78 */
+#define DMA_DIAG_WAITCOUNTER (0x3C) /* 79 */
+
+/* ------------------------------------------------------------------------- */
+/* DMA Buffer */
+
+#define TS_INPUT_BASE (0x200)
+#define TS_INPUT_CONTROL(i) (TS_INPUT_BASE + (i) * 16 + 0x00)
+
+#define TS_OUTPUT_BASE (0x280)
+#define TS_OUTPUT_CONTROL(i) (TS_OUTPUT_BASE + (i) * 16 + 0x00)
+
+#define DMA_BUFFER_BASE (0x300)
+
+#define DMA_BUFFER_CONTROL(i) (DMA_BUFFER_BASE + (i) * 16 + 0x00)
+#define DMA_BUFFER_ACK(i) (DMA_BUFFER_BASE + (i) * 16 + 0x04)
+#define DMA_BUFFER_CURRENT(i) (DMA_BUFFER_BASE + (i) * 16 + 0x08)
+#define DMA_BUFFER_SIZE(i) (DMA_BUFFER_BASE + (i) * 16 + 0x0c)
+
+#define DMA_BASE_ADDRESS_TABLE (0x2000)
+#define DMA_BASE_ADDRESS_TABLE_ENTRIES (512)
+
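The interrupt-controller registers above imply the usual read-status / acknowledge / dispatch pattern. The following is a simplified illustration of that pattern only; the driver's real irq_handler lives in ddbridge-core.c and may differ. It assumes the definitions above are in scope and that regs is the ioremap()ed BAR0 base.

#include <linux/io.h>
#include <linux/interrupt.h>

/* Sketch: regs is the mapped BAR0 base; returns IRQ_NONE / IRQ_HANDLED. */
static irqreturn_t example_ack_and_dispatch(void __iomem *regs)
{
        u32 s = readl(regs + INTERRUPT_STATUS);

        if (!s)
                return IRQ_NONE;                /* not ours (the line may be shared) */

        writel(s, regs + INTERRUPT_ACK);        /* ack exactly the bits we saw */

        if (s & INTMASK_I2C1) {
                /* complete the pending I2C channel 1 transfer */
        }
        if (s & INTMASK_TSINPUT1) {
                /* hand finished TS input 1 DMA buffers to the demux */
        }
        if (s & INTMASK_TSOUTPUT1) {
                /* refill TS output 1 DMA buffers */
        }

        return IRQ_HANDLED;
}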
diff --git a/drivers/media/dvb/ddbridge/ddbridge.h b/drivers/media/dvb/ddbridge/ddbridge.h
new file mode 100644
index 0000000..6d14893
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/ddbridge.h
@@ -0,0 +1,187 @@
+/*
+ * ddbridge.h: Digital Devices PCIe bridge driver
+ *
+ * Copyright (C) 2010-2011 Digital Devices GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef _DDBRIDGE_H_
+#define _DDBRIDGE_H_
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <asm/dma.h>
+#include <linux/dvb/frontend.h>
+#include <linux/dvb/ca.h>
+#include <linux/dvb/video.h>
+#include <linux/dvb/audio.h>
+#include <linux/socket.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_ringbuffer.h"
+#include "dvb_ca_en50221.h"
+#include "dvb_net.h"
+#include "cxd2099.h"
+
+#define DDB_MAX_I2C 4
+#define DDB_MAX_PORT 4
+#define DDB_MAX_INPUT 8
+#define DDB_MAX_OUTPUT 4
+
+struct ddb_info {
+ int type;
+#define DDB_NONE 0
+#define DDB_OCTOPUS 1
+ char *name;
+ int port_num;
+ u32 port_type[DDB_MAX_PORT];
+};
+
+/* DMA_SIZE MUST be divisible by 188 and 128 !!! */
+
+#define INPUT_DMA_MAX_BUFS 32 /* hardware table limit */
+#define INPUT_DMA_BUFS 8
+#define INPUT_DMA_SIZE (128*47*21)
+
+#define OUTPUT_DMA_MAX_BUFS 32
+#define OUTPUT_DMA_BUFS 8
+#define OUTPUT_DMA_SIZE (128*47*21)
+
+struct ddb;
+struct ddb_port;
+
+struct ddb_input {
+ struct ddb_port *port;
+ u32 nr;
+ int attached;
+
+ dma_addr_t pbuf[INPUT_DMA_MAX_BUFS];
+ u8 *vbuf[INPUT_DMA_MAX_BUFS];
+ u32 dma_buf_num;
+ u32 dma_buf_size;
+
+ struct tasklet_struct tasklet;
+ spinlock_t lock;
+ wait_queue_head_t wq;
+ int running;
+ u32 stat;
+ u32 cbuf;
+ u32 coff;
+
+ struct dvb_adapter adap;
+ struct dvb_device *dev;
+ struct dvb_frontend *fe;
+ struct dvb_frontend *fe2;
+ struct dmxdev dmxdev;
+ struct dvb_demux demux;
+ struct dvb_net dvbnet;
+ struct dmx_frontend hw_frontend;
+ struct dmx_frontend mem_frontend;
+ int users;
+ int (*gate_ctrl)(struct dvb_frontend *, int);
+};
+
+struct ddb_output {
+ struct ddb_port *port;
+ u32 nr;
+ dma_addr_t pbuf[OUTPUT_DMA_MAX_BUFS];
+ u8 *vbuf[OUTPUT_DMA_MAX_BUFS];
+ u32 dma_buf_num;
+ u32 dma_buf_size;
+ struct tasklet_struct tasklet;
+ spinlock_t lock;
+ wait_queue_head_t wq;
+ int running;
+ u32 stat;
+ u32 cbuf;
+ u32 coff;
+
+ struct dvb_adapter adap;
+ struct dvb_device *dev;
+};
+
+struct ddb_i2c {
+ struct ddb *dev;
+ u32 nr;
+ struct i2c_adapter adap;
+ struct i2c_adapter adap2;
+ u32 regs;
+ u32 rbuf;
+ u32 wbuf;
+ int done;
+ wait_queue_head_t wq;
+};
+
+struct ddb_port {
+ struct ddb *dev;
+ u32 nr;
+ struct ddb_i2c *i2c;
+ struct mutex i2c_gate_lock;
+ u32 class;
+#define DDB_PORT_NONE 0
+#define DDB_PORT_CI 1
+#define DDB_PORT_TUNER 2
+ u32 type;
+#define DDB_TUNER_NONE 0
+#define DDB_TUNER_DVBS_ST 1
+#define DDB_TUNER_DVBS_ST_AA 2
+#define DDB_TUNER_DVBCT_TR 16
+#define DDB_TUNER_DVBCT_ST 17
+ u32 adr;
+
+ struct ddb_input *input[2];
+ struct ddb_output *output;
+ struct dvb_ca_en50221 *en;
+};
+
+struct ddb {
+ struct pci_dev *pdev;
+ void __iomem *regs;
+ struct ddb_port port[DDB_MAX_PORT];
+ struct ddb_i2c i2c[DDB_MAX_I2C];
+ struct ddb_input input[DDB_MAX_INPUT];
+ struct ddb_output output[DDB_MAX_OUTPUT];
+
+ struct device *ddb_dev;
+ int nr;
+ u8 iobuf[1028];
+
+ struct ddb_info *info;
+ int msi;
+};
+
+/****************************************************************************/
+
+#define ddbwritel(_val, _adr) writel((_val), dev->regs + (_adr))
+#define ddbreadl(_adr) readl(dev->regs + (_adr))
+#define ddbcpyto(_adr, _src, _count) memcpy_toio(dev->regs + (_adr), \
+ (_src), (_count))
+#define ddbcpyfrom(_dst, _adr, _count) memcpy_fromio((_dst), \
+ dev->regs + (_adr), (_count))
+
+/****************************************************************************/
+
+#endif
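The "must be divisible by 188 and 128" rule stated above (188 is the MPEG-TS packet size; 128 is the DMA granularity per that comment) can be enforced at compile time. A hypothetical helper, not part of the driver, could carry the checks:

#include <linux/bug.h>

/* Illustrative only: fails the build if the DMA sizes break the rule above. */
static inline void ddb_check_dma_sizes(void)
{
        BUILD_BUG_ON(INPUT_DMA_SIZE % 188);     /* 188 = MPEG-TS packet size */
        BUILD_BUG_ON(INPUT_DMA_SIZE % 128);     /* 128-byte DMA granularity */
        BUILD_BUG_ON(OUTPUT_DMA_SIZE % 188);
        BUILD_BUG_ON(OUTPUT_DMA_SIZE % 128);
}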
diff --git a/drivers/media/dvb/dvb-core/Makefile b/drivers/media/dvb/dvb-core/Makefile
index 0b51828..8f22bcd 100644
--- a/drivers/media/dvb/dvb-core/Makefile
+++ b/drivers/media/dvb/dvb-core/Makefile
@@ -2,8 +2,10 @@
# Makefile for the kernel DVB device drivers.
#
+dvb-net-$(CONFIG_DVB_NET) := dvb_net.o
+
dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \
dvb_ca_en50221.o dvb_frontend.o \
- dvb_net.o dvb_ringbuffer.o dvb_math.o
+ $(dvb-net-y) dvb_ringbuffer.o dvb_math.o
obj-$(CONFIG_DVB_CORE) += dvb-core.o
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 5b6b451..efe9c30 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -904,7 +904,7 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
.buffer = b \
}
-static struct dtv_cmds_h dtv_cmds[] = {
+static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_TUNE, 1, 0),
_DTV_CMD(DTV_CLEAR, 1, 0),
@@ -966,6 +966,7 @@ static struct dtv_cmds_h dtv_cmds[] = {
_DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
_DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
+ _DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
/* Get */
_DTV_CMD(DTV_DISEQC_SLAVE_REPLY, 0, 1),
diff --git a/drivers/media/dvb/dvb-core/dvb_net.h b/drivers/media/dvb/dvb-core/dvb_net.h
index 3a3126ca..1e53acd 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.h
+++ b/drivers/media/dvb/dvb-core/dvb_net.h
@@ -32,6 +32,8 @@
#define DVB_NET_DEVICES_MAX 10
+#ifdef CONFIG_DVB_NET
+
struct dvb_net {
struct dvb_device *dvbdev;
struct net_device *device[DVB_NET_DEVICES_MAX];
@@ -40,8 +42,25 @@ struct dvb_net {
struct dmx_demux *demux;
};
-
void dvb_net_release(struct dvb_net *);
int dvb_net_init(struct dvb_adapter *, struct dvb_net *, struct dmx_demux *);
+#else
+
+struct dvb_net {
+ struct dvb_device *dvbdev;
+};
+
+static inline void dvb_net_release(struct dvb_net *dvbnet)
+{
+}
+
+static inline int dvb_net_init(struct dvb_adapter *adap,
+ struct dvb_net *dvbnet, struct dmx_demux *dmx)
+{
+ return 0;
+}
+
+#endif /* ifdef CONFIG_DVB_NET */
+
#endif
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index e85304c..5d73dec 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -81,6 +81,7 @@ config DVB_USB_DIB0700
select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_XC4000 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MXL5007T if !MEDIA_TUNER_CUSTOMISE
help
Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 100ebc3..d7ad05f 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -91,7 +91,6 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req)
case GET_CONFIG:
case READ_MEMORY:
case RECONNECT_USB:
- case GET_IR_CODE:
write = 0;
break;
case READ_I2C:
@@ -164,13 +163,6 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req)
deb_xfer("<<< ");
debug_dump(buf, act_len, deb_xfer);
- /* remote controller query status is 1 if remote code is not received */
- if (req->cmd == GET_IR_CODE && buf[1] == 1) {
- buf[1] = 0; /* clear command "error" status */
- memset(&buf[2], 0, req->data_len);
- buf[3] = 1; /* no remote code received mark */
- }
-
/* check status */
if (buf[1]) {
err("command failed:%d", buf[1]);
@@ -292,6 +284,10 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
}
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
+ if (msg[i].len > 3 || msg[i+1].len > 61) {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
if (msg[i].addr ==
af9015_af9013_config[0].demod_address)
req.cmd = READ_MEMORY;
@@ -306,12 +302,16 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
ret = af9015_ctrl_msg(d, &req);
i += 2;
} else if (msg[i].flags & I2C_M_RD) {
- ret = -EINVAL;
+ if (msg[i].len > 61) {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
if (msg[i].addr ==
- af9015_af9013_config[0].demod_address)
+ af9015_af9013_config[0].demod_address) {
+ ret = -EINVAL;
goto error;
- else
- req.cmd = READ_I2C;
+ }
+ req.cmd = READ_I2C;
req.i2c_addr = msg[i].addr;
req.addr = addr;
req.mbox = mbox;
@@ -321,6 +321,10 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
ret = af9015_ctrl_msg(d, &req);
i += 1;
} else {
+ if (msg[i].len > 21) {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
if (msg[i].addr ==
af9015_af9013_config[0].demod_address)
req.cmd = WRITE_MEMORY;
@@ -735,6 +739,7 @@ static const struct af9015_rc_setup af9015_rc_setup_hashes[] = {
{ 0xb8feb708, RC_MAP_MSI_DIGIVOX_II },
{ 0xa3703d00, RC_MAP_ALINK_DTU_M },
{ 0x9b7dc64e, RC_MAP_TOTAL_MEDIA_IN_HAND }, /* MYGICTV U718 */
+ { 0x5d49e3db, RC_MAP_DIGITTRADE }, /* LC-Power LC-USB-DVBT */
{ }
};
@@ -749,6 +754,8 @@ static const struct af9015_rc_setup af9015_rc_setup_usbids[] = {
RC_MAP_AZUREWAVE_AD_TU700 },
{ (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGI_VOX_MINI_III,
RC_MAP_MSI_DIGIVOX_III },
+ { (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGIVOX_DUO,
+ RC_MAP_MSI_DIGIVOX_III },
{ (USB_VID_LEADTEK << 16) + USB_PID_WINFAST_DTV_DONGLE_GOLD,
RC_MAP_LEADTEK_Y04G0051 },
{ (USB_VID_AVERMEDIA << 16) + USB_PID_AVERMEDIA_VOLAR_X,
@@ -759,6 +766,8 @@ static const struct af9015_rc_setup af9015_rc_setup_usbids[] = {
RC_MAP_DIGITALNOW_TINYTWIN },
{ (USB_VID_GTEK << 16) + USB_PID_TINYTWIN_3,
RC_MAP_DIGITALNOW_TINYTWIN },
+ { (USB_VID_KWORLD_2 << 16) + USB_PID_SVEON_STV22,
+ RC_MAP_MSI_DIGIVOX_III },
{ }
};
@@ -1082,44 +1091,11 @@ error:
return ret;
}
-/* init 2nd I2C adapter */
-static int af9015_i2c_init(struct dvb_usb_device *d)
-{
- int ret;
- struct af9015_state *state = d->priv;
- deb_info("%s:\n", __func__);
-
- strncpy(state->i2c_adap.name, d->desc->name,
- sizeof(state->i2c_adap.name));
- state->i2c_adap.algo = d->props.i2c_algo;
- state->i2c_adap.algo_data = NULL;
- state->i2c_adap.dev.parent = &d->udev->dev;
-
- i2c_set_adapdata(&state->i2c_adap, d);
-
- ret = i2c_add_adapter(&state->i2c_adap);
- if (ret < 0)
- err("could not add i2c adapter");
-
- return ret;
-}
-
static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
{
int ret;
- struct af9015_state *state = adap->dev->priv;
- struct i2c_adapter *i2c_adap;
-
- if (adap->id == 0) {
- /* select I2C adapter */
- i2c_adap = &adap->dev->i2c_adap;
-
- deb_info("%s: init I2C\n", __func__);
- ret = af9015_i2c_init(adap->dev);
- } else {
- /* select I2C adapter */
- i2c_adap = &state->i2c_adap;
+ if (adap->id == 1) {
/* copy firmware to 2nd demodulator */
if (af9015_config.dual_mode) {
ret = af9015_copy_firmware(adap->dev);
@@ -1136,7 +1112,7 @@ static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
/* attach demodulator */
adap->fe = dvb_attach(af9013_attach, &af9015_af9013_config[adap->id],
- i2c_adap);
+ &adap->dev->i2c_adap);
return adap->fe == NULL ? -ENODEV : 0;
}
@@ -1206,57 +1182,56 @@ static struct mxl5007t_config af9015_mxl5007t_config = {
static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
{
- struct af9015_state *state = adap->dev->priv;
- struct i2c_adapter *i2c_adap;
int ret;
deb_info("%s:\n", __func__);
- /* select I2C adapter */
- if (adap->id == 0)
- i2c_adap = &adap->dev->i2c_adap;
- else
- i2c_adap = &state->i2c_adap;
-
switch (af9015_af9013_config[adap->id].tuner) {
case AF9013_TUNER_MT2060:
case AF9013_TUNER_MT2060_2:
- ret = dvb_attach(mt2060_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(mt2060_attach, adap->fe, &adap->dev->i2c_adap,
&af9015_mt2060_config,
af9015_config.mt2060_if1[adap->id])
== NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_QT1010:
case AF9013_TUNER_QT1010A:
- ret = dvb_attach(qt1010_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(qt1010_attach, adap->fe, &adap->dev->i2c_adap,
&af9015_qt1010_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_TDA18271:
- ret = dvb_attach(tda18271_attach, adap->fe, 0xc0, i2c_adap,
+ ret = dvb_attach(tda18271_attach, adap->fe, 0xc0,
+ &adap->dev->i2c_adap,
&af9015_tda18271_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_TDA18218:
- ret = dvb_attach(tda18218_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(tda18218_attach, adap->fe,
+ &adap->dev->i2c_adap,
&af9015_tda18218_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MXL5003D:
- ret = dvb_attach(mxl5005s_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(mxl5005s_attach, adap->fe,
+ &adap->dev->i2c_adap,
&af9015_mxl5003_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MXL5005D:
case AF9013_TUNER_MXL5005R:
- ret = dvb_attach(mxl5005s_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(mxl5005s_attach, adap->fe,
+ &adap->dev->i2c_adap,
&af9015_mxl5005_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_ENV77H11D5:
- ret = dvb_attach(dvb_pll_attach, adap->fe, 0xc0, i2c_adap,
+ ret = dvb_attach(dvb_pll_attach, adap->fe, 0xc0,
+ &adap->dev->i2c_adap,
DVB_PLL_TDA665X) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MC44S803:
- ret = dvb_attach(mc44s803_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(mc44s803_attach, adap->fe,
+ &adap->dev->i2c_adap,
&af9015_mc44s803_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MXL5007T:
- ret = dvb_attach(mxl5007t_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(mxl5007t_attach, adap->fe,
+ &adap->dev->i2c_adap,
0xc0, &af9015_mxl5007t_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_UNKNOWN:
@@ -1309,6 +1284,7 @@ static struct usb_device_id af9015_usb_table[] = {
USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC)},
/* 35 */{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
{USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22)},
{0},
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1502,7 +1478,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 9, /* check max from dvb-usb.h */
+ .num_device_descs = 10, /* check max from dvb-usb.h */
.devices = {
{
.name = "Xtensions XD-380",
@@ -1554,6 +1530,11 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.cold_ids = {&af9015_usb_table[20], NULL},
.warm_ids = {NULL},
},
+ {
+ .name = "Sveon STV22 Dual USB DVB-T Tuner HDTV",
+ .cold_ids = {&af9015_usb_table[37], NULL},
+ .warm_ids = {NULL},
+ },
}
}, {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
@@ -1704,33 +1685,11 @@ static int af9015_usb_probe(struct usb_interface *intf,
return ret;
}
-static void af9015_i2c_exit(struct dvb_usb_device *d)
-{
- struct af9015_state *state = d->priv;
- deb_info("%s:\n", __func__);
-
- /* remove 2nd I2C adapter */
- if (d->state & DVB_USB_STATE_I2C)
- i2c_del_adapter(&state->i2c_adap);
-}
-
-static void af9015_usb_device_exit(struct usb_interface *intf)
-{
- struct dvb_usb_device *d = usb_get_intfdata(intf);
- deb_info("%s:\n", __func__);
-
- /* remove 2nd I2C adapter */
- if (d != NULL && d->desc != NULL)
- af9015_i2c_exit(d);
-
- dvb_usb_device_exit(intf);
-}
-
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver af9015_usb_driver = {
.name = "dvb_usb_af9015",
.probe = af9015_usb_probe,
- .disconnect = af9015_usb_device_exit,
+ .disconnect = dvb_usb_device_exit,
.id_table = af9015_usb_table,
};
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index beb3004..6252ea6 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -99,7 +99,6 @@ enum af9015_ir_mode {
};
struct af9015_state {
- struct i2c_adapter i2c_adap; /* I2C adapter for 2nd FE */
u8 rc_repeat;
u32 rc_keycode;
u8 rc_last[4];
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 7c327b5..2cbf19a 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -347,15 +347,17 @@ static struct isl6423_config anysee_isl6423_config = {
* PCB: ?
* parts: DNOS404ZH102A(MT352, DTT7579(?))
*
- * E30 VID=04b4 PID=861f HW=2 FW=2.1 Product=????????
- * PCB: ?
+ * E30 VID=04b4 PID=861f HW=2 FW=2.1 "anysee-T(LP)"
+ * PCB: PCB 507T (rev1.61)
* parts: DNOS404ZH103A(ZL10353, DTT7579(?))
+ * OEA=0a OEB=00 OEC=00 OED=ff OEE=00
+ * IOA=45 IOB=ff IOC=00 IOD=ff IOE=00
*
* E30 Plus VID=04b4 PID=861f HW=6 FW=1.0 "anysee"
* PCB: 507CD (rev1.1)
* parts: DNOS404ZH103A(ZL10353, DTT7579(?)), CST56I01
- * OEA=80 OEB=00 OEC=00 OED=ff OEF=fe
- * IOA=4f IOB=ff IOC=00 IOD=06 IOF=01
+ * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe
+ * IOA=4f IOB=ff IOC=00 IOD=06 IOE=01
* IOD[0] ZL10353 1=enabled
* IOA[7] TS 0=enabled
* tuner is not behind ZL10353 I2C-gate (no care if gate disabled or not)
@@ -363,30 +365,30 @@ static struct isl6423_config anysee_isl6423_config = {
* E30 C Plus VID=04b4 PID=861f HW=10 FW=1.0 "anysee-DC(LP)"
* PCB: 507DC (rev0.2)
* parts: TDA10023, DTOS403IH102B TM, CST56I01
- * OEA=80 OEB=00 OEC=00 OED=ff OEF=fe
- * IOA=4f IOB=ff IOC=00 IOD=26 IOF=01
+ * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe
+ * IOA=4f IOB=ff IOC=00 IOD=26 IOE=01
* IOD[0] TDA10023 1=enabled
*
* E30 S2 Plus VID=04b4 PID=861f HW=11 FW=0.1 "anysee-S2(LP)"
* PCB: 507SI (rev2.1)
* parts: BS2N10WCC01(CX24116, CX24118), ISL6423, TDA8024
- * OEA=80 OEB=00 OEC=ff OED=ff OEF=fe
- * IOA=4d IOB=ff IOC=00 IOD=26 IOF=01
+ * OEA=80 OEB=00 OEC=ff OED=ff OEE=fe
+ * IOA=4d IOB=ff IOC=00 IOD=26 IOE=01
* IOD[0] CX24116 1=enabled
*
* E30 C Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)"
* PCB: 507FA (rev0.4)
* parts: TDA10023, DTOS403IH102B TM, TDA8024
- * OEA=80 OEB=00 OEC=ff OED=ff OEF=ff
- * IOA=4d IOB=ff IOC=00 IOD=00 IOF=c0
+ * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff
+ * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0
* IOD[5] TDA10023 1=enabled
* IOE[0] tuner 1=enabled
*
* E30 Combo Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)"
* PCB: 507FA (rev1.1)
* parts: ZL10353, TDA10023, DTOS403IH102B TM, TDA8024
- * OEA=80 OEB=00 OEC=ff OED=ff OEF=ff
- * IOA=4d IOB=ff IOC=00 IOD=00 IOF=c0
+ * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff
+ * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0
* DVB-C:
* IOD[5] TDA10023 1=enabled
* IOE[0] tuner 1=enabled
@@ -398,8 +400,8 @@ static struct isl6423_config anysee_isl6423_config = {
* E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
* PCB: 508TC (rev0.6)
* parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212)
- * OEA=80 OEB=00 OEC=03 OED=f7 OEF=ff
- * IOA=4d IOB=00 IOC=cc IOD=48 IOF=e4
+ * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
+ * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4
* IOA[7] TS 1=enabled
* IOE[4] TDA18212 1=enabled
* DVB-C:
@@ -414,11 +416,34 @@ static struct isl6423_config anysee_isl6423_config = {
* E7 S2 VID=1c73 PID=861f HW=19 FW=0.4 AMTCI=0.5 "anysee-E7S2(LP)"
* PCB: 508S2 (rev0.7)
* parts: DNBU10512IST(STV0903, STV6110), ISL6423
- * OEA=80 OEB=00 OEC=03 OED=f7 OEF=ff
- * IOA=4d IOB=00 IOC=c4 IOD=08 IOF=e4
+ * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
+ * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4
* IOA[7] TS 1=enabled
* IOE[5] STV0903 1=enabled
*
+ * E7 PTC VID=1c73 PID=861f HW=21 FW=0.1 AMTCI=?? "anysee-E7PTC(LP)"
+ * PCB: 508PTC (rev0.5)
+ * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212)
+ * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
+ * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4
+ * IOA[7] TS 1=enabled
+ * IOE[4] TDA18212 1=enabled
+ * DVB-C:
+ * IOD[6] ZL10353 0=disabled
+ * IOD[5] TDA10023 1=enabled
+ * IOE[0] IF 1=enabled
+ * DVB-T:
+ * IOD[5] TDA10023 0=disabled
+ * IOD[6] ZL10353 1=enabled
+ * IOE[0] IF 0=enabled
+ *
+ * E7 S2 VID=1c73 PID=861f HW=22 FW=0.1 AMTCI=?? "anysee-E7PS2(LP)"
+ * PCB: 508PS2 (rev0.4)
+ * parts: DNBU10512IST(STV0903, STV6110), ISL6423
+ * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
+ * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4
+ * IOA[7] TS 1=enabled
+ * IOE[5] STV0903 1=enabled
*/
static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
@@ -459,7 +484,7 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
state->hw = hw_info[0];
switch (state->hw) {
- case ANYSEE_HW_02: /* 2 */
+ case ANYSEE_HW_507T: /* 2 */
/* E30 */
/* attach demod */
@@ -593,7 +618,9 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
break;
case ANYSEE_HW_508TC: /* 18 */
+ case ANYSEE_HW_508PTC: /* 21 */
/* E7 TC */
+ /* E7 PTC */
/* enable transport stream on IOA[7] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80);
@@ -650,7 +677,9 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
break;
case ANYSEE_HW_508S2: /* 19 */
+ case ANYSEE_HW_508PS2: /* 22 */
/* E7 S2 */
+ /* E7 PS2 */
/* enable transport stream on IOA[7] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80);
@@ -687,7 +716,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
deb_info("%s:\n", __func__);
switch (state->hw) {
- case ANYSEE_HW_02: /* 2 */
+ case ANYSEE_HW_507T: /* 2 */
/* E30 */
/* attach tuner */
@@ -762,7 +791,9 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
break;
case ANYSEE_HW_508TC: /* 18 */
+ case ANYSEE_HW_508PTC: /* 21 */
/* E7 TC */
+ /* E7 PTC */
/* enable tuner on IOE[4] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 4), 0x10);
@@ -775,7 +806,9 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
break;
case ANYSEE_HW_508S2: /* 19 */
+ case ANYSEE_HW_508PS2: /* 22 */
/* E7 S2 */
+ /* E7 PS2 */
/* attach tuner */
fe = dvb_attach(stv6110_attach, adap->fe,
diff --git a/drivers/media/dvb/dvb-usb/anysee.h b/drivers/media/dvb/dvb-usb/anysee.h
index a7673aa..ad6ccd1 100644
--- a/drivers/media/dvb/dvb-usb/anysee.h
+++ b/drivers/media/dvb/dvb-usb/anysee.h
@@ -61,13 +61,15 @@ struct anysee_state {
u8 seq;
};
-#define ANYSEE_HW_02 2 /* E30 */
-#define ANYSEE_HW_507CD 6 /* E30 Plus */
-#define ANYSEE_HW_507DC 10 /* E30 C Plus */
-#define ANYSEE_HW_507SI 11 /* E30 S2 Plus */
-#define ANYSEE_HW_507FA 15 /* E30 Combo Plus / E30 C Plus */
-#define ANYSEE_HW_508TC 18 /* E7 TC */
-#define ANYSEE_HW_508S2 19 /* E7 S2 */
+#define ANYSEE_HW_507T 2 /* E30 */
+#define ANYSEE_HW_507CD 6 /* E30 Plus */
+#define ANYSEE_HW_507DC 10 /* E30 C Plus */
+#define ANYSEE_HW_507SI 11 /* E30 S2 Plus */
+#define ANYSEE_HW_507FA 15 /* E30 Combo Plus / E30 C Plus */
+#define ANYSEE_HW_508TC 18 /* E7 TC */
+#define ANYSEE_HW_508S2 19 /* E7 S2 */
+#define ANYSEE_HW_508PTC 21 /* E7 PTC Plus */
+#define ANYSEE_HW_508PS2 22 /* E7 PS2 Plus */
#define REG_IOA 0x80 /* Port A (bit addressable) */
#define REG_IOB 0x90 /* Port B (bit addressable) */
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index c519ad5..d0ea5b6 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -17,6 +17,7 @@
#include "mt2266.h"
#include "tuner-xc2028.h"
#include "xc5000.h"
+#include "xc4000.h"
#include "s5h1411.h"
#include "dib0070.h"
#include "dib0090.h"
@@ -2655,6 +2656,156 @@ static int xc5000_tuner_attach(struct dvb_usb_adapter *adap)
== NULL ? -ENODEV : 0;
}
+static int dib0700_xc4000_tuner_callback(void *priv, int component,
+ int command, int arg)
+{
+ struct dvb_usb_adapter *adap = priv;
+
+ if (command == XC4000_TUNER_RESET) {
+ /* Reset the tuner */
+ dib7000p_set_gpio(adap->fe, 8, 0, 0);
+ msleep(10);
+ dib7000p_set_gpio(adap->fe, 8, 0, 1);
+ } else {
+ err("xc4000: unknown tuner callback command: %d\n", command);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct dibx000_agc_config stk7700p_7000p_xc4000_agc_config = {
+ .band_caps = BAND_UHF | BAND_VHF,
+ .setup = 0x64,
+ .inv_gain = 0x02c8,
+ .time_stabiliz = 0x15,
+ .alpha_level = 0x00,
+ .thlock = 0x76,
+ .wbd_inv = 0x01,
+ .wbd_ref = 0x0b33,
+ .wbd_sel = 0x00,
+ .wbd_alpha = 0x02,
+ .agc1_max = 0x00,
+ .agc1_min = 0x00,
+ .agc2_max = 0x9b26,
+ .agc2_min = 0x26ca,
+ .agc1_pt1 = 0x00,
+ .agc1_pt2 = 0x00,
+ .agc1_pt3 = 0x00,
+ .agc1_slope1 = 0x00,
+ .agc1_slope2 = 0x00,
+ .agc2_pt1 = 0x00,
+ .agc2_pt2 = 0x80,
+ .agc2_slope1 = 0x1d,
+ .agc2_slope2 = 0x1d,
+ .alpha_mant = 0x11,
+ .alpha_exp = 0x1b,
+ .beta_mant = 0x17,
+ .beta_exp = 0x33,
+ .perform_agc_softsplit = 0x00,
+};
+
+static struct dibx000_bandwidth_config stk7700p_xc4000_pll_config = {
+ 60000, 30000, /* internal, sampling */
+ 1, 8, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass */
+ 0, 0, 1, 1, 0, /* misc: refdiv, bypclk_div, IO_CLK_en_core, */
+ /* ADClkSrc, modulo */
+ (3 << 14) | (1 << 12) | 524, /* sad_cfg: refsel, sel, freq_15k */
+ 39370534, /* ifreq */
+ 20452225, /* timf */
+ 30000000 /* xtal */
+};
+
+/* FIXME: none of these inputs are validated yet */
+static struct dib7000p_config pctv_340e_config = {
+ .output_mpeg2_in_188_bytes = 1,
+
+ .agc_config_count = 1,
+ .agc = &stk7700p_7000p_xc4000_agc_config,
+ .bw = &stk7700p_xc4000_pll_config,
+
+ .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS,
+};
+
+/* PCTV 340e GPIOs map:
+ dib0700:
+ GPIO2 - CX25843 sleep
+ GPIO3 - CS5340 reset
+ GPIO5 - IRD
+ GPIO6 - Power Supply
+ GPIO8 - LNA (1=off 0=on)
+ GPIO10 - CX25843 reset
+ dib7000:
+ GPIO8 - xc4000 reset
+ */
+static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_state *st = adap->dev->priv;
+
+ /* Power Supply on */
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0);
+ msleep(50);
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(100); /* Allow power supply to settle before probing */
+
+ /* cx25843 reset */
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+ msleep(1); /* cx25843 datasheet says 350us required */
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+
+ /* LNA off for now */
+ dib0700_set_gpio(adap->dev, GPIO8, GPIO_OUT, 1);
+
+ /* Put the CX25843 to sleep for now since we're in digital mode */
+ dib0700_set_gpio(adap->dev, GPIO2, GPIO_OUT, 1);
+
+ /* FIXME: not verified yet */
+ dib0700_ctrl_clock(adap->dev, 72, 1);
+
+ msleep(500);
+
+ if (dib7000pc_detection(&adap->dev->i2c_adap) == 0) {
+ /* Demodulator not found for some reason? */
+ return -ENODEV;
+ }
+
+ adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x12,
+ &pctv_340e_config);
+ st->is_dib7000pc = 1;
+
+ return adap->fe == NULL ? -ENODEV : 0;
+}
+
+static struct xc4000_config dib7000p_xc4000_tunerconfig = {
+ .i2c_address = 0x61,
+ .default_pm = 1,
+ .dvb_amplitude = 0,
+ .set_smoothedcvbs = 0,
+ .if_khz = 5400
+};
+
+static int xc4000_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct i2c_adapter *tun_i2c;
+
+ /* The xc4000 is not on the main i2c bus */
+ tun_i2c = dib7000p_get_i2c_master(adap->fe,
+ DIBX000_I2C_INTERFACE_TUNER, 1);
+ if (tun_i2c == NULL) {
+ printk(KERN_ERR "Could not reach tuner i2c bus\n");
+ return -ENODEV;
+ }
+
+ /* Setup the reset callback */
+ adap->fe->callback = dib0700_xc4000_tuner_callback;
+
+ return dvb_attach(xc4000_attach, adap->fe, tun_i2c,
+ &dib7000p_xc4000_tunerconfig)
+ == NULL ? -ENODEV : 0;
+}
+
static struct lgdt3305_config hcw_lgdt3305_config = {
.i2c_addr = 0x0e,
.mpeg_mode = LGDT3305_MPEG_PARALLEL,
@@ -2802,6 +2953,8 @@ struct usb_device_id dib0700_usb_id_table[] = {
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7090PVR) },
{ USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2) },
/* 75 */{ USB_DEVICE(USB_VID_MEDION, USB_PID_CREATIX_CTX1921) },
+ { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV340E) },
+ { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV340E_SE) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -3772,6 +3925,41 @@ struct dvb_usb_device_properties dib0700_devices[] = {
RC_TYPE_NEC,
.change_protocol = dib0700_change_protocol,
},
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .frontend_attach = pctv340e_frontend_attach,
+ .tuner_attach = xc4000_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+
+ .size_of_priv = sizeof(struct
+ dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 2,
+ .devices = {
+ { "Pinnacle PCTV 340e HD Pro USB Stick",
+ { &dib0700_usb_id_table[76], NULL },
+ { NULL },
+ },
+ { "Pinnacle PCTV Hybrid Stick Solo",
+ { &dib0700_usb_id_table[77], NULL },
+ { NULL },
+ },
+ },
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
},
};
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 21b1549..2a79b8f 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -230,6 +230,8 @@
#define USB_PID_PINNACLE_PCTV310E 0x3211
#define USB_PID_PINNACLE_PCTV801E 0x023a
#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
+#define USB_PID_PINNACLE_PCTV340E 0x023d
+#define USB_PID_PINNACLE_PCTV340E_SE 0x023e
#define USB_PID_PINNACLE_PCTV73A 0x0243
#define USB_PID_PINNACLE_PCTV73ESE 0x0245
#define USB_PID_PINNACLE_PCTV74E 0x0246
@@ -313,6 +315,7 @@
#define USB_PID_FRIIO_WHITE 0x0001
#define USB_PID_TVWAY_PLUS 0x0002
#define USB_PID_SVEON_STV20 0xe39d
+#define USB_PID_SVEON_STV22 0xe401
#define USB_PID_AZUREWAVE_AZ6027 0x3275
#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 76a8096..7d35d07 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -85,7 +85,7 @@ static inline u8 rc5_data(struct rc_map_table *key)
return key->scancode & 0xff;
}
-static inline u8 rc5_scan(struct rc_map_table *key)
+static inline u16 rc5_scan(struct rc_map_table *key)
{
return key->scancode & 0xffff;
}
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.h b/drivers/media/dvb/dvb-usb/gp8psk.h
index 831749a..ed32b9d 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.h
+++ b/drivers/media/dvb/dvb-usb/gp8psk.h
@@ -78,9 +78,6 @@ extern int dvb_usb_gp8psk_debug;
#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
#define GET_USB_SPEED 0x07
- #define USB_SPEED_LOW 0
- #define USB_SPEED_FULL 1
- #define USB_SPEED_HIGH 2
#define RESET_FX2 0x13
diff --git a/drivers/media/dvb/dvb-usb/technisat-usb2.c b/drivers/media/dvb/dvb-usb/technisat-usb2.c
index 08f8842..473b95e 100644
--- a/drivers/media/dvb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/dvb/dvb-usb/technisat-usb2.c
@@ -765,10 +765,8 @@ static void technisat_usb2_disconnect(struct usb_interface *intf)
/* work and stuff was only created when the device is is hot-state */
if (dev != NULL) {
struct technisat_usb2_state *state = dev->priv;
- if (state != NULL) {
+ if (state != NULL)
cancel_delayed_work_sync(&state->green_led_work);
- flush_scheduled_work();
- }
}
dvb_usb_device_exit(intf);
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 3db89e3..536c16c 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties;
static int vp7045_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct dvb_usb_device *d;
- int ret = dvb_usb_device_init(intf, &vp7045_properties,
- THIS_MODULE, &d, adapter_nr);
- if (ret)
- return ret;
-
- d->priv = kmalloc(20, GFP_KERNEL);
- if (!d->priv) {
- dvb_usb_device_exit(intf);
- return -ENOMEM;
- }
-
- return ret;
-}
-
-static void vp7045_usb_disconnect(struct usb_interface *intf)
-{
- struct dvb_usb_device *d = usb_get_intfdata(intf);
- kfree(d->priv);
- dvb_usb_device_exit(intf);
+ return dvb_usb_device_init(intf, &vp7045_properties,
+ THIS_MODULE, NULL, adapter_nr);
}
static struct usb_device_id vp7045_usb_table [] = {
@@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table);
static struct dvb_usb_device_properties vp7045_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-vp7045-01.fw",
- .size_of_priv = sizeof(u8 *),
+ .size_of_priv = 20,
.num_adapters = 1,
.adapter = {
@@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = {
static struct usb_driver vp7045_usb_driver = {
.name = "dvb_usb_vp7045",
.probe = vp7045_usb_probe,
- .disconnect = vp7045_usb_disconnect,
+ .disconnect = dvb_usb_device_exit,
.id_table = vp7045_usb_table,
};
diff --git a/drivers/media/dvb/dvb-usb/vp7045.h b/drivers/media/dvb/dvb-usb/vp7045.h
index 969688f..cf5ec46 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.h
+++ b/drivers/media/dvb/dvb-usb/vp7045.h
@@ -36,9 +36,6 @@
#define Tuner_Power_OFF 0
#define GET_USB_SPEED 0x07
- #define USB_SPEED_LOW 0
- #define USB_SPEED_FULL 1
- #define USB_SPEED_HIGH 2
#define LOCK_TUNER_COMMAND 0x09
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index 21c52e3..489ae82 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -1208,7 +1208,7 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
if (r->response != AVC_RESPONSE_ACCEPTED) {
dev_err(fdtv->device,
"CA PMT failed with response 0x%x\n", r->response);
- ret = -EFAULT;
+ ret = -EACCES;
}
out:
mutex_unlock(&fdtv->avc_mutex);
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index 8ffb565..e5ebdbf 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -45,11 +45,6 @@ static int fdtv_get_ca_flags(struct firedtv_tuner_status *stat)
return flags;
}
-static int fdtv_ca_reset(struct firedtv *fdtv)
-{
- return avc_ca_reset(fdtv) ? -EFAULT : 0;
-}
-
static int fdtv_ca_get_caps(void *arg)
{
struct ca_caps *cap = arg;
@@ -65,12 +60,14 @@ static int fdtv_ca_get_slot_info(struct firedtv *fdtv, void *arg)
{
struct firedtv_tuner_status stat;
struct ca_slot_info *slot = arg;
+ int err;
- if (avc_tuner_status(fdtv, &stat))
- return -EFAULT;
+ err = avc_tuner_status(fdtv, &stat);
+ if (err)
+ return err;
if (slot->num != 0)
- return -EFAULT;
+ return -EACCES;
slot->type = CA_CI;
slot->flags = fdtv_get_ca_flags(&stat);
@@ -81,21 +78,21 @@ static int fdtv_ca_app_info(struct firedtv *fdtv, void *arg)
{
struct ca_msg *reply = arg;
- return avc_ca_app_info(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
+ return avc_ca_app_info(fdtv, reply->msg, &reply->length);
}
static int fdtv_ca_info(struct firedtv *fdtv, void *arg)
{
struct ca_msg *reply = arg;
- return avc_ca_info(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
+ return avc_ca_info(fdtv, reply->msg, &reply->length);
}
static int fdtv_ca_get_mmi(struct firedtv *fdtv, void *arg)
{
struct ca_msg *reply = arg;
- return avc_ca_get_mmi(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
+ return avc_ca_get_mmi(fdtv, reply->msg, &reply->length);
}
static int fdtv_ca_get_msg(struct firedtv *fdtv, void *arg)
@@ -111,14 +108,15 @@ static int fdtv_ca_get_msg(struct firedtv *fdtv, void *arg)
err = fdtv_ca_info(fdtv, arg);
break;
default:
- if (avc_tuner_status(fdtv, &stat))
- err = -EFAULT;
- else if (stat.ca_mmi == 1)
+ err = avc_tuner_status(fdtv, &stat);
+ if (err)
+ break;
+ if (stat.ca_mmi == 1)
err = fdtv_ca_get_mmi(fdtv, arg);
else {
dev_info(fdtv->device, "unhandled CA message 0x%08x\n",
fdtv->ca_last_command);
- err = -EFAULT;
+ err = -EACCES;
}
}
fdtv->ca_last_command = 0;
@@ -141,7 +139,7 @@ static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
data_length = msg->msg[3];
}
- return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length) ? -EFAULT : 0;
+ return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length);
}
static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
@@ -170,7 +168,7 @@ static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
default:
dev_err(fdtv->device, "unhandled CA message 0x%08x\n",
fdtv->ca_last_command);
- err = -EFAULT;
+ err = -EACCES;
}
return err;
}
@@ -184,7 +182,7 @@ static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
switch (cmd) {
case CA_RESET:
- err = fdtv_ca_reset(fdtv);
+ err = avc_ca_reset(fdtv);
break;
case CA_GET_CAP:
err = fdtv_ca_get_caps(arg);
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 44b816f..32e08e3 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -49,6 +49,27 @@ config DVB_STV6110x
help
A Silicon tuner that supports DVB-S and DVB-S2 modes
+comment "Multistandard (cable + terrestrial) frontends"
+ depends on DVB_CORE
+
+config DVB_DRXK
+ tristate "Micronas DRXK based"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ Micronas DRX-K DVB-C/T demodulator.
+
+ Say Y when you want to support this frontend.
+
+config DVB_TDA18271C2DD
+ tristate "NXP TDA18271C2 silicon tuner"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ NXP TDA18271 silicon tuner.
+
+ Say Y when you want to support this tuner.
+
comment "DVB-S (satellite) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 2f3a6f7..6a6ba05 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -10,6 +10,7 @@ stv0900-objs = stv0900_core.o stv0900_sw.o
au8522-objs = au8522_dig.o au8522_decoder.o
drxd-objs = drxd_firm.o drxd_hard.o
cxd2820r-objs = cxd2820r_core.o cxd2820r_c.o cxd2820r_t.o cxd2820r_t2.o
+drxk-objs := drxk_hard.o
obj-$(CONFIG_DVB_PLL) += dvb-pll.o
obj-$(CONFIG_DVB_STV0299) += stv0299.o
@@ -88,4 +89,6 @@ obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
obj-$(CONFIG_DVB_STV0367) += stv0367.o
obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
+obj-$(CONFIG_DVB_DRXK) += drxk.o
+obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index b537891..2b248c1 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -692,7 +692,7 @@ static int au8522_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
/* Interrogate the decoder to see if we are getting a real signal */
lock_status = au8522_readreg(state, 0x00);
if (lock_status == 0xa2)
- vt->signal = 0x01;
+ vt->signal = 0xffff;
else
vt->signal = 0x00;
diff --git a/drivers/media/dvb/frontends/cx24113.c b/drivers/media/dvb/frontends/cx24113.c
index e9ee555..c341d57 100644
--- a/drivers/media/dvb/frontends/cx24113.c
+++ b/drivers/media/dvb/frontends/cx24113.c
@@ -31,8 +31,8 @@
static int debug;
-#define info(args...) do { printk(KERN_INFO "CX24113: " args); } while (0)
-#define err(args...) do { printk(KERN_ERR "CX24113: " args); } while (0)
+#define cx_info(args...) do { printk(KERN_INFO "CX24113: " args); } while (0)
+#define cx_err(args...) do { printk(KERN_ERR "CX24113: " args); } while (0)
#define dprintk(args...) \
do { \
@@ -341,7 +341,7 @@ static void cx24113_calc_pll_nf(struct cx24113_state *state, u16 *n, s32 *f)
} while (N < 6 && R < 3);
if (N < 6) {
- err("strange frequency: N < 6\n");
+ cx_err("strange frequency: N < 6\n");
return;
}
F = freq_hz;
@@ -563,7 +563,7 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
kzalloc(sizeof(struct cx24113_state), GFP_KERNEL);
int rc;
if (state == NULL) {
- err("Unable to kzalloc\n");
+ cx_err("Unable to kzalloc\n");
goto error;
}
@@ -571,7 +571,7 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
state->config = config;
state->i2c = i2c;
- info("trying to detect myself\n");
+ cx_info("trying to detect myself\n");
/* making a dummy read, because of some expected troubles
* after power on */
@@ -579,24 +579,24 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
rc = cx24113_readreg(state, 0x00);
if (rc < 0) {
- info("CX24113 not found.\n");
+ cx_info("CX24113 not found.\n");
goto error;
}
state->rev = rc;
switch (rc) {
case 0x43:
- info("detected CX24113 variant\n");
+ cx_info("detected CX24113 variant\n");
break;
case REV_CX24113:
- info("successfully detected\n");
+ cx_info("successfully detected\n");
break;
default:
- err("unsupported device id: %x\n", state->rev);
+ cx_err("unsupported device id: %x\n", state->rev);
goto error;
}
state->ver = cx24113_readreg(state, 0x01);
- info("version: %x\n", state->ver);
+ cx_info("version: %x\n", state->ver);
/* create dvb_frontend */
memcpy(&fe->ops.tuner_ops, &cx24113_tuner_ops,
diff --git a/drivers/media/dvb/frontends/cx24116.c b/drivers/media/dvb/frontends/cx24116.c
index 95c6465..ccd0525 100644
--- a/drivers/media/dvb/frontends/cx24116.c
+++ b/drivers/media/dvb/frontends/cx24116.c
@@ -1452,11 +1452,7 @@ tuned: /* Set/Reset B/W */
cmd.args[0x00] = CMD_BANDWIDTH;
cmd.args[0x01] = 0x00;
cmd.len = 0x02;
- ret = cx24116_cmd_execute(fe, &cmd);
- if (ret != 0)
- return ret;
-
- return ret;
+ return cx24116_cmd_execute(fe, &cmd);
}
static int cx24116_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *params,
diff --git a/drivers/media/dvb/frontends/cxd2820r.h b/drivers/media/dvb/frontends/cxd2820r.h
index ad17845..2906582 100644
--- a/drivers/media/dvb/frontends/cxd2820r.h
+++ b/drivers/media/dvb/frontends/cxd2820r.h
@@ -55,13 +55,13 @@ struct cxd2820r_config {
* Default: 0
* Values: 0, 1
*/
- int if_agc_polarity:1;
+ bool if_agc_polarity;
/* Spectrum inversion.
* Default: 0
* Values: 0, 1
*/
- int spec_inv:1;
+ bool spec_inv;
/* IFs for all used modes.
* Default: none, must set
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index 0779f69..d416e85 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -314,6 +314,8 @@ static int cxd2820r_set_frontend(struct dvb_frontend *fe,
} else if (c->delivery_system == SYS_DVBT2) {
/* DVB-T => DVB-T2 */
ret = cxd2820r_sleep_t(fe);
+ if (ret)
+ break;
ret = cxd2820r_set_frontend_t2(fe, p);
}
break;
@@ -324,6 +326,8 @@ static int cxd2820r_set_frontend(struct dvb_frontend *fe,
} else if (c->delivery_system == SYS_DVBT) {
/* DVB-T2 => DVB-T */
ret = cxd2820r_sleep_t2(fe);
+ if (ret)
+ break;
ret = cxd2820r_set_frontend_t(fe, p);
}
break;
@@ -740,12 +744,13 @@ static int cxd2820r_tuner_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msg[], int num)
{
struct cxd2820r_priv *priv = i2c_get_adapdata(i2c_adap);
- u8 obuf[msg[0].len + 2];
+ int ret;
+ u8 *obuf = kmalloc(msg[0].len + 2, GFP_KERNEL);
struct i2c_msg msg2[2] = {
{
.addr = priv->cfg.i2c_address,
.flags = 0,
- .len = sizeof(obuf),
+ .len = msg[0].len + 2,
.buf = obuf,
}, {
.addr = priv->cfg.i2c_address,
@@ -755,15 +760,24 @@ static int cxd2820r_tuner_i2c_xfer(struct i2c_adapter *i2c_adap,
}
};
+ if (!obuf)
+ return -ENOMEM;
+
obuf[0] = 0x09;
obuf[1] = (msg[0].addr << 1);
if (num == 2) { /* I2C read */
obuf[1] = (msg[0].addr << 1) | I2C_M_RD; /* I2C RD flag */
- msg2[0].len = sizeof(obuf) - 1; /* maybe HW bug ? */
+ msg2[0].len = msg[0].len + 2 - 1; /* '-1' maybe HW bug ? */
}
memcpy(&obuf[2], msg[0].buf, msg[0].len);
- return i2c_transfer(priv->i2c, msg2, num);
+ ret = i2c_transfer(priv->i2c, msg2, num);
+ if (ret < 0)
+ warn("tuner i2c failed ret:%d", ret);
+
+ kfree(obuf);
+
+ return ret;
}
static struct i2c_algorithm cxd2820r_tuner_i2c_algo = {
diff --git a/drivers/media/dvb/frontends/cxd2820r_priv.h b/drivers/media/dvb/frontends/cxd2820r_priv.h
index 25adbee..0c0ebc9 100644
--- a/drivers/media/dvb/frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb/frontends/cxd2820r_priv.h
@@ -55,13 +55,13 @@ struct cxd2820r_priv {
struct mutex fe_lock; /* FE lock */
int active_fe:2; /* FE lock, -1=NONE, 0=DVB-T/T2, 1=DVB-C */
- int ber_running:1;
+ bool ber_running;
u8 bank[2];
u8 gpio[3];
fe_delivery_system_t delivery_system;
- int last_tune_failed:1; /* for switch between T and T2 tune */
+ bool last_tune_failed; /* for switch between T and T2 tune */
};
/* cxd2820r_core.c */
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 0c9f40c..a64a538 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -2336,6 +2336,11 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
request_firmware() will hit an OOPS (this should be moved somewhere
more common) */
+ /* FIXME: make sure the dev.parent field is initialized, or else
+ request_firmware() will hit an OOPS (this should be moved somewhere
+ more common) */
+ st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
+
dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);
/* init 7090 tuner adapter */
diff --git a/drivers/media/dvb/frontends/drxd_hard.c b/drivers/media/dvb/frontends/drxd_hard.c
index ea4c1c3..2238bf0 100644
--- a/drivers/media/dvb/frontends/drxd_hard.c
+++ b/drivers/media/dvb/frontends/drxd_hard.c
@@ -28,7 +28,6 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <asm/div64.h>
#include "dvb_frontend.h"
@@ -233,7 +232,7 @@ static int i2c_read(struct i2c_adapter *adap,
return 0;
}
-inline u32 MulDiv32(u32 a, u32 b, u32 c)
+static inline u32 MulDiv32(u32 a, u32 b, u32 c)
{
u64 tmp64;
@@ -910,14 +909,16 @@ static int load_firmware(struct drxd_state *state, const char *fw_name)
return -EIO;
}
- state->microcode = kzalloc(fw->size, GFP_KERNEL);
+ state->microcode = kmalloc(fw->size, GFP_KERNEL);
if (state->microcode == NULL) {
- printk(KERN_ERR "drxd: firmware load failure: nomemory\n");
+ release_firmware(fw);
+ printk(KERN_ERR "drxd: firmware load failure: no memory\n");
return -ENOMEM;
}
memcpy(state->microcode, fw->data, fw->size);
state->microcode_length = fw->size;
+ release_firmware(fw);
return 0;
}
diff --git a/drivers/media/dvb/frontends/drxk.h b/drivers/media/dvb/frontends/drxk.h
new file mode 100644
index 0000000..58baf41
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk.h
@@ -0,0 +1,47 @@
+#ifndef _DRXK_H_
+#define _DRXK_H_
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+/**
+ * struct drxk_config - Configure the initial parameters for DRX-K
+ *
+ * @adr: I2C address of the DRX-K
+ * @single_master: Device is in single master mode
+ * @no_i2c_bridge: Don't switch the I2C bridge to talk with the tuner
+ * @antenna_gpio: GPIO bit used to control the antenna
+ * @antenna_dvbt: Antenna GPIO polarity. When set, a GPIO value of 1
+ * selects DVB-C and 0 selects DVB-T; when clear, the opposite.
+ * @microcode_name: Name of the firmware file with the microcode
+ *
+ * On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
+ * UIO-3.
+ */
+struct drxk_config {
+ u8 adr;
+ bool single_master;
+ bool no_i2c_bridge;
+
+ bool antenna_dvbt;
+ u16 antenna_gpio;
+
+ const char *microcode_name;
+};
+
+#if defined(CONFIG_DVB_DRXK) || (defined(CONFIG_DVB_DRXK_MODULE) \
+ && defined(MODULE))
+extern struct dvb_frontend *drxk_attach(const struct drxk_config *config,
+ struct i2c_adapter *i2c,
+ struct dvb_frontend **fe_t);
+#else
+static inline struct dvb_frontend *drxk_attach(const struct drxk_config *config,
+ struct i2c_adapter *i2c,
+ struct dvb_frontend **fe_t)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
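A minimal sketch of how a board driver might use this header (illustrative only; the I2C address, GPIO bit and firmware file name are hypothetical, board-specific values, and the second frontend pointer is simply passed through):

#include <linux/errno.h>
#include "dvb_frontend.h"
#include "drxk.h"

static const struct drxk_config example_drxk_config = {
        .adr            = 0x29,                 /* hypothetical demod I2C address */
        .single_master  = true,
        .antenna_dvbt   = true,
        .antenna_gpio   = (1 << 1),             /* UIO-2, per the comment above */
        .microcode_name = "drxk_example.mc",    /* hypothetical firmware file */
};

static int example_frontend_attach(struct i2c_adapter *i2c,
                                   struct dvb_frontend **fe,
                                   struct dvb_frontend **fe2)
{
        *fe = drxk_attach(&example_drxk_config, i2c, fe2);
        return *fe ? 0 : -ENODEV;
}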
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
new file mode 100644
index 0000000..41b0838
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -0,0 +1,6454 @@
+/*
+ * drxk_hard: DRX-K DVB-C/T demodulator driver
+ *
+ * Copyright (C) 2010-2011 Digital Devices GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/version.h>
+#include <asm/div64.h>
+
+#include "dvb_frontend.h"
+#include "drxk.h"
+#include "drxk_hard.h"
+
+static int PowerDownDVBT(struct drxk_state *state, bool setPowerMode);
+static int PowerDownQAM(struct drxk_state *state);
+static int SetDVBTStandard(struct drxk_state *state,
+ enum OperationMode oMode);
+static int SetQAMStandard(struct drxk_state *state,
+ enum OperationMode oMode);
+static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
+ s32 tunerFreqOffset);
+static int SetDVBTStandard(struct drxk_state *state,
+ enum OperationMode oMode);
+static int DVBTStart(struct drxk_state *state);
+static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
+ s32 tunerFreqOffset);
+static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus);
+static int GetDVBTLockStatus(struct drxk_state *state, u32 *pLockStatus);
+static int SwitchAntennaToQAM(struct drxk_state *state);
+static int SwitchAntennaToDVBT(struct drxk_state *state);
+
+static bool IsDVBT(struct drxk_state *state)
+{
+ return state->m_OperationMode == OM_DVBT;
+}
+
+static bool IsQAM(struct drxk_state *state)
+{
+ return state->m_OperationMode == OM_QAM_ITU_A ||
+ state->m_OperationMode == OM_QAM_ITU_B ||
+ state->m_OperationMode == OM_QAM_ITU_C;
+}
+
+bool IsA1WithPatchCode(struct drxk_state *state)
+{
+ return state->m_DRXK_A1_PATCH_CODE;
+}
+
+bool IsA1WithRomCode(struct drxk_state *state)
+{
+ return state->m_DRXK_A1_ROM_CODE;
+}
+
+#define NOA1ROM 0
+
+#define DRXDAP_FASI_SHORT_FORMAT(addr) (((addr) & 0xFC30FF80) == 0)
+#define DRXDAP_FASI_LONG_FORMAT(addr) (((addr) & 0xFC30FF80) != 0)
+
+#define DEFAULT_MER_83 165
+#define DEFAULT_MER_93 250
+
+#ifndef DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH
+#define DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH (0x02)
+#endif
+
+#ifndef DRXK_MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH
+#define DRXK_MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH (0x03)
+#endif
+
+#ifndef DRXK_MPEG_OUTPUT_CLK_DRIVE_STRENGTH
+#define DRXK_MPEG_OUTPUT_CLK_DRIVE_STRENGTH (0x06)
+#endif
+
+#define DEFAULT_DRXK_MPEG_LOCK_TIMEOUT 700
+#define DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT 500
+
+#ifndef DRXK_KI_RAGC_ATV
+#define DRXK_KI_RAGC_ATV 4
+#endif
+#ifndef DRXK_KI_IAGC_ATV
+#define DRXK_KI_IAGC_ATV 6
+#endif
+#ifndef DRXK_KI_DAGC_ATV
+#define DRXK_KI_DAGC_ATV 7
+#endif
+
+#ifndef DRXK_KI_RAGC_QAM
+#define DRXK_KI_RAGC_QAM 3
+#endif
+#ifndef DRXK_KI_IAGC_QAM
+#define DRXK_KI_IAGC_QAM 4
+#endif
+#ifndef DRXK_KI_DAGC_QAM
+#define DRXK_KI_DAGC_QAM 7
+#endif
+#ifndef DRXK_KI_RAGC_DVBT
+#define DRXK_KI_RAGC_DVBT (IsA1WithPatchCode(state) ? 3 : 2)
+#endif
+#ifndef DRXK_KI_IAGC_DVBT
+#define DRXK_KI_IAGC_DVBT (IsA1WithPatchCode(state) ? 4 : 2)
+#endif
+#ifndef DRXK_KI_DAGC_DVBT
+#define DRXK_KI_DAGC_DVBT (IsA1WithPatchCode(state) ? 10 : 7)
+#endif
+
+#ifndef DRXK_AGC_DAC_OFFSET
+#define DRXK_AGC_DAC_OFFSET (0x800)
+#endif
+
+#ifndef DRXK_BANDWIDTH_8MHZ_IN_HZ
+#define DRXK_BANDWIDTH_8MHZ_IN_HZ (0x8B8249L)
+#endif
+
+#ifndef DRXK_BANDWIDTH_7MHZ_IN_HZ
+#define DRXK_BANDWIDTH_7MHZ_IN_HZ (0x7A1200L)
+#endif
+
+#ifndef DRXK_BANDWIDTH_6MHZ_IN_HZ
+#define DRXK_BANDWIDTH_6MHZ_IN_HZ (0x68A1B6L)
+#endif
+
+#ifndef DRXK_QAM_SYMBOLRATE_MAX
+#define DRXK_QAM_SYMBOLRATE_MAX (7233000)
+#endif
+
+#define DRXK_BL_ROM_OFFSET_TAPS_DVBT 56
+#define DRXK_BL_ROM_OFFSET_TAPS_ITU_A 64
+#define DRXK_BL_ROM_OFFSET_TAPS_ITU_C 0x5FE0
+#define DRXK_BL_ROM_OFFSET_TAPS_BG 24
+#define DRXK_BL_ROM_OFFSET_TAPS_DKILLP 32
+#define DRXK_BL_ROM_OFFSET_TAPS_NTSC 40
+#define DRXK_BL_ROM_OFFSET_TAPS_FM 48
+#define DRXK_BL_ROM_OFFSET_UCODE 0
+
+#define DRXK_BLC_TIMEOUT 100
+
+#define DRXK_BLCC_NR_ELEMENTS_TAPS 2
+#define DRXK_BLCC_NR_ELEMENTS_UCODE 6
+
+#define DRXK_BLDC_NR_ELEMENTS_TAPS 28
+
+#ifndef DRXK_OFDM_NE_NOTCH_WIDTH
+#define DRXK_OFDM_NE_NOTCH_WIDTH (4)
+#endif
+
+#define DRXK_QAM_SL_SIG_POWER_QAM16 (40960)
+#define DRXK_QAM_SL_SIG_POWER_QAM32 (20480)
+#define DRXK_QAM_SL_SIG_POWER_QAM64 (43008)
+#define DRXK_QAM_SL_SIG_POWER_QAM128 (20992)
+#define DRXK_QAM_SL_SIG_POWER_QAM256 (43520)
+
+static unsigned int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
+
+#define dprintk(level, fmt, arg...) do { \
+if (debug >= level) \
+ printk(KERN_DEBUG "drxk: %s" fmt, __func__, ## arg); \
+} while (0)
+
+
+static inline u32 MulDiv32(u32 a, u32 b, u32 c)
+{
+ u64 tmp64;
+
+ tmp64 = (u64) a * (u64) b;
+ do_div(tmp64, c);
+
+ return (u32) tmp64;
+}
+
+inline u32 Frac28a(u32 a, u32 c)
+{
+ int i = 0;
+ u32 Q1 = 0;
+ u32 R0 = 0;
+
+ R0 = (a % c) << 4; /* 32-28 == 4 shifts possible at max */
+ Q1 = a / c; /* integer part, only the 4 least significant bits
+ will be visible in the result */
+
+ /* division using radix 16, 7 nibbles in the result */
+ for (i = 0; i < 7; i++) {
+ Q1 = (Q1 << 4) | (R0 / c);
+ R0 = (R0 % c) << 4;
+ }
+ /* rounding */
+ if ((R0 >> 3) >= c)
+ Q1++;
+
+ return Q1;
+}
+
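As a quick way to see what Frac28a() computes: ignoring the final rounding step, it returns a/c as an unsigned fixed-point value with 28 fractional bits, i.e. approximately round(a * 2^28 / c). A tiny userspace model (not driver code) makes that concrete:

/* Userspace reference model of Frac28a(): a/c in fixed point with 28
 * fractional bits. For a = 1, c = 3 this prints 0x05555555, matching the
 * radix-16 loop above (up to the final rounding step). */
#include <stdint.h>
#include <stdio.h>

static uint32_t frac28a_ref(uint32_t a, uint32_t c)
{
	return (uint32_t)(((uint64_t)a << 28) / c);
}

int main(void)
{
	printf("0x%08x\n", frac28a_ref(1, 3));
	return 0;
}
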
+static u32 Log10Times100(u32 x)
+{
+ static const u8 scale = 15;
+ static const u8 indexWidth = 5;
+ u8 i = 0;
+ u32 y = 0;
+ u32 d = 0;
+ u32 k = 0;
+ u32 r = 0;
+ /*
+ log2lut[n] = (1<<scale) * 200 * log2(1.0 + ((1.0/(1<<INDEXWIDTH)) * n))
+ 0 <= n < ((1<<INDEXWIDTH)+1)
+ */
+
+ static const u32 log2lut[] = {
+ 0, /* 0.000000 */
+ 290941, /* 290941.300628 */
+ 573196, /* 573196.476418 */
+ 847269, /* 847269.179851 */
+ 1113620, /* 1113620.489452 */
+ 1372674, /* 1372673.576986 */
+ 1624818, /* 1624817.752104 */
+ 1870412, /* 1870411.981536 */
+ 2109788, /* 2109787.962654 */
+ 2343253, /* 2343252.817465 */
+ 2571091, /* 2571091.461923 */
+ 2793569, /* 2793568.696416 */
+ 3010931, /* 3010931.055901 */
+ 3223408, /* 3223408.452106 */
+ 3431216, /* 3431215.635215 */
+ 3634553, /* 3634553.498355 */
+ 3833610, /* 3833610.244726 */
+ 4028562, /* 4028562.434393 */
+ 4219576, /* 4219575.925308 */
+ 4406807, /* 4406806.721144 */
+ 4590402, /* 4590401.736809 */
+ 4770499, /* 4770499.491025 */
+ 4947231, /* 4947230.734179 */
+ 5120719, /* 5120719.018555 */
+ 5291081, /* 5291081.217197 */
+ 5458428, /* 5458427.996830 */
+ 5622864, /* 5622864.249668 */
+ 5784489, /* 5784489.488298 */
+ 5943398, /* 5943398.207380 */
+ 6099680, /* 6099680.215452 */
+ 6253421, /* 6253420.939751 */
+ 6404702, /* 6404701.706649 */
+ 6553600, /* 6553600.000000 */
+ };
+
+
+ if (x == 0)
+ return 0;
+
+ /* Scale x (normalize) */
+ /* computing y in log(x/y) = log(x) - log(y) */
+ if ((x & ((0xffffffff) << (scale + 1))) == 0) {
+ for (k = scale; k > 0; k--) {
+ if (x & (((u32) 1) << scale))
+ break;
+ x <<= 1;
+ }
+ } else {
+ for (k = scale; k < 31; k++) {
+ if ((x & (((u32) (-1)) << (scale + 1))) == 0)
+ break;
+ x >>= 1;
+ }
+ }
+ /*
+ Now x has binary point between bit[scale] and bit[scale-1]
+ and 1.0 <= x < 2.0 */
+
+	/* correction for division: log(x) = log(x/y)+log(y) */
+ y = k * ((((u32) 1) << scale) * 200);
+
+ /* remove integer part */
+ x &= ((((u32) 1) << scale) - 1);
+ /* get index */
+ i = (u8) (x >> (scale - indexWidth));
+ /* compute delta (x - a) */
+ d = x & ((((u32) 1) << (scale - indexWidth)) - 1);
+ /* compute log, multiplication (d* (..)) must be within range ! */
+ y += log2lut[i] +
+ ((d * (log2lut[i + 1] - log2lut[i])) >> (scale - indexWidth));
+	/* Convert to log10() */
+ y /= 108853; /* (log2(10) << scale) */
+ r = (y >> 1);
+ /* rounding */
+ if (y & ((u32) 1))
+ r++;
+ return r;
+}
+
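The lookup-table comment above defines the approximation; in short, the routine returns 100*log10(x) rounded to the nearest integer, so Log10Times100(1000) is expected to be about 300 and Log10Times100(2) about 30. A floating-point reference model (userspace, for illustration only, not part of the driver):

/* Floating-point model of what the fixed-point Log10Times100() approximates. */
#include <math.h>
#include <stdio.h>

static unsigned int log10_times_100_ref(unsigned int x)
{
	return (unsigned int)(100.0 * log10((double)x) + 0.5);
}

int main(void)
{
	printf("%u %u\n", log10_times_100_ref(1000), log10_times_100_ref(2));
	return 0;	/* prints "300 30" */
}
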
+/****************************************************************************/
+/* I2C **********************************************************************/
+/****************************************************************************/
+
+static int i2c_read1(struct i2c_adapter *adapter, u8 adr, u8 *val)
+{
+ struct i2c_msg msgs[1] = { {.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1}
+ };
+
+ return i2c_transfer(adapter, msgs, 1);
+}
+
+static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
+{
+ int status;
+ struct i2c_msg msg = {
+ .addr = adr, .flags = 0, .buf = data, .len = len };
+
+ dprintk(3, ":");
+ if (debug > 2) {
+ int i;
+ for (i = 0; i < len; i++)
+ printk(KERN_CONT " %02x", data[i]);
+ printk(KERN_CONT "\n");
+ }
+ status = i2c_transfer(adap, &msg, 1);
+ if (status >= 0 && status != 1)
+ status = -EIO;
+
+ if (status < 0)
+ printk(KERN_ERR "drxk: i2c write error at addr 0x%02x\n", adr);
+
+ return status;
+}
+
+static int i2c_read(struct i2c_adapter *adap,
+ u8 adr, u8 *msg, int len, u8 *answ, int alen)
+{
+ int status;
+ struct i2c_msg msgs[2] = {
+ {.addr = adr, .flags = 0,
+ .buf = msg, .len = len},
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = answ, .len = alen}
+ };
+
+ status = i2c_transfer(adap, msgs, 2);
+ if (status != 2) {
+ if (debug > 2)
+ printk(KERN_CONT ": ERROR!\n");
+ if (status >= 0)
+ status = -EIO;
+
+ printk(KERN_ERR "drxk: i2c read error at addr 0x%02x\n", adr);
+ return status;
+ }
+ if (debug > 2) {
+ int i;
+ dprintk(2, ": read from ");
+ for (i = 0; i < len; i++)
+ printk(KERN_CONT " %02x", msg[i]);
+ printk(KERN_CONT "Value = ");
+ for (i = 0; i < alen; i++)
+ printk(KERN_CONT " %02x", answ[i]);
+ printk(KERN_CONT "\n");
+ }
+ return 0;
+}
+
+static int read16_flags(struct drxk_state *state, u32 reg, u16 *data, u8 flags)
+{
+ int status;
+ u8 adr = state->demod_address, mm1[4], mm2[2], len;
+
+ if (state->single_master)
+ flags |= 0xC0;
+
+ if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
+ mm1[0] = (((reg << 1) & 0xFF) | 0x01);
+ mm1[1] = ((reg >> 16) & 0xFF);
+ mm1[2] = ((reg >> 24) & 0xFF) | flags;
+ mm1[3] = ((reg >> 7) & 0xFF);
+ len = 4;
+ } else {
+ mm1[0] = ((reg << 1) & 0xFF);
+ mm1[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
+ len = 2;
+ }
+ dprintk(2, "(0x%08x, 0x%02x)\n", reg, flags);
+ status = i2c_read(state->i2c, adr, mm1, len, mm2, 2);
+ if (status < 0)
+ return status;
+ if (data)
+ *data = mm2[0] | (mm2[1] << 8);
+
+ return 0;
+}
+
+static int read16(struct drxk_state *state, u32 reg, u16 *data)
+{
+ return read16_flags(state, reg, data, 0);
+}
+
+static int read32_flags(struct drxk_state *state, u32 reg, u32 *data, u8 flags)
+{
+ int status;
+ u8 adr = state->demod_address, mm1[4], mm2[4], len;
+
+ if (state->single_master)
+ flags |= 0xC0;
+
+ if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
+ mm1[0] = (((reg << 1) & 0xFF) | 0x01);
+ mm1[1] = ((reg >> 16) & 0xFF);
+ mm1[2] = ((reg >> 24) & 0xFF) | flags;
+ mm1[3] = ((reg >> 7) & 0xFF);
+ len = 4;
+ } else {
+ mm1[0] = ((reg << 1) & 0xFF);
+ mm1[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
+ len = 2;
+ }
+ dprintk(2, "(0x%08x, 0x%02x)\n", reg, flags);
+ status = i2c_read(state->i2c, adr, mm1, len, mm2, 4);
+ if (status < 0)
+ return status;
+ if (data)
+ *data = mm2[0] | (mm2[1] << 8) |
+ (mm2[2] << 16) | (mm2[3] << 24);
+
+ return 0;
+}
+
+static int read32(struct drxk_state *state, u32 reg, u32 *data)
+{
+ return read32_flags(state, reg, data, 0);
+}
+
+static int write16_flags(struct drxk_state *state, u32 reg, u16 data, u8 flags)
+{
+ u8 adr = state->demod_address, mm[6], len;
+
+ if (state->single_master)
+ flags |= 0xC0;
+ if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
+ mm[0] = (((reg << 1) & 0xFF) | 0x01);
+ mm[1] = ((reg >> 16) & 0xFF);
+ mm[2] = ((reg >> 24) & 0xFF) | flags;
+ mm[3] = ((reg >> 7) & 0xFF);
+ len = 4;
+ } else {
+ mm[0] = ((reg << 1) & 0xFF);
+ mm[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
+ len = 2;
+ }
+ mm[len] = data & 0xff;
+ mm[len + 1] = (data >> 8) & 0xff;
+
+ dprintk(2, "(0x%08x, 0x%04x, 0x%02x)\n", reg, data, flags);
+ return i2c_write(state->i2c, adr, mm, len + 2);
+}
+
+static int write16(struct drxk_state *state, u32 reg, u16 data)
+{
+ return write16_flags(state, reg, data, 0);
+}
+
+static int write32_flags(struct drxk_state *state, u32 reg, u32 data, u8 flags)
+{
+ u8 adr = state->demod_address, mm[8], len;
+
+ if (state->single_master)
+ flags |= 0xC0;
+ if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
+ mm[0] = (((reg << 1) & 0xFF) | 0x01);
+ mm[1] = ((reg >> 16) & 0xFF);
+ mm[2] = ((reg >> 24) & 0xFF) | flags;
+ mm[3] = ((reg >> 7) & 0xFF);
+ len = 4;
+ } else {
+ mm[0] = ((reg << 1) & 0xFF);
+ mm[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
+ len = 2;
+ }
+ mm[len] = data & 0xff;
+ mm[len + 1] = (data >> 8) & 0xff;
+ mm[len + 2] = (data >> 16) & 0xff;
+ mm[len + 3] = (data >> 24) & 0xff;
+ dprintk(2, "(0x%08x, 0x%08x, 0x%02x)\n", reg, data, flags);
+
+ return i2c_write(state->i2c, adr, mm, len + 4);
+}
+
+static int write32(struct drxk_state *state, u32 reg, u32 data)
+{
+ return write32_flags(state, reg, data, 0);
+}
+
+static int write_block(struct drxk_state *state, u32 Address,
+ const int BlockSize, const u8 pBlock[])
+{
+ int status = 0, BlkSize = BlockSize;
+ u8 Flags = 0;
+
+ if (state->single_master)
+ Flags |= 0xC0;
+
+ while (BlkSize > 0) {
+ int Chunk = BlkSize > state->m_ChunkSize ?
+ state->m_ChunkSize : BlkSize;
+ u8 *AdrBuf = &state->Chunk[0];
+ u32 AdrLength = 0;
+
+ if (DRXDAP_FASI_LONG_FORMAT(Address) || (Flags != 0)) {
+ AdrBuf[0] = (((Address << 1) & 0xFF) | 0x01);
+ AdrBuf[1] = ((Address >> 16) & 0xFF);
+ AdrBuf[2] = ((Address >> 24) & 0xFF);
+ AdrBuf[3] = ((Address >> 7) & 0xFF);
+ AdrBuf[2] |= Flags;
+ AdrLength = 4;
+ if (Chunk == state->m_ChunkSize)
+ Chunk -= 2;
+ } else {
+ AdrBuf[0] = ((Address << 1) & 0xFF);
+ AdrBuf[1] = (((Address >> 16) & 0x0F) |
+ ((Address >> 18) & 0xF0));
+ AdrLength = 2;
+ }
+ memcpy(&state->Chunk[AdrLength], pBlock, Chunk);
+ dprintk(2, "(0x%08x, 0x%02x)\n", Address, Flags);
+ if (debug > 1) {
+ int i;
+ if (pBlock)
+ for (i = 0; i < Chunk; i++)
+ printk(KERN_CONT " %02x", pBlock[i]);
+ printk(KERN_CONT "\n");
+ }
+ status = i2c_write(state->i2c, state->demod_address,
+ &state->Chunk[0], Chunk + AdrLength);
+ if (status < 0) {
+ printk(KERN_ERR "drxk: %s: i2c write error at addr 0x%02x\n",
+ __func__, Address);
+ break;
+ }
+ pBlock += Chunk;
+ Address += (Chunk >> 1);
+ BlkSize -= Chunk;
+ }
+ return status;
+}
+
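The helpers above all share the same DRXDAP-FASI address packing: a 2-byte short form or a 4-byte long form, selected by DRXDAP_FASI_LONG_FORMAT() and the flags byte. The standalone snippet below merely restates that packing outside the driver so the two layouts can be inspected side by side; the example register value is arbitrary, and the byte layout is taken from the code above rather than from a datasheet.

/* Illustration of the address packing used by read16()/write16()/write_block();
 * mirrors the driver code above, with an arbitrary example register. */
#include <stdint.h>
#include <stdio.h>

#define DRXDAP_FASI_LONG_FORMAT(addr)	(((addr) & 0xFC30FF80) != 0)

static int pack_fasi_addr(uint32_t reg, uint8_t flags, uint8_t out[4])
{
	if (DRXDAP_FASI_LONG_FORMAT(reg) || flags != 0) {
		out[0] = ((reg << 1) & 0xFF) | 0x01;	/* long format marker */
		out[1] = (reg >> 16) & 0xFF;
		out[2] = ((reg >> 24) & 0xFF) | flags;
		out[3] = (reg >> 7) & 0xFF;
		return 4;
	}
	out[0] = (reg << 1) & 0xFF;			/* short format */
	out[1] = ((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0);
	return 2;
}

int main(void)
{
	uint8_t buf[4] = { 0 };
	int n = pack_fasi_addr(0x00820043, 0, buf);

	printf("%d address bytes: %02x %02x %02x %02x\n",
	       n, buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
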
+#ifndef DRXK_MAX_RETRIES_POWERUP
+#define DRXK_MAX_RETRIES_POWERUP 20
+#endif
+
+int PowerUpDevice(struct drxk_state *state)
+{
+ int status;
+ u8 data = 0;
+ u16 retryCount = 0;
+
+ dprintk(1, "\n");
+
+ status = i2c_read1(state->i2c, state->demod_address, &data);
+ if (status < 0) {
+ do {
+ data = 0;
+ status = i2c_write(state->i2c, state->demod_address,
+ &data, 1);
+ msleep(10);
+ retryCount++;
+ if (status < 0)
+ continue;
+ status = i2c_read1(state->i2c, state->demod_address,
+ &data);
+ } while (status < 0 &&
+ (retryCount < DRXK_MAX_RETRIES_POWERUP));
+ if (status < 0 && retryCount >= DRXK_MAX_RETRIES_POWERUP)
+ goto error;
+ }
+
+ /* Make sure all clk domains are active */
+ status = write16(state, SIO_CC_PWD_MODE__A, SIO_CC_PWD_MODE_LEVEL_NONE);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
+ if (status < 0)
+ goto error;
+ /* Enable pll lock tests */
+ status = write16(state, SIO_CC_PLL_LOCK__A, 1);
+ if (status < 0)
+ goto error;
+
+ state->m_currentPowerMode = DRX_POWER_UP;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+
+static int init_state(struct drxk_state *state)
+{
+ /*
+	 * FIXME: most (all?) of the values below should be moved into
+ * struct drxk_config, as they are probably board-specific
+ */
+ u32 ulVSBIfAgcMode = DRXK_AGC_CTRL_AUTO;
+ u32 ulVSBIfAgcOutputLevel = 0;
+ u32 ulVSBIfAgcMinLevel = 0;
+ u32 ulVSBIfAgcMaxLevel = 0x7FFF;
+ u32 ulVSBIfAgcSpeed = 3;
+
+ u32 ulVSBRfAgcMode = DRXK_AGC_CTRL_AUTO;
+ u32 ulVSBRfAgcOutputLevel = 0;
+ u32 ulVSBRfAgcMinLevel = 0;
+ u32 ulVSBRfAgcMaxLevel = 0x7FFF;
+ u32 ulVSBRfAgcSpeed = 3;
+ u32 ulVSBRfAgcTop = 9500;
+ u32 ulVSBRfAgcCutOffCurrent = 4000;
+
+ u32 ulATVIfAgcMode = DRXK_AGC_CTRL_AUTO;
+ u32 ulATVIfAgcOutputLevel = 0;
+ u32 ulATVIfAgcMinLevel = 0;
+ u32 ulATVIfAgcMaxLevel = 0;
+ u32 ulATVIfAgcSpeed = 3;
+
+ u32 ulATVRfAgcMode = DRXK_AGC_CTRL_OFF;
+ u32 ulATVRfAgcOutputLevel = 0;
+ u32 ulATVRfAgcMinLevel = 0;
+ u32 ulATVRfAgcMaxLevel = 0;
+ u32 ulATVRfAgcTop = 9500;
+ u32 ulATVRfAgcCutOffCurrent = 4000;
+ u32 ulATVRfAgcSpeed = 3;
+
+ u32 ulQual83 = DEFAULT_MER_83;
+ u32 ulQual93 = DEFAULT_MER_93;
+
+ u32 ulDVBTStaticTSClock = 1;
+ u32 ulDVBCStaticTSClock = 1;
+
+ u32 ulMpegLockTimeOut = DEFAULT_DRXK_MPEG_LOCK_TIMEOUT;
+ u32 ulDemodLockTimeOut = DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT;
+
+ /* io_pad_cfg register (8 bit reg.) MSB bit is 1 (default value) */
+ /* io_pad_cfg_mode output mode is drive always */
+ /* io_pad_cfg_drive is set to power 2 (23 mA) */
+ u32 ulGPIOCfg = 0x0113;
+ u32 ulSerialMode = 1;
+ u32 ulInvertTSClock = 0;
+ u32 ulTSDataStrength = DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH;
+ u32 ulTSClockkStrength = DRXK_MPEG_OUTPUT_CLK_DRIVE_STRENGTH;
+ u32 ulDVBTBitrate = 50000000;
+ u32 ulDVBCBitrate = DRXK_QAM_SYMBOLRATE_MAX * 8;
+
+ u32 ulInsertRSByte = 0;
+
+ u32 ulRfMirror = 1;
+ u32 ulPowerDown = 0;
+
+ dprintk(1, "\n");
+
+ state->m_hasLNA = false;
+ state->m_hasDVBT = false;
+ state->m_hasDVBC = false;
+ state->m_hasATV = false;
+ state->m_hasOOB = false;
+ state->m_hasAudio = false;
+
+ state->m_ChunkSize = 124;
+
+ state->m_oscClockFreq = 0;
+ state->m_smartAntInverted = false;
+ state->m_bPDownOpenBridge = false;
+
+ /* real system clock frequency in kHz */
+ state->m_sysClockFreq = 151875;
+ /* Timing div, 250ns/Psys */
+	/* Timing div = (delay (ns) * sysclk (kHz)) / 1000 */
+ state->m_HICfgTimingDiv = ((state->m_sysClockFreq / 1000) *
+ HI_I2C_DELAY) / 1000;
+ /* Clipping */
+ if (state->m_HICfgTimingDiv > SIO_HI_RA_RAM_PAR_2_CFG_DIV__M)
+ state->m_HICfgTimingDiv = SIO_HI_RA_RAM_PAR_2_CFG_DIV__M;
+ state->m_HICfgWakeUpKey = (state->demod_address << 1);
+ /* port/bridge/power down ctrl */
+ state->m_HICfgCtrl = SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE;
+
+ state->m_bPowerDown = (ulPowerDown != 0);
+
+ state->m_DRXK_A1_PATCH_CODE = false;
+ state->m_DRXK_A1_ROM_CODE = false;
+ state->m_DRXK_A2_ROM_CODE = false;
+ state->m_DRXK_A3_ROM_CODE = false;
+ state->m_DRXK_A2_PATCH_CODE = false;
+ state->m_DRXK_A3_PATCH_CODE = false;
+
+ /* Init AGC and PGA parameters */
+ /* VSB IF */
+ state->m_vsbIfAgcCfg.ctrlMode = (ulVSBIfAgcMode);
+ state->m_vsbIfAgcCfg.outputLevel = (ulVSBIfAgcOutputLevel);
+ state->m_vsbIfAgcCfg.minOutputLevel = (ulVSBIfAgcMinLevel);
+ state->m_vsbIfAgcCfg.maxOutputLevel = (ulVSBIfAgcMaxLevel);
+ state->m_vsbIfAgcCfg.speed = (ulVSBIfAgcSpeed);
+ state->m_vsbPgaCfg = 140;
+
+ /* VSB RF */
+ state->m_vsbRfAgcCfg.ctrlMode = (ulVSBRfAgcMode);
+ state->m_vsbRfAgcCfg.outputLevel = (ulVSBRfAgcOutputLevel);
+ state->m_vsbRfAgcCfg.minOutputLevel = (ulVSBRfAgcMinLevel);
+ state->m_vsbRfAgcCfg.maxOutputLevel = (ulVSBRfAgcMaxLevel);
+ state->m_vsbRfAgcCfg.speed = (ulVSBRfAgcSpeed);
+ state->m_vsbRfAgcCfg.top = (ulVSBRfAgcTop);
+ state->m_vsbRfAgcCfg.cutOffCurrent = (ulVSBRfAgcCutOffCurrent);
+ state->m_vsbPreSawCfg.reference = 0x07;
+ state->m_vsbPreSawCfg.usePreSaw = true;
+
+ state->m_Quality83percent = DEFAULT_MER_83;
+ state->m_Quality93percent = DEFAULT_MER_93;
+ if (ulQual93 <= 500 && ulQual83 < ulQual93) {
+ state->m_Quality83percent = ulQual83;
+ state->m_Quality93percent = ulQual93;
+ }
+
+ /* ATV IF */
+ state->m_atvIfAgcCfg.ctrlMode = (ulATVIfAgcMode);
+ state->m_atvIfAgcCfg.outputLevel = (ulATVIfAgcOutputLevel);
+ state->m_atvIfAgcCfg.minOutputLevel = (ulATVIfAgcMinLevel);
+ state->m_atvIfAgcCfg.maxOutputLevel = (ulATVIfAgcMaxLevel);
+ state->m_atvIfAgcCfg.speed = (ulATVIfAgcSpeed);
+
+ /* ATV RF */
+ state->m_atvRfAgcCfg.ctrlMode = (ulATVRfAgcMode);
+ state->m_atvRfAgcCfg.outputLevel = (ulATVRfAgcOutputLevel);
+ state->m_atvRfAgcCfg.minOutputLevel = (ulATVRfAgcMinLevel);
+ state->m_atvRfAgcCfg.maxOutputLevel = (ulATVRfAgcMaxLevel);
+ state->m_atvRfAgcCfg.speed = (ulATVRfAgcSpeed);
+ state->m_atvRfAgcCfg.top = (ulATVRfAgcTop);
+ state->m_atvRfAgcCfg.cutOffCurrent = (ulATVRfAgcCutOffCurrent);
+ state->m_atvPreSawCfg.reference = 0x04;
+ state->m_atvPreSawCfg.usePreSaw = true;
+
+
+ /* DVBT RF */
+ state->m_dvbtRfAgcCfg.ctrlMode = DRXK_AGC_CTRL_OFF;
+ state->m_dvbtRfAgcCfg.outputLevel = 0;
+ state->m_dvbtRfAgcCfg.minOutputLevel = 0;
+ state->m_dvbtRfAgcCfg.maxOutputLevel = 0xFFFF;
+ state->m_dvbtRfAgcCfg.top = 0x2100;
+ state->m_dvbtRfAgcCfg.cutOffCurrent = 4000;
+ state->m_dvbtRfAgcCfg.speed = 1;
+
+
+ /* DVBT IF */
+ state->m_dvbtIfAgcCfg.ctrlMode = DRXK_AGC_CTRL_AUTO;
+ state->m_dvbtIfAgcCfg.outputLevel = 0;
+ state->m_dvbtIfAgcCfg.minOutputLevel = 0;
+ state->m_dvbtIfAgcCfg.maxOutputLevel = 9000;
+ state->m_dvbtIfAgcCfg.top = 13424;
+ state->m_dvbtIfAgcCfg.cutOffCurrent = 0;
+ state->m_dvbtIfAgcCfg.speed = 3;
+ state->m_dvbtIfAgcCfg.FastClipCtrlDelay = 30;
+ state->m_dvbtIfAgcCfg.IngainTgtMax = 30000;
+ /* state->m_dvbtPgaCfg = 140; */
+
+ state->m_dvbtPreSawCfg.reference = 4;
+ state->m_dvbtPreSawCfg.usePreSaw = false;
+
+ /* QAM RF */
+ state->m_qamRfAgcCfg.ctrlMode = DRXK_AGC_CTRL_OFF;
+ state->m_qamRfAgcCfg.outputLevel = 0;
+ state->m_qamRfAgcCfg.minOutputLevel = 6023;
+ state->m_qamRfAgcCfg.maxOutputLevel = 27000;
+ state->m_qamRfAgcCfg.top = 0x2380;
+ state->m_qamRfAgcCfg.cutOffCurrent = 4000;
+ state->m_qamRfAgcCfg.speed = 3;
+
+ /* QAM IF */
+ state->m_qamIfAgcCfg.ctrlMode = DRXK_AGC_CTRL_AUTO;
+ state->m_qamIfAgcCfg.outputLevel = 0;
+ state->m_qamIfAgcCfg.minOutputLevel = 0;
+ state->m_qamIfAgcCfg.maxOutputLevel = 9000;
+ state->m_qamIfAgcCfg.top = 0x0511;
+ state->m_qamIfAgcCfg.cutOffCurrent = 0;
+ state->m_qamIfAgcCfg.speed = 3;
+ state->m_qamIfAgcCfg.IngainTgtMax = 5119;
+ state->m_qamIfAgcCfg.FastClipCtrlDelay = 50;
+
+ state->m_qamPgaCfg = 140;
+ state->m_qamPreSawCfg.reference = 4;
+ state->m_qamPreSawCfg.usePreSaw = false;
+
+ state->m_OperationMode = OM_NONE;
+ state->m_DrxkState = DRXK_UNINITIALIZED;
+
+ /* MPEG output configuration */
+	state->m_enableMPEGOutput = true;	/* If TRUE; enable MPEG output */
+ state->m_insertRSByte = false; /* If TRUE; insert RS byte */
+ state->m_enableParallel = true; /* If TRUE;
+ parallel out otherwise serial */
+ state->m_invertDATA = false; /* If TRUE; invert DATA signals */
+ state->m_invertERR = false; /* If TRUE; invert ERR signal */
+ state->m_invertSTR = false; /* If TRUE; invert STR signals */
+ state->m_invertVAL = false; /* If TRUE; invert VAL signals */
+ state->m_invertCLK = (ulInvertTSClock != 0); /* If TRUE; invert CLK signals */
+ state->m_DVBTStaticCLK = (ulDVBTStaticTSClock != 0);
+ state->m_DVBCStaticCLK = (ulDVBCStaticTSClock != 0);
+ /* If TRUE; static MPEG clockrate will be used;
+ otherwise clockrate will adapt to the bitrate of the TS */
+
+ state->m_DVBTBitrate = ulDVBTBitrate;
+ state->m_DVBCBitrate = ulDVBCBitrate;
+
+ state->m_TSDataStrength = (ulTSDataStrength & 0x07);
+ state->m_TSClockkStrength = (ulTSClockkStrength & 0x07);
+
+ /* Maximum bitrate in b/s in case static clockrate is selected */
+ state->m_mpegTsStaticBitrate = 19392658;
+ state->m_disableTEIhandling = false;
+
+ if (ulInsertRSByte)
+ state->m_insertRSByte = true;
+
+ state->m_MpegLockTimeOut = DEFAULT_DRXK_MPEG_LOCK_TIMEOUT;
+ if (ulMpegLockTimeOut < 10000)
+ state->m_MpegLockTimeOut = ulMpegLockTimeOut;
+ state->m_DemodLockTimeOut = DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT;
+ if (ulDemodLockTimeOut < 10000)
+ state->m_DemodLockTimeOut = ulDemodLockTimeOut;
+
+ /* QAM defaults */
+ state->m_Constellation = DRX_CONSTELLATION_AUTO;
+ state->m_qamInterleaveMode = DRXK_QAM_I12_J17;
+ state->m_fecRsPlen = 204 * 8; /* fecRsPlen annex A */
+ state->m_fecRsPrescale = 1;
+
+ state->m_sqiSpeed = DRXK_DVBT_SQI_SPEED_MEDIUM;
+ state->m_agcFastClipCtrlDelay = 0;
+
+ state->m_GPIOCfg = (ulGPIOCfg);
+
+ state->m_bPowerDown = false;
+ state->m_currentPowerMode = DRX_POWER_DOWN;
+
+ state->m_enableParallel = (ulSerialMode == 0);
+
+ state->m_rfmirror = (ulRfMirror == 0);
+ state->m_IfAgcPol = false;
+ return 0;
+}
+
+static int DRXX_Open(struct drxk_state *state)
+{
+ int status = 0;
+ u32 jtag = 0;
+ u16 bid = 0;
+ u16 key = 0;
+
+ dprintk(1, "\n");
+ /* stop lock indicator process */
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+ /* Check device id */
+ status = read16(state, SIO_TOP_COMM_KEY__A, &key);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
+ if (status < 0)
+ goto error;
+ status = read32(state, SIO_TOP_JTAGID_LO__A, &jtag);
+ if (status < 0)
+ goto error;
+ status = read16(state, SIO_PDR_UIO_IN_HI__A, &bid);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_TOP_COMM_KEY__A, key);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int GetDeviceCapabilities(struct drxk_state *state)
+{
+ u16 sioPdrOhwCfg = 0;
+ u32 sioTopJtagidLo = 0;
+ int status;
+ const char *spin = "";
+
+ dprintk(1, "\n");
+
+ /* driver 0.9.0 */
+ /* stop lock indicator process */
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
+ if (status < 0)
+ goto error;
+ status = read16(state, SIO_PDR_OHW_CFG__A, &sioPdrOhwCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000);
+ if (status < 0)
+ goto error;
+
+ switch ((sioPdrOhwCfg & SIO_PDR_OHW_CFG_FREF_SEL__M)) {
+ case 0:
+ /* ignore (bypass ?) */
+ break;
+ case 1:
+ /* 27 MHz */
+ state->m_oscClockFreq = 27000;
+ break;
+ case 2:
+ /* 20.25 MHz */
+ state->m_oscClockFreq = 20250;
+ break;
+ case 3:
+ /* 4 MHz */
+ state->m_oscClockFreq = 20250;
+ break;
+ default:
+		printk(KERN_ERR "drxk: Clock Frequency is unknown\n");
+ return -EINVAL;
+ }
+ /*
+ Determine device capabilities
+ Based on pinning v14
+ */
+ status = read32(state, SIO_TOP_JTAGID_LO__A, &sioTopJtagidLo);
+ if (status < 0)
+ goto error;
+ /* driver 0.9.0 */
+ switch ((sioTopJtagidLo >> 29) & 0xF) {
+ case 0:
+ state->m_deviceSpin = DRXK_SPIN_A1;
+ spin = "A1";
+ break;
+ case 2:
+ state->m_deviceSpin = DRXK_SPIN_A2;
+ spin = "A2";
+ break;
+ case 3:
+ state->m_deviceSpin = DRXK_SPIN_A3;
+ spin = "A3";
+ break;
+ default:
+ state->m_deviceSpin = DRXK_SPIN_UNKNOWN;
+ status = -EINVAL;
+ printk(KERN_ERR "drxk: Spin unknown\n");
+ goto error2;
+ }
+ switch ((sioTopJtagidLo >> 12) & 0xFF) {
+ case 0x13:
+ /* typeId = DRX3913K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = false;
+ state->m_hasAudio = false;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = true;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = false;
+ state->m_hasGPIO1 = false;
+ state->m_hasIRQN = false;
+ break;
+ case 0x15:
+ /* typeId = DRX3915K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = false;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = false;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x16:
+ /* typeId = DRX3916K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = false;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = false;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x18:
+ /* typeId = DRX3918K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = true;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = false;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x21:
+ /* typeId = DRX3921K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = true;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = true;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x23:
+ /* typeId = DRX3923K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = true;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = true;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x25:
+ /* typeId = DRX3925K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = true;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = true;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x26:
+ /* typeId = DRX3926K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = false;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = true;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ default:
+ printk(KERN_ERR "drxk: DeviceID 0x%02x not supported\n",
+ ((sioTopJtagidLo >> 12) & 0xFF));
+ status = -EINVAL;
+ goto error2;
+ }
+
+ printk(KERN_INFO
+ "drxk: detected a drx-39%02xk, spin %s, xtal %d.%03d MHz\n",
+ ((sioTopJtagidLo >> 12) & 0xFF), spin,
+ state->m_oscClockFreq / 1000,
+ state->m_oscClockFreq % 1000);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+error2:
+ return status;
+}
+
+static int HI_Command(struct drxk_state *state, u16 cmd, u16 *pResult)
+{
+ int status;
+ bool powerdown_cmd;
+
+ dprintk(1, "\n");
+
+ /* Write command */
+ status = write16(state, SIO_HI_RA_RAM_CMD__A, cmd);
+ if (status < 0)
+ goto error;
+ if (cmd == SIO_HI_RA_RAM_CMD_RESET)
+ msleep(1);
+
+ powerdown_cmd =
+ (bool) ((cmd == SIO_HI_RA_RAM_CMD_CONFIG) &&
+ ((state->m_HICfgCtrl) &
+ SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M) ==
+ SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ);
+ if (powerdown_cmd == false) {
+ /* Wait until command rdy */
+ u32 retryCount = 0;
+ u16 waitCmd;
+
+ do {
+ msleep(1);
+ retryCount += 1;
+ status = read16(state, SIO_HI_RA_RAM_CMD__A,
+ &waitCmd);
+ } while ((status < 0) && (retryCount < DRXK_MAX_RETRIES)
+ && (waitCmd != 0));
+ if (status < 0)
+ goto error;
+ status = read16(state, SIO_HI_RA_RAM_RES__A, pResult);
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int HI_CfgCommand(struct drxk_state *state)
+{
+ int status;
+
+ dprintk(1, "\n");
+
+ mutex_lock(&state->mutex);
+
+ status = write16(state, SIO_HI_RA_RAM_PAR_6__A, state->m_HICfgTimeout);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_HI_RA_RAM_PAR_5__A, state->m_HICfgCtrl);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_HI_RA_RAM_PAR_4__A, state->m_HICfgWakeUpKey);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_HI_RA_RAM_PAR_3__A, state->m_HICfgBridgeDelay);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_HI_RA_RAM_PAR_2__A, state->m_HICfgTimingDiv);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_HI_RA_RAM_PAR_1__A, SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY);
+ if (status < 0)
+ goto error;
+ status = HI_Command(state, SIO_HI_RA_RAM_CMD_CONFIG, 0);
+ if (status < 0)
+ goto error;
+
+ state->m_HICfgCtrl &= ~SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ;
+error:
+ mutex_unlock(&state->mutex);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int InitHI(struct drxk_state *state)
+{
+ dprintk(1, "\n");
+
+ state->m_HICfgWakeUpKey = (state->demod_address << 1);
+ state->m_HICfgTimeout = 0x96FF;
+ /* port/bridge/power down ctrl */
+ state->m_HICfgCtrl = SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE;
+
+ return HI_CfgCommand(state);
+}
+
+static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable)
+{
+ int status = -1;
+ u16 sioPdrMclkCfg = 0;
+ u16 sioPdrMdxCfg = 0;
+
+ dprintk(1, "\n");
+
+ /* stop lock indicator process */
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+
+ /* MPEG TS pad configuration */
+ status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
+ if (status < 0)
+ goto error;
+
+ if (mpegEnable == false) {
+		/* Set MPEG TS pads to input mode */
+ status = write16(state, SIO_PDR_MSTRT_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MERR_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MCLK_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MVAL_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD0_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD1_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD2_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD3_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD4_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD5_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD6_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD7_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ } else {
+ /* Enable MPEG output */
+ sioPdrMdxCfg =
+ ((state->m_TSDataStrength <<
+ SIO_PDR_MD0_CFG_DRIVE__B) | 0x0003);
+ sioPdrMclkCfg = ((state->m_TSClockkStrength <<
+ SIO_PDR_MCLK_CFG_DRIVE__B) |
+ 0x0003);
+
+ status = write16(state, SIO_PDR_MSTRT_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MERR_CFG__A, 0x0000); /* Disable */
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MVAL_CFG__A, 0x0000); /* Disable */
+ if (status < 0)
+ goto error;
+ if (state->m_enableParallel == true) {
+			/* parallel -> enable MD1 to MD7 */
+ status = write16(state, SIO_PDR_MD1_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD2_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD3_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD4_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD5_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD6_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD7_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ } else {
+ sioPdrMdxCfg = ((state->m_TSDataStrength <<
+ SIO_PDR_MD0_CFG_DRIVE__B)
+ | 0x0003);
+ /* serial -> disable MD1 to MD7 */
+ status = write16(state, SIO_PDR_MD1_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD2_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD3_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD4_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD5_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD6_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD7_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ }
+ status = write16(state, SIO_PDR_MCLK_CFG__A, sioPdrMclkCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD0_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ }
+ /* Enable MB output over MPEG pads and ctl input */
+ status = write16(state, SIO_PDR_MON_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ /* Write nomagic word to enable pdr reg write */
+ status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int MPEGTSDisable(struct drxk_state *state)
+{
+ dprintk(1, "\n");
+
+ return MPEGTSConfigurePins(state, false);
+}
+
+static int BLChainCmd(struct drxk_state *state,
+ u16 romOffset, u16 nrOfElements, u32 timeOut)
+{
+ u16 blStatus = 0;
+ int status;
+ unsigned long end;
+
+ dprintk(1, "\n");
+ mutex_lock(&state->mutex);
+ status = write16(state, SIO_BL_MODE__A, SIO_BL_MODE_CHAIN);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_CHAIN_ADDR__A, romOffset);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_CHAIN_LEN__A, nrOfElements);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_ENABLE__A, SIO_BL_ENABLE_ON);
+ if (status < 0)
+ goto error;
+
+ end = jiffies + msecs_to_jiffies(timeOut);
+ do {
+ msleep(1);
+ status = read16(state, SIO_BL_STATUS__A, &blStatus);
+ if (status < 0)
+ goto error;
+ } while ((blStatus == 0x1) &&
+ ((time_is_after_jiffies(end))));
+
+ if (blStatus == 0x1) {
+ printk(KERN_ERR "drxk: SIO not ready\n");
+ status = -EINVAL;
+ goto error2;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+error2:
+ mutex_unlock(&state->mutex);
+ return status;
+}
+
+
+static int DownloadMicrocode(struct drxk_state *state,
+ const u8 pMCImage[], u32 Length)
+{
+ const u8 *pSrc = pMCImage;
+ u16 Flags;
+ u16 Drain;
+ u32 Address;
+ u16 nBlocks;
+ u16 BlockSize;
+ u16 BlockCRC;
+ u32 offset = 0;
+ u32 i;
+ int status = 0;
+
+ dprintk(1, "\n");
+
+	/* down the drain (we don't care about MAGIC_WORD) */
+ Drain = (pSrc[0] << 8) | pSrc[1];
+ pSrc += sizeof(u16);
+ offset += sizeof(u16);
+ nBlocks = (pSrc[0] << 8) | pSrc[1];
+ pSrc += sizeof(u16);
+ offset += sizeof(u16);
+
+ for (i = 0; i < nBlocks; i += 1) {
+ Address = (pSrc[0] << 24) | (pSrc[1] << 16) |
+ (pSrc[2] << 8) | pSrc[3];
+ pSrc += sizeof(u32);
+ offset += sizeof(u32);
+
+ BlockSize = ((pSrc[0] << 8) | pSrc[1]) * sizeof(u16);
+ pSrc += sizeof(u16);
+ offset += sizeof(u16);
+
+ Flags = (pSrc[0] << 8) | pSrc[1];
+ pSrc += sizeof(u16);
+ offset += sizeof(u16);
+
+ BlockCRC = (pSrc[0] << 8) | pSrc[1];
+ pSrc += sizeof(u16);
+ offset += sizeof(u16);
+
+ if (offset + BlockSize > Length) {
+ printk(KERN_ERR "drxk: Firmware is corrupted.\n");
+ return -EINVAL;
+ }
+
+ status = write_block(state, Address, BlockSize, pSrc);
+ if (status < 0) {
+ printk(KERN_ERR "drxk: Error %d while loading firmware\n", status);
+ break;
+ }
+ pSrc += BlockSize;
+ offset += BlockSize;
+ }
+ return status;
+}
+
+static int DVBTEnableOFDMTokenRing(struct drxk_state *state, bool enable)
+{
+ int status;
+ u16 data = 0;
+ u16 desiredCtrl = SIO_OFDM_SH_OFDM_RING_ENABLE_ON;
+ u16 desiredStatus = SIO_OFDM_SH_OFDM_RING_STATUS_ENABLED;
+ unsigned long end;
+
+ dprintk(1, "\n");
+
+ if (enable == false) {
+ desiredCtrl = SIO_OFDM_SH_OFDM_RING_ENABLE_OFF;
+ desiredStatus = SIO_OFDM_SH_OFDM_RING_STATUS_DOWN;
+ }
+
+ status = read16(state, SIO_OFDM_SH_OFDM_RING_STATUS__A, &data);
+ if (status >= 0 && data == desiredStatus) {
+ /* tokenring already has correct status */
+ return status;
+ }
+ /* Disable/enable dvbt tokenring bridge */
+ status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, desiredCtrl);
+
+ end = jiffies + msecs_to_jiffies(DRXK_OFDM_TR_SHUTDOWN_TIMEOUT);
+ do {
+ status = read16(state, SIO_OFDM_SH_OFDM_RING_STATUS__A, &data);
+		if ((status >= 0 && data == desiredStatus) || time_is_before_jiffies(end))
+ break;
+ msleep(1);
+ } while (1);
+ if (data != desiredStatus) {
+ printk(KERN_ERR "drxk: SIO not ready\n");
+ return -EINVAL;
+ }
+ return status;
+}
+
+static int MPEGTSStop(struct drxk_state *state)
+{
+ int status = 0;
+ u16 fecOcSncMode = 0;
+ u16 fecOcIprMode = 0;
+
+ dprintk(1, "\n");
+
+	/* Graceful shutdown (byte boundaries) */
+ status = read16(state, FEC_OC_SNC_MODE__A, &fecOcSncMode);
+ if (status < 0)
+ goto error;
+ fecOcSncMode |= FEC_OC_SNC_MODE_SHUTDOWN__M;
+ status = write16(state, FEC_OC_SNC_MODE__A, fecOcSncMode);
+ if (status < 0)
+ goto error;
+
+ /* Suppress MCLK during absence of data */
+ status = read16(state, FEC_OC_IPR_MODE__A, &fecOcIprMode);
+ if (status < 0)
+ goto error;
+ fecOcIprMode |= FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__M;
+ status = write16(state, FEC_OC_IPR_MODE__A, fecOcIprMode);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int scu_command(struct drxk_state *state,
+ u16 cmd, u8 parameterLen,
+ u16 *parameter, u8 resultLen, u16 *result)
+{
+#if (SCU_RAM_PARAM_0__A - SCU_RAM_PARAM_15__A) != 15
+#error DRXK register mapping no longer compatible with this routine!
+#endif
+ u16 curCmd = 0;
+ int status = -EINVAL;
+ unsigned long end;
+ u8 buffer[34];
+ int cnt = 0, ii;
+ const char *p;
+ char errname[30];
+
+ dprintk(1, "\n");
+
+ if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) ||
+ ((resultLen > 0) && (result == NULL)))
+ goto error;
+
+ mutex_lock(&state->mutex);
+
+ /* assume that the command register is ready
+ since it is checked afterwards */
+ for (ii = parameterLen - 1; ii >= 0; ii -= 1) {
+ buffer[cnt++] = (parameter[ii] & 0xFF);
+ buffer[cnt++] = ((parameter[ii] >> 8) & 0xFF);
+ }
+ buffer[cnt++] = (cmd & 0xFF);
+ buffer[cnt++] = ((cmd >> 8) & 0xFF);
+
+ write_block(state, SCU_RAM_PARAM_0__A -
+ (parameterLen - 1), cnt, buffer);
+ /* Wait until SCU has processed command */
+ end = jiffies + msecs_to_jiffies(DRXK_MAX_WAITTIME);
+ do {
+ msleep(1);
+ status = read16(state, SCU_RAM_COMMAND__A, &curCmd);
+ if (status < 0)
+ goto error;
+ } while (!(curCmd == DRX_SCU_READY) && (time_is_after_jiffies(end)));
+ if (curCmd != DRX_SCU_READY) {
+ printk(KERN_ERR "drxk: SCU not ready\n");
+ status = -EIO;
+ goto error2;
+ }
+ /* read results */
+ if ((resultLen > 0) && (result != NULL)) {
+ s16 err;
+ int ii;
+
+ for (ii = resultLen - 1; ii >= 0; ii -= 1) {
+ status = read16(state, SCU_RAM_PARAM_0__A - ii, &result[ii]);
+ if (status < 0)
+ goto error;
+ }
+
+ /* Check if an error was reported by SCU */
+ err = (s16)result[0];
+ if (err >= 0)
+ goto error;
+
+ /* check for the known error codes */
+ switch (err) {
+ case SCU_RESULT_UNKCMD:
+ p = "SCU_RESULT_UNKCMD";
+ break;
+ case SCU_RESULT_UNKSTD:
+ p = "SCU_RESULT_UNKSTD";
+ break;
+ case SCU_RESULT_SIZE:
+ p = "SCU_RESULT_SIZE";
+ break;
+ case SCU_RESULT_INVPAR:
+ p = "SCU_RESULT_INVPAR";
+ break;
+ default: /* Other negative values are errors */
+ sprintf(errname, "ERROR: %d\n", err);
+ p = errname;
+ }
+ printk(KERN_ERR "drxk: %s while sending cmd 0x%04x with params:", p, cmd);
+ print_hex_dump_bytes("drxk: ", DUMP_PREFIX_NONE, buffer, cnt);
+ status = -EINVAL;
+ goto error2;
+ }
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+error2:
+ mutex_unlock(&state->mutex);
+ return status;
+}
+
+static int SetIqmAf(struct drxk_state *state, bool active)
+{
+ u16 data = 0;
+ int status;
+
+ dprintk(1, "\n");
+
+ /* Configure IQM */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+
+ if (!active) {
+ data |= (IQM_AF_STDBY_STDBY_ADC_STANDBY
+ | IQM_AF_STDBY_STDBY_AMP_STANDBY
+ | IQM_AF_STDBY_STDBY_PD_STANDBY
+ | IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY
+ | IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY);
+ } else {
+ data &= ((~IQM_AF_STDBY_STDBY_ADC_STANDBY)
+ & (~IQM_AF_STDBY_STDBY_AMP_STANDBY)
+ & (~IQM_AF_STDBY_STDBY_PD_STANDBY)
+ & (~IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY)
+ & (~IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY)
+ );
+ }
+ status = write16(state, IQM_AF_STDBY__A, data);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int CtrlPowerMode(struct drxk_state *state, enum DRXPowerMode *mode)
+{
+ int status = 0;
+ u16 sioCcPwdMode = 0;
+
+ dprintk(1, "\n");
+
+ /* Check arguments */
+ if (mode == NULL)
+ return -EINVAL;
+
+ switch (*mode) {
+ case DRX_POWER_UP:
+ sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_NONE;
+ break;
+ case DRXK_POWER_DOWN_OFDM:
+ sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_OFDM;
+ break;
+ case DRXK_POWER_DOWN_CORE:
+ sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_CLOCK;
+ break;
+ case DRXK_POWER_DOWN_PLL:
+ sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_PLL;
+ break;
+ case DRX_POWER_DOWN:
+ sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_OSC;
+ break;
+ default:
+		/* Unknown sleep mode */
+ return -EINVAL;
+ }
+
+ /* If already in requested power mode, do nothing */
+ if (state->m_currentPowerMode == *mode)
+ return 0;
+
+ /* For next steps make sure to start from DRX_POWER_UP mode */
+ if (state->m_currentPowerMode != DRX_POWER_UP) {
+ status = PowerUpDevice(state);
+ if (status < 0)
+ goto error;
+ status = DVBTEnableOFDMTokenRing(state, true);
+ if (status < 0)
+ goto error;
+ }
+
+ if (*mode == DRX_POWER_UP) {
+		/* Restore analog & pin configuration */
+ } else {
+ /* Power down to requested mode */
+ /* Backup some register settings */
+ /* Set pins with possible pull-ups connected
+ to them in input mode */
+ /* Analog power down */
+ /* ADC power down */
+ /* Power down device */
+ /* stop all comm_exec */
+ /* Stop and power down previous standard */
+ switch (state->m_OperationMode) {
+ case OM_DVBT:
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = PowerDownDVBT(state, false);
+ if (status < 0)
+ goto error;
+ break;
+ case OM_QAM_ITU_A:
+ case OM_QAM_ITU_C:
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = PowerDownQAM(state);
+ if (status < 0)
+ goto error;
+ break;
+ default:
+ break;
+ }
+ status = DVBTEnableOFDMTokenRing(state, false);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_CC_PWD_MODE__A, sioCcPwdMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
+ if (status < 0)
+ goto error;
+
+ if (*mode != DRXK_POWER_DOWN_OFDM) {
+ state->m_HICfgCtrl |=
+ SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ;
+ status = HI_CfgCommand(state);
+ if (status < 0)
+ goto error;
+ }
+ }
+ state->m_currentPowerMode = *mode;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int PowerDownDVBT(struct drxk_state *state, bool setPowerMode)
+{
+ enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM;
+ u16 cmdResult = 0;
+ u16 data = 0;
+ int status;
+
+ dprintk(1, "\n");
+
+ status = read16(state, SCU_COMM_EXEC__A, &data);
+ if (status < 0)
+ goto error;
+ if (data == SCU_COMM_EXEC_ACTIVE) {
+ /* Send OFDM stop command */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+ /* Send OFDM reset command */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+ }
+
+ /* Reset datapath for OFDM, processors first */
+ status = write16(state, OFDM_SC_COMM_EXEC__A, OFDM_SC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_LC_COMM_EXEC__A, OFDM_LC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_STOP);
+ if (status < 0)
+ goto error;
+
+ /* powerdown AFE */
+ status = SetIqmAf(state, false);
+ if (status < 0)
+ goto error;
+
+ /* powerdown to OFDM mode */
+ if (setPowerMode) {
+ status = CtrlPowerMode(state, &powerMode);
+ if (status < 0)
+ goto error;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SetOperationMode(struct drxk_state *state,
+ enum OperationMode oMode)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+ /*
+ Stop and power down previous standard
+ TODO investigate total power down instead of partial
+ power down depending on "previous" standard.
+ */
+
+ /* disable HW lock indicator */
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+
+ /* Device is already at the required mode */
+ if (state->m_OperationMode == oMode)
+ return 0;
+
+ switch (state->m_OperationMode) {
+ /* OM_NONE was added for start up */
+ case OM_NONE:
+ break;
+ case OM_DVBT:
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = PowerDownDVBT(state, true);
+ if (status < 0)
+ goto error;
+ state->m_OperationMode = OM_NONE;
+ break;
+ case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_C:
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = PowerDownQAM(state);
+ if (status < 0)
+ goto error;
+ state->m_OperationMode = OM_NONE;
+ break;
+ case OM_QAM_ITU_B:
+ default:
+ status = -EINVAL;
+ goto error;
+ }
+
+ /*
+ Power up new standard
+ */
+ switch (oMode) {
+ case OM_DVBT:
+ state->m_OperationMode = oMode;
+ status = SetDVBTStandard(state, oMode);
+ if (status < 0)
+ goto error;
+ break;
+ case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_C:
+ state->m_OperationMode = oMode;
+ status = SetQAMStandard(state, oMode);
+ if (status < 0)
+ goto error;
+ break;
+ case OM_QAM_ITU_B:
+ default:
+ status = -EINVAL;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int Start(struct drxk_state *state, s32 offsetFreq,
+ s32 IntermediateFrequency)
+{
+ int status = -EINVAL;
+
+ u16 IFreqkHz;
+ s32 OffsetkHz = offsetFreq / 1000;
+
+ dprintk(1, "\n");
+ if (state->m_DrxkState != DRXK_STOPPED &&
+ state->m_DrxkState != DRXK_DTV_STARTED)
+ goto error;
+
+ state->m_bMirrorFreqSpect = (state->param.inversion == INVERSION_ON);
+
+ if (IntermediateFrequency < 0) {
+ state->m_bMirrorFreqSpect = !state->m_bMirrorFreqSpect;
+ IntermediateFrequency = -IntermediateFrequency;
+ }
+
+ switch (state->m_OperationMode) {
+ case OM_QAM_ITU_A:
+ case OM_QAM_ITU_C:
+ IFreqkHz = (IntermediateFrequency / 1000);
+ status = SetQAM(state, IFreqkHz, OffsetkHz);
+ if (status < 0)
+ goto error;
+ state->m_DrxkState = DRXK_DTV_STARTED;
+ break;
+ case OM_DVBT:
+ IFreqkHz = (IntermediateFrequency / 1000);
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = SetDVBT(state, IFreqkHz, OffsetkHz);
+ if (status < 0)
+ goto error;
+ status = DVBTStart(state);
+ if (status < 0)
+ goto error;
+ state->m_DrxkState = DRXK_DTV_STARTED;
+ break;
+ default:
+ break;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int ShutDown(struct drxk_state *state)
+{
+ dprintk(1, "\n");
+
+ MPEGTSStop(state);
+ return 0;
+}
+
+static int GetLockStatus(struct drxk_state *state, u32 *pLockStatus,
+ u32 Time)
+{
+ int status = -EINVAL;
+
+ dprintk(1, "\n");
+
+ if (pLockStatus == NULL)
+ goto error;
+
+ *pLockStatus = NOT_LOCKED;
+
+ /* define the SCU command code */
+ switch (state->m_OperationMode) {
+ case OM_QAM_ITU_A:
+ case OM_QAM_ITU_B:
+ case OM_QAM_ITU_C:
+ status = GetQAMLockStatus(state, pLockStatus);
+ break;
+ case OM_DVBT:
+ status = GetDVBTLockStatus(state, pLockStatus);
+ break;
+ default:
+ break;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int MPEGTSStart(struct drxk_state *state)
+{
+ int status;
+
+ u16 fecOcSncMode = 0;
+
+ /* Allow OC to sync again */
+ status = read16(state, FEC_OC_SNC_MODE__A, &fecOcSncMode);
+ if (status < 0)
+ goto error;
+ fecOcSncMode &= ~FEC_OC_SNC_MODE_SHUTDOWN__M;
+ status = write16(state, FEC_OC_SNC_MODE__A, fecOcSncMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_SNC_UNLOCK__A, 1);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int MPEGTSDtoInit(struct drxk_state *state)
+{
+ int status;
+
+ dprintk(1, "\n");
+
+ /* Rate integration settings */
+ status = write16(state, FEC_OC_RCN_CTL_STEP_LO__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_RCN_CTL_STEP_HI__A, 0x000C);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_RCN_GAIN__A, 0x000A);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_AVR_PARM_A__A, 0x0008);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_AVR_PARM_B__A, 0x0006);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_TMD_HI_MARGIN__A, 0x0680);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_TMD_LO_MARGIN__A, 0x0080);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_TMD_COUNT__A, 0x03F4);
+ if (status < 0)
+ goto error;
+
+ /* Additional configuration */
+ status = write16(state, FEC_OC_OCR_INVERT__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_SNC_LWM__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_SNC_HWM__A, 12);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int MPEGTSDtoSetup(struct drxk_state *state,
+ enum OperationMode oMode)
+{
+ int status;
+
+ u16 fecOcRegMode = 0; /* FEC_OC_MODE register value */
+ u16 fecOcRegIprMode = 0; /* FEC_OC_IPR_MODE register value */
+	u16 fecOcDtoMode = 0;	/* FEC_OC_DTO_MODE register value */
+	u16 fecOcFctMode = 0;	/* FEC_OC_FCT_MODE register value */
+	u16 fecOcDtoPeriod = 2;	/* FEC_OC_DTO_PERIOD register value */
+	u16 fecOcDtoBurstLen = 188;	/* FEC_OC_DTO_BURST_LEN register value */
+	u32 fecOcRcnCtlRate = 0;	/* FEC_OC_RCN_CTL_RATE register value */
+ u16 fecOcTmdMode = 0;
+ u16 fecOcTmdIntUpdRate = 0;
+ u32 maxBitRate = 0;
+ bool staticCLK = false;
+
+ dprintk(1, "\n");
+
+ /* Check insertion of the Reed-Solomon parity bytes */
+ status = read16(state, FEC_OC_MODE__A, &fecOcRegMode);
+ if (status < 0)
+ goto error;
+ status = read16(state, FEC_OC_IPR_MODE__A, &fecOcRegIprMode);
+ if (status < 0)
+ goto error;
+ fecOcRegMode &= (~FEC_OC_MODE_PARITY__M);
+ fecOcRegIprMode &= (~FEC_OC_IPR_MODE_MVAL_DIS_PAR__M);
+ if (state->m_insertRSByte == true) {
+ /* enable parity symbol forward */
+ fecOcRegMode |= FEC_OC_MODE_PARITY__M;
+ /* MVAL disable during parity bytes */
+ fecOcRegIprMode |= FEC_OC_IPR_MODE_MVAL_DIS_PAR__M;
+ /* TS burst length to 204 */
+ fecOcDtoBurstLen = 204;
+ }
+
+	/* Check serial or parallel output */
+ fecOcRegIprMode &= (~(FEC_OC_IPR_MODE_SERIAL__M));
+ if (state->m_enableParallel == false) {
+ /* MPEG data output is serial -> set ipr_mode[0] */
+ fecOcRegIprMode |= FEC_OC_IPR_MODE_SERIAL__M;
+ }
+
+ switch (oMode) {
+ case OM_DVBT:
+ maxBitRate = state->m_DVBTBitrate;
+ fecOcTmdMode = 3;
+ fecOcRcnCtlRate = 0xC00000;
+ staticCLK = state->m_DVBTStaticCLK;
+ break;
+ case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_C:
+ fecOcTmdMode = 0x0004;
+ fecOcRcnCtlRate = 0xD2B4EE; /* good for >63 Mb/s */
+ maxBitRate = state->m_DVBCBitrate;
+ staticCLK = state->m_DVBCStaticCLK;
+ break;
+ default:
+ status = -EINVAL;
+ } /* switch (standard) */
+ if (status < 0)
+ goto error;
+
+ /* Configure DTO's */
+ if (staticCLK) {
+ u32 bitRate = 0;
+
+ /* Rational DTO for MCLK source (static MCLK rate),
+ Dynamic DTO for optimal grouping
+ (avoid intra-packet gaps),
+ DTO offset enable to sync TS burst with MSTRT */
+ fecOcDtoMode = (FEC_OC_DTO_MODE_DYNAMIC__M |
+ FEC_OC_DTO_MODE_OFFSET_ENABLE__M);
+ fecOcFctMode = (FEC_OC_FCT_MODE_RAT_ENA__M |
+ FEC_OC_FCT_MODE_VIRT_ENA__M);
+
+ /* Check user defined bitrate */
+ bitRate = maxBitRate;
+ if (bitRate > 75900000UL) { /* max is 75.9 Mb/s */
+ bitRate = 75900000UL;
+ }
+ /* Rational DTO period:
+ dto_period = (Fsys / bitrate) - 2
+
+ Result should be floored,
+ to make sure >= requested bitrate
+ */
+ fecOcDtoPeriod = (u16) (((state->m_sysClockFreq)
+ * 1000) / bitRate);
+ if (fecOcDtoPeriod <= 2)
+ fecOcDtoPeriod = 0;
+ else
+ fecOcDtoPeriod -= 2;
+ fecOcTmdIntUpdRate = 8;
+ } else {
+ /* (commonAttr->staticCLK == false) => dynamic mode */
+ fecOcDtoMode = FEC_OC_DTO_MODE_DYNAMIC__M;
+ fecOcFctMode = FEC_OC_FCT_MODE__PRE;
+ fecOcTmdIntUpdRate = 5;
+ }
+
+ /* Write appropriate registers with requested configuration */
+ status = write16(state, FEC_OC_DTO_BURST_LEN__A, fecOcDtoBurstLen);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_DTO_PERIOD__A, fecOcDtoPeriod);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_DTO_MODE__A, fecOcDtoMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_FCT_MODE__A, fecOcFctMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_MODE__A, fecOcRegMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_IPR_MODE__A, fecOcRegIprMode);
+ if (status < 0)
+ goto error;
+
+ /* Rate integration settings */
+ status = write32(state, FEC_OC_RCN_CTL_RATE_LO__A, fecOcRcnCtlRate);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_TMD_INT_UPD_RATE__A, fecOcTmdIntUpdRate);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_TMD_MODE__A, fecOcTmdMode);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int MPEGTSConfigurePolarity(struct drxk_state *state)
+{
+ u16 fecOcRegIprInvert = 0;
+
+ /* Data mask for the output data byte */
+ u16 InvertDataMask =
+ FEC_OC_IPR_INVERT_MD7__M | FEC_OC_IPR_INVERT_MD6__M |
+ FEC_OC_IPR_INVERT_MD5__M | FEC_OC_IPR_INVERT_MD4__M |
+ FEC_OC_IPR_INVERT_MD3__M | FEC_OC_IPR_INVERT_MD2__M |
+ FEC_OC_IPR_INVERT_MD1__M | FEC_OC_IPR_INVERT_MD0__M;
+
+ dprintk(1, "\n");
+
+ /* Control selective inversion of output bits */
+ fecOcRegIprInvert &= (~(InvertDataMask));
+ if (state->m_invertDATA == true)
+ fecOcRegIprInvert |= InvertDataMask;
+ fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MERR__M));
+ if (state->m_invertERR == true)
+ fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MERR__M;
+ fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MSTRT__M));
+ if (state->m_invertSTR == true)
+ fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MSTRT__M;
+ fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MVAL__M));
+ if (state->m_invertVAL == true)
+ fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MVAL__M;
+ fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MCLK__M));
+ if (state->m_invertCLK == true)
+ fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MCLK__M;
+
+ return write16(state, FEC_OC_IPR_INVERT__A, fecOcRegIprInvert);
+}
+
+#define SCU_RAM_AGC_KI_INV_RF_POL__M 0x4000
+
+static int SetAgcRf(struct drxk_state *state,
+ struct SCfgAgc *pAgcCfg, bool isDTV)
+{
+ int status = -EINVAL;
+ u16 data = 0;
+ struct SCfgAgc *pIfAgcSettings;
+
+ dprintk(1, "\n");
+
+ if (pAgcCfg == NULL)
+ goto error;
+
+ switch (pAgcCfg->ctrlMode) {
+ case DRXK_AGC_CTRL_AUTO:
+ /* Enable RF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data &= ~IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+
+ /* Enable SCU RF AGC loop */
+ data &= ~SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M;
+
+ /* Polarity */
+ if (state->m_RfAgcPol)
+ data |= SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Set speed (using complementary reduction value) */
+ status = read16(state, SCU_RAM_AGC_KI_RED__A, &data);
+ if (status < 0)
+ goto error;
+
+ data &= ~SCU_RAM_AGC_KI_RED_RAGC_RED__M;
+ data |= (~(pAgcCfg->speed <<
+ SCU_RAM_AGC_KI_RED_RAGC_RED__B)
+ & SCU_RAM_AGC_KI_RED_RAGC_RED__M);
+
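+ /* Illustrative example (field position is hypothetical): if
+ RAGC_RED were a 4-bit field at bit 4 (mask 0x00F0), speed = 3
+ would store ~(3 << 4) & 0x00F0 = 0x00C0, i.e. the bitwise
+ complement of the requested speed within the field. */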
+ status = write16(state, SCU_RAM_AGC_KI_RED__A, data);
+ if (status < 0)
+ goto error;
+
+ if (IsDVBT(state))
+ pIfAgcSettings = &state->m_dvbtIfAgcCfg;
+ else if (IsQAM(state))
+ pIfAgcSettings = &state->m_qamIfAgcCfg;
+ else
+ pIfAgcSettings = &state->m_atvIfAgcCfg;
+ if (pIfAgcSettings == NULL) {
+ status = -EINVAL;
+ goto error;
+ }
+
+ /* Set TOP, only if IF-AGC is in AUTO mode */
+ if (pIfAgcSettings->ctrlMode == DRXK_AGC_CTRL_AUTO)
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pAgcCfg->top);
+ if (status < 0)
+ goto error;
+
+ /* Cut-Off current */
+ status = write16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, pAgcCfg->cutOffCurrent);
+ if (status < 0)
+ goto error;
+
+ /* Max. output level */
+ status = write16(state, SCU_RAM_AGC_RF_MAX__A, pAgcCfg->maxOutputLevel);
+ if (status < 0)
+ goto error;
+
+ break;
+
+ case DRXK_AGC_CTRL_USER:
+ /* Enable RF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data &= ~IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Disable SCU RF AGC loop */
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+ data |= SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M;
+ if (state->m_RfAgcPol)
+ data |= SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+
+ /* SCU c.o.c. to 0, enabling full control range */
+ status = write16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, 0);
+ if (status < 0)
+ goto error;
+
+ /* Write value to output pin */
+ status = write16(state, SCU_RAM_AGC_RF_IACCU_HI__A, pAgcCfg->outputLevel);
+ if (status < 0)
+ goto error;
+ break;
+
+ case DRXK_AGC_CTRL_OFF:
+ /* Disable RF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data |= IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Disable SCU RF AGC loop */
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+ data |= SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+ break;
+
+ default:
+ status = -EINVAL;
+
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+#define SCU_RAM_AGC_KI_INV_IF_POL__M 0x2000
+
+static int SetAgcIf(struct drxk_state *state,
+ struct SCfgAgc *pAgcCfg, bool isDTV)
+{
+ u16 data = 0;
+ int status = 0;
+ struct SCfgAgc *pRfAgcSettings;
+
+ dprintk(1, "\n");
+
+ switch (pAgcCfg->ctrlMode) {
+ case DRXK_AGC_CTRL_AUTO:
+
+ /* Enable IF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data &= ~IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+
+ /* Enable SCU IF AGC loop */
+ data &= ~SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M;
+
+ /* Polarity */
+ if (state->m_IfAgcPol)
+ data |= SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Set speed (using complementary reduction value) */
+ status = read16(state, SCU_RAM_AGC_KI_RED__A, &data);
+ if (status < 0)
+ goto error;
+ data &= ~SCU_RAM_AGC_KI_RED_IAGC_RED__M;
+ data |= (~(pAgcCfg->speed <<
+ SCU_RAM_AGC_KI_RED_IAGC_RED__B)
+ & SCU_RAM_AGC_KI_RED_IAGC_RED__M);
+
+ status = write16(state, SCU_RAM_AGC_KI_RED__A, data);
+ if (status < 0)
+ goto error;
+
+ if (IsQAM(state))
+ pRfAgcSettings = &state->m_qamRfAgcCfg;
+ else
+ pRfAgcSettings = &state->m_atvRfAgcCfg;
+ if (pRfAgcSettings == NULL)
+ return -1;
+ /* Restore TOP */
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pRfAgcSettings->top);
+ if (status < 0)
+ goto error;
+ break;
+
+ case DRXK_AGC_CTRL_USER:
+
+ /* Enable IF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data &= ~IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+
+ /* Disable SCU IF AGC loop */
+ data |= SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M;
+
+ /* Polarity */
+ if (state->m_IfAgcPol)
+ data |= SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Write value to output pin */
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pAgcCfg->outputLevel);
+ if (status < 0)
+ goto error;
+ break;
+
+ case DRXK_AGC_CTRL_OFF:
+
+ /* Disable If AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data |= IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Disable SCU IF AGC loop */
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+ data |= SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+ break;
+ } /* switch (agcSettingsIf->ctrlMode) */
+
+ /* always set the top to support
+ configurations without if-loop */
+ status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, pAgcCfg->top);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int ReadIFAgc(struct drxk_state *state, u32 *pValue)
+{
+ u16 agcDacLvl;
+ int status;
+ u16 Level = 0;
+
+ dprintk(1, "\n");
+
+ status = read16(state, IQM_AF_AGC_IF__A, &agcDacLvl);
+ if (status < 0) {
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+ }
+
+ *pValue = 0;
+
+ if (agcDacLvl > DRXK_AGC_DAC_OFFSET)
+ Level = agcDacLvl - DRXK_AGC_DAC_OFFSET;
+ if (Level < 14000)
+ *pValue = (14000 - Level) / 4;
+ else
+ *pValue = 0;
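+ /* Illustrative numbers: a reading of DRXK_AGC_DAC_OFFSET + 6000
+ gives Level = 6000 and *pValue = (14000 - 6000) / 4 = 2000;
+ readings of 14000 or more above the offset map to 0. */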
+
+ return status;
+}
+
+static int GetQAMSignalToNoise(struct drxk_state *state,
+ s32 *pSignalToNoise)
+{
+ int status = 0;
+ u16 qamSlErrPower = 0; /* accum. error between
+ raw and sliced symbols */
+ u32 qamSlSigPower = 0; /* used for MER, depends on
+ the QAM constellation */
+ u32 qamSlMer = 0; /* QAM MER */
+
+ dprintk(1, "\n");
+
+ /* MER calculation */
+
+ /* get the register value needed for MER */
+ status = read16(state, QAM_SL_ERR_POWER__A, &qamSlErrPower);
+ if (status < 0) {
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return -EINVAL;
+ }
+
+ switch (state->param.u.qam.modulation) {
+ case QAM_16:
+ qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM16 << 2;
+ break;
+ case QAM_32:
+ qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM32 << 2;
+ break;
+ case QAM_64:
+ qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM64 << 2;
+ break;
+ case QAM_128:
+ qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM128 << 2;
+ break;
+ default:
+ case QAM_256:
+ qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM256 << 2;
+ break;
+ }
+
+ if (qamSlErrPower > 0) {
+ qamSlMer = Log10Times100(qamSlSigPower) -
+ Log10Times100((u32) qamSlErrPower);
+ }
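+ /* Illustrative numbers: a sliced signal power 1024 times the
+ error power makes the difference of the Log10Times100() terms
+ about 301, i.e. an MER of roughly 30.1 dB in 0.1 dB units. */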
+ *pSignalToNoise = qamSlMer;
+
+ return status;
+}
+
+static int GetDVBTSignalToNoise(struct drxk_state *state,
+ s32 *pSignalToNoise)
+{
+ int status;
+ u16 regData = 0;
+ u32 EqRegTdSqrErrI = 0;
+ u32 EqRegTdSqrErrQ = 0;
+ u16 EqRegTdSqrErrExp = 0;
+ u16 EqRegTdTpsPwrOfs = 0;
+ u16 EqRegTdReqSmbCnt = 0;
+ u32 tpsCnt = 0;
+ u32 SqrErrIQ = 0;
+ u32 a = 0;
+ u32 b = 0;
+ u32 c = 0;
+ u32 iMER = 0;
+ u16 transmissionParams = 0;
+
+ dprintk(1, "\n");
+
+ status = read16(state, OFDM_EQ_TOP_TD_TPS_PWR_OFS__A, &EqRegTdTpsPwrOfs);
+ if (status < 0)
+ goto error;
+ status = read16(state, OFDM_EQ_TOP_TD_REQ_SMB_CNT__A, &EqRegTdReqSmbCnt);
+ if (status < 0)
+ goto error;
+ status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_EXP__A, &EqRegTdSqrErrExp);
+ if (status < 0)
+ goto error;
+ status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_I__A, &regData);
+ if (status < 0)
+ goto error;
+ /* Extend SQR_ERR_I operational range */
+ EqRegTdSqrErrI = (u32) regData;
+ if ((EqRegTdSqrErrExp > 11) &&
+ (EqRegTdSqrErrI < 0x00000FFFUL)) {
+ EqRegTdSqrErrI += 0x00010000UL;
+ }
+ status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_Q__A, &regData);
+ if (status < 0)
+ goto error;
+ /* Extend SQR_ERR_Q operational range */
+ EqRegTdSqrErrQ = (u32) regData;
+ if ((EqRegTdSqrErrExp > 11) &&
+ (EqRegTdSqrErrQ < 0x00000FFFUL))
+ EqRegTdSqrErrQ += 0x00010000UL;
+
+ status = read16(state, OFDM_SC_RA_RAM_OP_PARAM__A, &transmissionParams);
+ if (status < 0)
+ goto error;
+
+ /* Check input data for MER */
+
+ /* MER calculation (in 0.1 dB) without math.h */
+ if ((EqRegTdTpsPwrOfs == 0) || (EqRegTdReqSmbCnt == 0))
+ iMER = 0;
+ else if ((EqRegTdSqrErrI + EqRegTdSqrErrQ) == 0) {
+ /* No error at all, this must be the HW reset value
+ * Apparently no first measurement yet
+ * Set MER to 0.0 */
+ iMER = 0;
+ } else {
+ SqrErrIQ = (EqRegTdSqrErrI + EqRegTdSqrErrQ) <<
+ EqRegTdSqrErrExp;
+ if ((transmissionParams &
+ OFDM_SC_RA_RAM_OP_PARAM_MODE__M)
+ == OFDM_SC_RA_RAM_OP_PARAM_MODE_2K)
+ tpsCnt = 17;
+ else
+ tpsCnt = 68;
+
+ /* IMER = 100 * log10 (x)
+ where x = (EqRegTdTpsPwrOfs^2 *
+ EqRegTdReqSmbCnt * tpsCnt)/SqrErrIQ
+
+ => IMER = a + b -c
+ where a = 100 * log10 (EqRegTdTpsPwrOfs^2)
+ b = 100 * log10 (EqRegTdReqSmbCnt * tpsCnt)
+ c = 100 * log10 (SqrErrIQ)
+ */
+
+ /* log(x) x = 9bits * 9bits->18 bits */
+ a = Log10Times100(EqRegTdTpsPwrOfs *
+ EqRegTdTpsPwrOfs);
+ /* log(x) x = 16bits * 7bits->23 bits */
+ b = Log10Times100(EqRegTdReqSmbCnt * tpsCnt);
+ /* log(x) x = (16bits + 16bits) << 15 ->32 bits */
+ c = Log10Times100(SqrErrIQ);
+
+ iMER = a + b;
+ /* No negative MER, clip to zero */
+ if (iMER > c)
+ iMER -= c;
+ else
+ iMER = 0;
+ }
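+ /* Illustrative numbers only: EqRegTdTpsPwrOfs = 100 gives a = 400,
+ EqRegTdReqSmbCnt * tpsCnt = 1000 gives b = 300 and
+ SqrErrIQ = 100000 gives c = 500, so iMER = 400 + 300 - 500 = 200,
+ i.e. 20.0 dB. */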
+ *pSignalToNoise = iMER;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int GetSignalToNoise(struct drxk_state *state, s32 *pSignalToNoise)
+{
+ dprintk(1, "\n");
+
+ *pSignalToNoise = 0;
+ switch (state->m_OperationMode) {
+ case OM_DVBT:
+ return GetDVBTSignalToNoise(state, pSignalToNoise);
+ case OM_QAM_ITU_A:
+ case OM_QAM_ITU_C:
+ return GetQAMSignalToNoise(state, pSignalToNoise);
+ default:
+ break;
+ }
+ return 0;
+}
+
+#if 0
+static int GetDVBTQuality(struct drxk_state *state, s32 *pQuality)
+{
+ /* SNR values for quasi error-free reception from NorDig 2.2 */
+ int status = 0;
+
+ dprintk(1, "\n");
+
+ static s32 QE_SN[] = {
+ 51, /* QPSK 1/2 */
+ 69, /* QPSK 2/3 */
+ 79, /* QPSK 3/4 */
+ 89, /* QPSK 5/6 */
+ 97, /* QPSK 7/8 */
+ 108, /* 16-QAM 1/2 */
+ 131, /* 16-QAM 2/3 */
+ 146, /* 16-QAM 3/4 */
+ 156, /* 16-QAM 5/6 */
+ 160, /* 16-QAM 7/8 */
+ 165, /* 64-QAM 1/2 */
+ 187, /* 64-QAM 2/3 */
+ 202, /* 64-QAM 3/4 */
+ 216, /* 64-QAM 5/6 */
+ 225, /* 64-QAM 7/8 */
+ };
+
+ *pQuality = 0;
+
+ do {
+ s32 SignalToNoise = 0;
+ u16 Constellation = 0;
+ u16 CodeRate = 0;
+ u32 SignalToNoiseRel;
+ u32 BERQuality;
+
+ status = GetDVBTSignalToNoise(state, &SignalToNoise);
+ if (status < 0)
+ break;
+ status = read16(state, OFDM_EQ_TOP_TD_TPS_CONST__A, &Constellation);
+ if (status < 0)
+ break;
+ Constellation &= OFDM_EQ_TOP_TD_TPS_CONST__M;
+
+ status = read16(state, OFDM_EQ_TOP_TD_TPS_CODE_HP__A, &CodeRate);
+ if (status < 0)
+ break;
+ CodeRate &= OFDM_EQ_TOP_TD_TPS_CODE_HP__M;
+
+ if (Constellation > OFDM_EQ_TOP_TD_TPS_CONST_64QAM ||
+ CodeRate > OFDM_EQ_TOP_TD_TPS_CODE_LP_7_8)
+ break;
+ SignalToNoiseRel = SignalToNoise -
+ QE_SN[Constellation * 5 + CodeRate];
+ BERQuality = 100;
+
+ if (SignalToNoiseRel < -70)
+ *pQuality = 0;
+ else if (SignalToNoiseRel < 30)
+ *pQuality = ((SignalToNoiseRel + 70) *
+ BERQuality) / 100;
+ else
+ *pQuality = BERQuality;
+ } while (0);
+ return 0;
+}
+
+static int GetDVBCQuality(struct drxk_state *state, s32 *pQuality)
+{
+ int status = 0;
+ *pQuality = 0;
+
+ dprintk(1, "\n");
+
+ do {
+ u32 SignalToNoise = 0;
+ u32 BERQuality = 100;
+ u32 SignalToNoiseRel = 0;
+
+ status = GetQAMSignalToNoise(state, &SignalToNoise);
+ if (status < 0)
+ break;
+
+ switch (state->param.u.qam.modulation) {
+ case QAM_16:
+ SignalToNoiseRel = SignalToNoise - 200;
+ break;
+ case QAM_32:
+ SignalToNoiseRel = SignalToNoise - 230;
+ break; /* Not in NorDig */
+ case QAM_64:
+ SignalToNoiseRel = SignalToNoise - 260;
+ break;
+ case QAM_128:
+ SignalToNoiseRel = SignalToNoise - 290;
+ break;
+ default:
+ case QAM_256:
+ SignalToNoiseRel = SignalToNoise - 320;
+ break;
+ }
+
+ if (SignalToNoiseRel < -70)
+ *pQuality = 0;
+ else if (SignalToNoiseRel < 30)
+ *pQuality = ((SignalToNoiseRel + 70) *
+ BERQuality) / 100;
+ else
+ *pQuality = BERQuality;
+ } while (0);
+
+ return status;
+}
+
+static int GetQuality(struct drxk_state *state, s32 *pQuality)
+{
+ dprintk(1, "\n");
+
+ switch (state->m_OperationMode) {
+ case OM_DVBT:
+ return GetDVBTQuality(state, pQuality);
+ case OM_QAM_ITU_A:
+ return GetDVBCQuality(state, pQuality);
+ default:
+ break;
+ }
+
+ return 0;
+}
+#endif
+
+/* Free data RAM in SIO HI */
+#define SIO_HI_RA_RAM_USR_BEGIN__A 0x420040
+#define SIO_HI_RA_RAM_USR_END__A 0x420060
+
+#define DRXK_HI_ATOMIC_BUF_START (SIO_HI_RA_RAM_USR_BEGIN__A)
+#define DRXK_HI_ATOMIC_BUF_END (SIO_HI_RA_RAM_USR_BEGIN__A + 7)
+#define DRXK_HI_ATOMIC_READ SIO_HI_RA_RAM_PAR_3_ACP_RW_READ
+#define DRXK_HI_ATOMIC_WRITE SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE
+
+#define DRXDAP_FASI_ADDR2BLOCK(addr) (((addr) >> 22) & 0x3F)
+#define DRXDAP_FASI_ADDR2BANK(addr) (((addr) >> 16) & 0x3F)
+#define DRXDAP_FASI_ADDR2OFFSET(addr) ((addr) & 0x7FFF)
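+/* For example, SIO_HI_RA_RAM_USR_BEGIN__A (0x420040) decomposes into
+ block 0x01, bank 0x02 and offset 0x0040 with the macros above. */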
+
+static int ConfigureI2CBridge(struct drxk_state *state, bool bEnableBridge)
+{
+ int status = -EINVAL;
+
+ dprintk(1, "\n");
+
+ if (state->m_DrxkState == DRXK_UNINITIALIZED)
+ goto error;
+ if (state->m_DrxkState == DRXK_POWERED_DOWN)
+ goto error;
+
+ if (state->no_i2c_bridge)
+ return 0;
+
+ status = write16(state, SIO_HI_RA_RAM_PAR_1__A, SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY);
+ if (status < 0)
+ goto error;
+ if (bEnableBridge) {
+ status = write16(state, SIO_HI_RA_RAM_PAR_2__A, SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED);
+ if (status < 0)
+ goto error;
+ } else {
+ status = write16(state, SIO_HI_RA_RAM_PAR_2__A, SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN);
+ if (status < 0)
+ goto error;
+ }
+
+ status = HI_Command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, 0);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SetPreSaw(struct drxk_state *state,
+ struct SCfgPreSaw *pPreSawCfg)
+{
+ int status = -EINVAL;
+
+ dprintk(1, "\n");
+
+ if ((pPreSawCfg == NULL)
+ || (pPreSawCfg->reference > IQM_AF_PDREF__M))
+ goto error;
+
+ status = write16(state, IQM_AF_PDREF__A, pPreSawCfg->reference);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int BLDirectCmd(struct drxk_state *state, u32 targetAddr,
+ u16 romOffset, u16 nrOfElements, u32 timeOut)
+{
+ u16 blStatus = 0;
+ u16 offset = (u16) ((targetAddr >> 0) & 0x00FFFF);
+ u16 blockbank = (u16) ((targetAddr >> 16) & 0x000FFF);
+ int status;
+ unsigned long end;
+
+ dprintk(1, "\n");
+
+ mutex_lock(&state->mutex);
+ status = write16(state, SIO_BL_MODE__A, SIO_BL_MODE_DIRECT);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_TGT_HDR__A, blockbank);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_TGT_ADDR__A, offset);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_SRC_ADDR__A, romOffset);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_SRC_LEN__A, nrOfElements);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_ENABLE__A, SIO_BL_ENABLE_ON);
+ if (status < 0)
+ goto error;
+
+ end = jiffies + msecs_to_jiffies(timeOut);
+ do {
+ status = read16(state, SIO_BL_STATUS__A, &blStatus);
+ if (status < 0)
+ goto error;
+ } while ((blStatus == 0x1) && time_is_after_jiffies(end));
+ if (blStatus == 0x1) {
+ printk(KERN_ERR "drxk: SIO not ready\n");
+ status = -EINVAL;
+ goto error2;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+error2:
+ mutex_unlock(&state->mutex);
+ return status;
+
+}
+
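+/* Count how many of the three IQM_AF_PHASEx registers read 127,
+ i.e. how many ADC phases are in sync */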
+static int ADCSyncMeasurement(struct drxk_state *state, u16 *count)
+{
+ u16 data = 0;
+ int status;
+
+ dprintk(1, "\n");
+
+ /* Start measurement */
+ status = write16(state, IQM_AF_COMM_EXEC__A, IQM_AF_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_START_LOCK__A, 1);
+ if (status < 0)
+ goto error;
+
+ *count = 0;
+ status = read16(state, IQM_AF_PHASE0__A, &data);
+ if (status < 0)
+ goto error;
+ if (data == 127)
+ *count = *count + 1;
+ status = read16(state, IQM_AF_PHASE1__A, &data);
+ if (status < 0)
+ goto error;
+ if (data == 127)
+ *count = *count + 1;
+ status = read16(state, IQM_AF_PHASE2__A, &data);
+ if (status < 0)
+ goto error;
+ if (data == 127)
+ *count = *count + 1;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int ADCSynchronization(struct drxk_state *state)
+{
+ u16 count = 0;
+ int status;
+
+ dprintk(1, "\n");
+
+ status = ADCSyncMeasurement(state, &count);
+ if (status < 0)
+ goto error;
+
+ if (count == 1) {
+ /* Try sampling on a different edge */
+ u16 clkNeg = 0;
+
+ status = read16(state, IQM_AF_CLKNEG__A, &clkNeg);
+ if (status < 0)
+ goto error;
+ if ((clkNeg | IQM_AF_CLKNEG_CLKNEGDATA__M) ==
+ IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS) {
+ clkNeg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M));
+ clkNeg |=
+ IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_NEG;
+ } else {
+ clkNeg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M));
+ clkNeg |=
+ IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS;
+ }
+ status = write16(state, IQM_AF_CLKNEG__A, clkNeg);
+ if (status < 0)
+ goto error;
+ status = ADCSyncMeasurement(state, &count);
+ if (status < 0)
+ goto error;
+ }
+
+ if (count < 2)
+ status = -EINVAL;
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SetFrequencyShifter(struct drxk_state *state,
+ u16 intermediateFreqkHz,
+ s32 tunerFreqOffset, bool isDTV)
+{
+ bool selectPosImage = false;
+ u32 rfFreqResidual = tunerFreqOffset;
+ u32 fmFrequencyShift = 0;
+ bool tunerMirror = !state->m_bMirrorFreqSpect;
+ u32 adcFreq;
+ bool adcFlip;
+ int status;
+ u32 ifFreqActual;
+ u32 samplingFrequency = (u32) (state->m_sysClockFreq / 3);
+ u32 frequencyShift;
+ bool imageToSelect;
+
+ dprintk(1, "\n");
+
+ /*
+ Program frequency shifter
+ No need to account for mirroring on RF
+ */
+ if (isDTV) {
+ if ((state->m_OperationMode == OM_QAM_ITU_A) ||
+ (state->m_OperationMode == OM_QAM_ITU_C) ||
+ (state->m_OperationMode == OM_DVBT))
+ selectPosImage = true;
+ else
+ selectPosImage = false;
+ }
+ if (tunerMirror)
+ /* tuner doesn't mirror */
+ ifFreqActual = intermediateFreqkHz +
+ rfFreqResidual + fmFrequencyShift;
+ else
+ /* tuner mirrors */
+ ifFreqActual = intermediateFreqkHz -
+ rfFreqResidual - fmFrequencyShift;
+ if (ifFreqActual > samplingFrequency / 2) {
+ /* adc mirrors */
+ adcFreq = samplingFrequency - ifFreqActual;
+ adcFlip = true;
+ } else {
+ /* adc doesn't mirror */
+ adcFreq = ifFreqActual;
+ adcFlip = false;
+ }
+
+ frequencyShift = adcFreq;
+ imageToSelect = state->m_rfmirror ^ tunerMirror ^
+ adcFlip ^ selectPosImage;
+ state->m_IqmFsRateOfs =
+ Frac28a((frequencyShift), samplingFrequency);
+
+ if (imageToSelect)
+ state->m_IqmFsRateOfs = ~state->m_IqmFsRateOfs + 1;
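+ /* Rough example, assuming Frac28a() returns the ratio scaled by
+ 2^28 (the numbers are hypothetical): an adcFreq of one tenth of
+ samplingFrequency gives an offset of about 0.1 * 2^28 (roughly
+ 26.8 million), negated via two's complement when the other
+ image is selected. */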
+
+ /* Program frequency shifter with tuner offset compensation */
+ /* frequencyShift += tunerFreqOffset; TODO */
+ status = write32(state, IQM_FS_RATE_OFS_LO__A,
+ state->m_IqmFsRateOfs);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int InitAGC(struct drxk_state *state, bool isDTV)
+{
+ u16 ingainTgt = 0;
+ u16 ingainTgtMin = 0;
+ u16 ingainTgtMax = 0;
+ u16 clpCyclen = 0;
+ u16 clpSumMin = 0;
+ u16 clpDirTo = 0;
+ u16 snsSumMin = 0;
+ u16 snsSumMax = 0;
+ u16 clpSumMax = 0;
+ u16 snsDirTo = 0;
+ u16 kiInnergainMin = 0;
+ u16 ifIaccuHiTgt = 0;
+ u16 ifIaccuHiTgtMin = 0;
+ u16 ifIaccuHiTgtMax = 0;
+ u16 data = 0;
+ u16 fastClpCtrlDelay = 0;
+ u16 clpCtrlMode = 0;
+ int status = 0;
+
+ dprintk(1, "\n");
+
+ /* Common settings */
+ snsSumMax = 1023;
+ ifIaccuHiTgtMin = 2047;
+ clpCyclen = 500;
+ clpSumMax = 1023;
+
+ /* AGCInit() not available for DVBT; init done in microcode */
+ if (!IsQAM(state)) {
+ printk(KERN_ERR "drxk: %s: mode %d is not DVB-C\n", __func__, state->m_OperationMode);
+ return -EINVAL;
+ }
+
+ /* FIXME: Analog TV AGC requires different settings */
+
+ /* Standard specific settings */
+ clpSumMin = 8;
+ clpDirTo = (u16) -9;
+ clpCtrlMode = 0;
+ snsSumMin = 8;
+ snsDirTo = (u16) -9;
+ kiInnergainMin = (u16) -1030;
+ ifIaccuHiTgtMax = 0x2380;
+ ifIaccuHiTgt = 0x2380;
+ ingainTgtMin = 0x0511;
+ ingainTgt = 0x0511;
+ ingainTgtMax = 5119;
+ fastClpCtrlDelay = state->m_qamIfAgcCfg.FastClipCtrlDelay;
+
+ status = write16(state, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, fastClpCtrlDelay);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_AGC_CLP_CTRL_MODE__A, clpCtrlMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_INGAIN_TGT__A, ingainTgt);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, ingainTgtMin);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MAX__A, ingainTgtMax);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A, ifIaccuHiTgtMin);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, ifIaccuHiTgtMax);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_LO__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_RF_IACCU_HI__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_RF_IACCU_LO__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_SUM_MAX__A, clpSumMax);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_SUM_MAX__A, snsSumMax);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_AGC_KI_INNERGAIN_MIN__A, kiInnergainMin);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT__A, ifIaccuHiTgt);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_CYCLEN__A, clpCyclen);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_AGC_RF_SNS_DEV_MAX__A, 1023);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_RF_SNS_DEV_MIN__A, (u16) -1023);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__A, 50);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_AGC_KI_MAXMINGAIN_TH__A, 20);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_SUM_MIN__A, clpSumMin);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_SUM_MIN__A, snsSumMin);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_DIR_TO__A, clpDirTo);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_DIR_TO__A, snsDirTo);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_KI_MINGAIN__A, 0x7fff);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_KI_MAXGAIN__A, 0x0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_KI_MIN__A, 0x0117);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_KI_MAX__A, 0x0657);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_SUM__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_CYCCNT__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_DIR_WD__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_DIR_STP__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_SUM__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_CYCCNT__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_DIR_WD__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_DIR_STP__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_CYCLEN__A, 500);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_KI_CYCLEN__A, 500);
+ if (status < 0)
+ goto error;
+
+ /* Initialize inner-loop KI gain factors */
+ status = read16(state, SCU_RAM_AGC_KI__A, &data);
+ if (status < 0)
+ goto error;
+
+ data = 0x0657;
+ data &= ~SCU_RAM_AGC_KI_RF__M;
+ data |= (DRXK_KI_RAGC_QAM << SCU_RAM_AGC_KI_RF__B);
+ data &= ~SCU_RAM_AGC_KI_IF__M;
+ data |= (DRXK_KI_IAGC_QAM << SCU_RAM_AGC_KI_IF__B);
+
+ status = write16(state, SCU_RAM_AGC_KI__A, data);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
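+/* Read the accumulated packet-error counter; passing a NULL packetErr
+ resets the counter instead */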
+static int DVBTQAMGetAccPktErr(struct drxk_state *state, u16 *packetErr)
+{
+ int status;
+
+ dprintk(1, "\n");
+ if (packetErr == NULL)
+ status = write16(state, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, 0);
+ else
+ status = read16(state, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, packetErr);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int DVBTScCommand(struct drxk_state *state,
+ u16 cmd, u16 subcmd,
+ u16 param0, u16 param1, u16 param2,
+ u16 param3, u16 param4)
+{
+ u16 curCmd = 0;
+ u16 errCode = 0;
+ u16 retryCnt = 0;
+ u16 scExec = 0;
+ int status;
+
+ dprintk(1, "\n");
+ status = read16(state, OFDM_SC_COMM_EXEC__A, &scExec);
+ if (scExec != 1) {
+ /* SC is not running */
+ status = -EINVAL;
+ }
+ if (status < 0)
+ goto error;
+
+ /* Wait until sc is ready to receive command */
+ retryCnt = 0;
+ do {
+ msleep(1);
+ status = read16(state, OFDM_SC_RA_RAM_CMD__A, &curCmd);
+ retryCnt++;
+ } while ((curCmd != 0) && (retryCnt < DRXK_MAX_RETRIES));
+ if (retryCnt >= DRXK_MAX_RETRIES && (status < 0))
+ goto error;
+
+ /* Write sub-command */
+ switch (cmd) {
+ /* All commands using sub-cmd */
+ case OFDM_SC_RA_RAM_CMD_PROC_START:
+ case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
+ case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
+ status = write16(state, OFDM_SC_RA_RAM_CMD_ADDR__A, subcmd);
+ if (status < 0)
+ goto error;
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+
+ /* Write needed parameters and the command */
+ switch (cmd) {
+ /* All commands using 5 parameters */
+ /* All commands using 4 parameters */
+ /* All commands using 3 parameters */
+ /* All commands using 2 parameters */
+ case OFDM_SC_RA_RAM_CMD_PROC_START:
+ case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
+ case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
+ status = write16(state, OFDM_SC_RA_RAM_PARAM1__A, param1);
+ /* All commands using 1 parameters */
+ case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
+ case OFDM_SC_RA_RAM_CMD_USER_IO:
+ status = write16(state, OFDM_SC_RA_RAM_PARAM0__A, param0);
+ /* All commands using 0 parameters */
+ case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
+ case OFDM_SC_RA_RAM_CMD_NULL:
+ /* Write command */
+ status = write16(state, OFDM_SC_RA_RAM_CMD__A, cmd);
+ break;
+ default:
+ /* Unknown command */
+ status = -EINVAL;
+ }
+ if (status < 0)
+ goto error;
+
+ /* Wait until the SC has processed the command */
+ retryCnt = 0;
+ do {
+ msleep(1);
+ status = read16(state, OFDM_SC_RA_RAM_CMD__A, &curCmd);
+ retryCnt++;
+ } while ((curCmd != 0) && (retryCnt < DRXK_MAX_RETRIES));
+ if (retryCnt >= DRXK_MAX_RETRIES && (status < 0))
+ goto error;
+
+ /* Check for illegal cmd */
+ status = read16(state, OFDM_SC_RA_RAM_CMD_ADDR__A, &errCode);
+ if (errCode == 0xFFFF) {
+ /* illegal command */
+ status = -EINVAL;
+ }
+ if (status < 0)
+ goto error;
+
+ /* Retrieve result parameters from the SC */
+ switch (cmd) {
+ /* All commands yielding 5 results */
+ /* All commands yielding 4 results */
+ /* All commands yielding 3 results */
+ /* All commands yielding 2 results */
+ /* All commands yielding 1 result */
+ case OFDM_SC_RA_RAM_CMD_USER_IO:
+ case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
+ status = read16(state, OFDM_SC_RA_RAM_PARAM0__A, &(param0));
+ /* All commands yielding 0 results */
+ case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
+ case OFDM_SC_RA_RAM_CMD_SET_TIMER:
+ case OFDM_SC_RA_RAM_CMD_PROC_START:
+ case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
+ case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
+ case OFDM_SC_RA_RAM_CMD_NULL:
+ break;
+ default:
+ /* Unknown command */
+ status = -EINVAL;
+ break;
+ } /* switch (cmd->cmd) */
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int PowerUpDVBT(struct drxk_state *state)
+{
+ enum DRXPowerMode powerMode = DRX_POWER_UP;
+ int status;
+
+ dprintk(1, "\n");
+ status = CtrlPowerMode(state, &powerMode);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int DVBTCtrlSetIncEnable(struct drxk_state *state, bool *enabled)
+{
+ int status;
+
+ dprintk(1, "\n");
+ if (*enabled == true)
+ status = write16(state, IQM_CF_BYPASSDET__A, 0);
+ else
+ status = write16(state, IQM_CF_BYPASSDET__A, 1);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+#define DEFAULT_FR_THRES_8K 4000
+static int DVBTCtrlSetFrEnable(struct drxk_state *state, bool *enabled)
+{
+
+ int status;
+
+ dprintk(1, "\n");
+ if (*enabled == true) {
+ /* write mask to 1 */
+ status = write16(state, OFDM_SC_RA_RAM_FR_THRES_8K__A,
+ DEFAULT_FR_THRES_8K);
+ } else {
+ /* write mask to 0 */
+ status = write16(state, OFDM_SC_RA_RAM_FR_THRES_8K__A, 0);
+ }
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int DVBTCtrlSetEchoThreshold(struct drxk_state *state,
+ struct DRXKCfgDvbtEchoThres_t *echoThres)
+{
+ u16 data = 0;
+ int status;
+
+ dprintk(1, "\n");
+ status = read16(state, OFDM_SC_RA_RAM_ECHO_THRES__A, &data);
+ if (status < 0)
+ goto error;
+
+ switch (echoThres->fftMode) {
+ case DRX_FFTMODE_2K:
+ data &= ~OFDM_SC_RA_RAM_ECHO_THRES_2K__M;
+ data |= ((echoThres->threshold <<
+ OFDM_SC_RA_RAM_ECHO_THRES_2K__B)
+ & (OFDM_SC_RA_RAM_ECHO_THRES_2K__M));
+ break;
+ case DRX_FFTMODE_8K:
+ data &= ~OFDM_SC_RA_RAM_ECHO_THRES_8K__M;
+ data |= ((echoThres->threshold <<
+ OFDM_SC_RA_RAM_ECHO_THRES_8K__B)
+ & (OFDM_SC_RA_RAM_ECHO_THRES_8K__M));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status = write16(state, OFDM_SC_RA_RAM_ECHO_THRES__A, data);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int DVBTCtrlSetSqiSpeed(struct drxk_state *state,
+ enum DRXKCfgDvbtSqiSpeed *speed)
+{
+ int status = -EINVAL;
+
+ dprintk(1, "\n");
+
+ switch (*speed) {
+ case DRXK_DVBT_SQI_SPEED_FAST:
+ case DRXK_DVBT_SQI_SPEED_MEDIUM:
+ case DRXK_DVBT_SQI_SPEED_SLOW:
+ break;
+ default:
+ goto error;
+ }
+ status = write16(state, SCU_RAM_FEC_PRE_RS_BER_FILTER_SH__A,
+ (u16) *speed);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Activate DVBT specific presets
+* \param demod instance of demodulator.
+* \return DRXStatus_t.
+*
+* Called in DVBTSetStandard
+*
+*/
+static int DVBTActivatePresets(struct drxk_state *state)
+{
+ int status;
+ bool setincenable = false;
+ bool setfrenable = true;
+
+ struct DRXKCfgDvbtEchoThres_t echoThres2k = { 0, DRX_FFTMODE_2K };
+ struct DRXKCfgDvbtEchoThres_t echoThres8k = { 0, DRX_FFTMODE_8K };
+
+ dprintk(1, "\n");
+ status = DVBTCtrlSetIncEnable(state, &setincenable);
+ if (status < 0)
+ goto error;
+ status = DVBTCtrlSetFrEnable(state, &setfrenable);
+ if (status < 0)
+ goto error;
+ status = DVBTCtrlSetEchoThreshold(state, &echoThres2k);
+ if (status < 0)
+ goto error;
+ status = DVBTCtrlSetEchoThreshold(state, &echoThres8k);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MAX__A, state->m_dvbtIfAgcCfg.IngainTgtMax);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Initialize channel-switch independent settings for DVB-T.
+* \param demod instance of demodulator.
+* \return DRXStatus_t.
+*
+* For ROM code, the channel filter taps are loaded by the bootloader. For
+* microcode, the DVB-T taps from drxk_filters.h are used.
+*/
+static int SetDVBTStandard(struct drxk_state *state,
+ enum OperationMode oMode)
+{
+ u16 cmdResult = 0;
+ u16 data = 0;
+ int status;
+
+ dprintk(1, "\n");
+
+ PowerUpDVBT(state);
+ /* added antenna switch */
+ SwitchAntennaToDVBT(state);
+ /* send OFDM reset command */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ /* send OFDM setenv command */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ /* reset datapath for OFDM, processors first */
+ status = write16(state, OFDM_SC_COMM_EXEC__A, OFDM_SC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_LC_COMM_EXEC__A, OFDM_LC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_STOP);
+ if (status < 0)
+ goto error;
+
+ /* IQM setup */
+ /* synchronize on ofdstate->m_festart */
+ status = write16(state, IQM_AF_UPD_SEL__A, 1);
+ if (status < 0)
+ goto error;
+ /* window size for clipping ADC detection */
+ status = write16(state, IQM_AF_CLP_LEN__A, 0);
+ if (status < 0)
+ goto error;
+ /* window size for sense pre-SAW detection */
+ status = write16(state, IQM_AF_SNS_LEN__A, 0);
+ if (status < 0)
+ goto error;
+ /* route the input signal to the ADC (analog mux) */
+ status = write16(state, IQM_AF_AMUX__A, IQM_AF_AMUX_SIGNAL2ADC);
+ if (status < 0)
+ goto error;
+ status = SetIqmAf(state, true);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_AF_AGC_RF__A, 0);
+ if (status < 0)
+ goto error;
+
+ /* Impulse noise cruncher setup */
+ status = write16(state, IQM_AF_INC_LCT__A, 0); /* crunch in IQM_CF */
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_DET_LCT__A, 0); /* detect in IQM_CF */
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_WND_LEN__A, 3); /* peak detector window length */
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_RC_STRETCH__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_OUT_ENA__A, 0x4); /* enable output 2 */
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_DS_ENA__A, 0x4); /* decimate output 2 */
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_SCALE__A, 1600);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_SCALE_SH__A, 0);
+ if (status < 0)
+ goto error;
+
+ /* virtual clipping threshold for clipping ADC detection */
+ status = write16(state, IQM_AF_CLP_TH__A, 448);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_DATATH__A, 495); /* crunching threshold */
+ if (status < 0)
+ goto error;
+
+ status = BLChainCmd(state, DRXK_BL_ROM_OFFSET_TAPS_DVBT, DRXK_BLCC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_CF_PKDTH__A, 2); /* peak detector threshold */
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_POW_MEAS_LEN__A, 2);
+ if (status < 0)
+ goto error;
+ /* enable power measurement interrupt */
+ status = write16(state, IQM_CF_COMM_INT_MSK__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_ACTIVE);
+ if (status < 0)
+ goto error;
+
+ /* IQM will not be reset from here, sync ADC and update/init AGC */
+ status = ADCSynchronization(state);
+ if (status < 0)
+ goto error;
+ status = SetPreSaw(state, &state->m_dvbtPreSawCfg);
+ if (status < 0)
+ goto error;
+
+ /* Halt SCU to enable safe non-atomic accesses */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
+ if (status < 0)
+ goto error;
+
+ status = SetAgcRf(state, &state->m_dvbtRfAgcCfg, true);
+ if (status < 0)
+ goto error;
+ status = SetAgcIf(state, &state->m_dvbtIfAgcCfg, true);
+ if (status < 0)
+ goto error;
+
+ /* Set Noise Estimation notch width and enable DC fix */
+ status = read16(state, OFDM_SC_RA_RAM_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+ data |= OFDM_SC_RA_RAM_CONFIG_NE_FIX_ENABLE__M;
+ status = write16(state, OFDM_SC_RA_RAM_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Activate SCU to enable SCU commands */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+
+ if (!state->m_DRXK_A3_ROM_CODE) {
+ /* AGCInit() is not done for DVBT, so set agcFastClipCtrlDelay */
+ status = write16(state, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, state->m_dvbtIfAgcCfg.FastClipCtrlDelay);
+ if (status < 0)
+ goto error;
+ }
+
+ /* OFDM_SC setup */
+#ifdef COMPILE_FOR_NONRT
+ status = write16(state, OFDM_SC_RA_RAM_BE_OPT_DELAY__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_BE_OPT_INIT_DELAY__A, 2);
+ if (status < 0)
+ goto error;
+#endif
+
+ /* FEC setup */
+ status = write16(state, FEC_DI_INPUT_CTL__A, 1); /* OFDM input */
+ if (status < 0)
+ goto error;
+
+
+#ifdef COMPILE_FOR_NONRT
+ status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, 0x400);
+ if (status < 0)
+ goto error;
+#else
+ status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, 0x1000);
+ if (status < 0)
+ goto error;
+#endif
+ status = write16(state, FEC_RS_MEASUREMENT_PRESCALE__A, 0x0001);
+ if (status < 0)
+ goto error;
+
+ /* Setup MPEG bus */
+ status = MPEGTSDtoSetup(state, OM_DVBT);
+ if (status < 0)
+ goto error;
+ /* Set DVBT Presets */
+ status = DVBTActivatePresets(state);
+ if (status < 0)
+ goto error;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+/**
+* \brief Start DVB-T demodulation for the channel.
+* \param demod instance of demodulator.
+* \return DRXStatus_t.
+*/
+static int DVBTStart(struct drxk_state *state)
+{
+ u16 param1;
+ int status;
+ /* DRXKOfdmScCmd_t scCmd; */
+
+ dprintk(1, "\n");
+ /* Start correct processes to get in lock */
+ /* DRXK: OFDM_SC_RA_RAM_PROC_LOCKTRACK is no longer in mapfile! */
+ param1 = OFDM_SC_RA_RAM_LOCKTRACK_MIN;
+ status = DVBTScCommand(state, OFDM_SC_RA_RAM_CMD_PROC_START, 0, OFDM_SC_RA_RAM_SW_EVENT_RUN_NMASK__M, param1, 0, 0, 0);
+ if (status < 0)
+ goto error;
+ /* Start FEC OC */
+ status = MPEGTSStart(state);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+
+/*============================================================================*/
+
+/**
+* \brief Set up the DVB-T demodulator for the channel.
+* \param demod instance of demodulator.
+* \return DRXStatus_t.
+* // original DVBTSetChannel()
+*/
+static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
+ s32 tunerFreqOffset)
+{
+ u16 cmdResult = 0;
+ u16 transmissionParams = 0;
+ u16 operationMode = 0;
+ u32 iqmRcRateOfs = 0;
+ u32 bandwidth = 0;
+ u16 param1;
+ int status;
+
+ dprintk(1, "IF =%d, TFO = %d\n", IntermediateFreqkHz, tunerFreqOffset);
+
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ /* Halt SCU to enable safe non-atomic accesses */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
+ if (status < 0)
+ goto error;
+
+ /* Stop processors */
+ status = write16(state, OFDM_SC_COMM_EXEC__A, OFDM_SC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_LC_COMM_EXEC__A, OFDM_LC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+
+ /* Mandatory fix: always stop the CP; required to set the spl offset
+ back to the hardware default (it is set to 0 by the ucode during
+ pilot detection) */
+ status = write16(state, OFDM_CP_COMM_EXEC__A, OFDM_CP_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+
+ /*== Write channel settings to device =====================================*/
+
+ /* mode */
+ switch (state->param.u.ofdm.transmission_mode) {
+ case TRANSMISSION_MODE_AUTO:
+ default:
+ operationMode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M;
+ /* fall through , try first guess DRX_FFTMODE_8K */
+ case TRANSMISSION_MODE_8K:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K;
+ break;
+ case TRANSMISSION_MODE_2K:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_MODE_2K;
+ break;
+ }
+
+ /* guard */
+ switch (state->param.u.ofdm.guard_interval) {
+ default:
+ case GUARD_INTERVAL_AUTO:
+ operationMode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M;
+ /* fall through , try first guess DRX_GUARD_1DIV4 */
+ case GUARD_INTERVAL_1_4:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4;
+ break;
+ case GUARD_INTERVAL_1_32:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_32;
+ break;
+ case GUARD_INTERVAL_1_16:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_16;
+ break;
+ case GUARD_INTERVAL_1_8:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_8;
+ break;
+ }
+
+ /* hierarchy */
+ switch (state->param.u.ofdm.hierarchy_information) {
+ case HIERARCHY_AUTO:
+ case HIERARCHY_NONE:
+ default:
+ operationMode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M;
+ /* fall through , try first guess SC_RA_RAM_OP_PARAM_HIER_NO */
+ /* transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */
+ /* break; */
+ case HIERARCHY_1:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1;
+ break;
+ case HIERARCHY_2:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A2;
+ break;
+ case HIERARCHY_4:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A4;
+ break;
+ }
+
+
+ /* constellation */
+ switch (state->param.u.ofdm.constellation) {
+ case QAM_AUTO:
+ default:
+ operationMode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M;
+ /* fall through , try first guess DRX_CONSTELLATION_QAM64 */
+ case QAM_64:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64;
+ break;
+ case QPSK:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QPSK;
+ break;
+ case QAM_16:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM16;
+ break;
+ }
+#if 0
+ /* No hierarchical channel support in BDA */
+ /* Priority (only for hierarchical channels) */
+ switch (channel->priority) {
+ case DRX_PRIORITY_LOW:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_LO;
+ WR16(devAddr, OFDM_EC_SB_PRIOR__A,
+ OFDM_EC_SB_PRIOR_LO);
+ break;
+ case DRX_PRIORITY_HIGH:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI;
+ WR16(devAddr, OFDM_EC_SB_PRIOR__A,
+ OFDM_EC_SB_PRIOR_HI);
+ break;
+ case DRX_PRIORITY_UNKNOWN: /* fall through */
+ default:
+ status = -EINVAL;
+ goto error;
+ }
+#else
+ /* Set priority high */
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI;
+ status = write16(state, OFDM_EC_SB_PRIOR__A, OFDM_EC_SB_PRIOR_HI);
+ if (status < 0)
+ goto error;
+#endif
+
+ /* coderate */
+ switch (state->param.u.ofdm.code_rate_HP) {
+ case FEC_AUTO:
+ default:
+ operationMode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M;
+ /* fall through , try first guess DRX_CODERATE_2DIV3 */
+ case FEC_2_3:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3;
+ break;
+ case FEC_1_2:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_1_2;
+ break;
+ case FEC_3_4:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_3_4;
+ break;
+ case FEC_5_6:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_5_6;
+ break;
+ case FEC_7_8:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_7_8;
+ break;
+ }
+
+ /* SAW filter selection: normally not necessary, but if desired
+ the application can select a SAW filter via the driver by using UIOs */
+ /* First determine real bandwidth (Hz) */
+ /* Also set delay for impulse noise cruncher */
+ /* Also set parameters for the EC_OC fix; note that EC_OC_REG_TMD_HIL_MAR
+ is changed by the SC as a fix for some 8K, 1/8 guard cases, but is
+ restored by the InitEC and ResetEC functions */
+ switch (state->param.u.ofdm.bandwidth) {
+ case BANDWIDTH_AUTO:
+ case BANDWIDTH_8_MHZ:
+ bandwidth = DRXK_BANDWIDTH_8MHZ_IN_HZ;
+ status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 3052);
+ if (status < 0)
+ goto error;
+ /* cochannel protection for PAL 8 MHz */
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 7);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 7);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 7);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1);
+ if (status < 0)
+ goto error;
+ break;
+ case BANDWIDTH_7_MHZ:
+ bandwidth = DRXK_BANDWIDTH_7MHZ_IN_HZ;
+ status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 3491);
+ if (status < 0)
+ goto error;
+ /* cochannel protection for PAL 7 MHz */
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1);
+ if (status < 0)
+ goto error;
+ break;
+ case BANDWIDTH_6_MHZ:
+ bandwidth = DRXK_BANDWIDTH_6MHZ_IN_HZ;
+ status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 4073);
+ if (status < 0)
+ goto error;
+ /* cochannel protection for NTSC 6 MHz */
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 19);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 19);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 14);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1);
+ if (status < 0)
+ goto error;
+ break;
+ default:
+ status = -EINVAL;
+ goto error;
+ }
+
+ if (iqmRcRateOfs == 0) {
+ /* Now compute IQM_RC_RATE_OFS:
+ ((((SysFreq / BandWidth) / 2) / 2) - 1) * (2^23)
+ =>
+ ((SysFreq / BandWidth) * (2^21)) - (2^23)
+ */
+ /* (SysFreq / BandWidth) * (2^28) */
+ /* assert (MAX(sysClk)/MIN(bandwidth) < 16)
+ => assert(MAX(sysClk) < 16*MIN(bandwidth))
+ => assert(109714272 > 48000000) = true so Frac 28 can be used */
+ iqmRcRateOfs = Frac28a((u32)
+ ((state->m_sysClockFreq *
+ 1000) / 3), bandwidth);
+ /* (SysFreq / BandWidth) * (2^21), rounding before truncating */
+ if ((iqmRcRateOfs & 0x7fL) >= 0x40)
+ iqmRcRateOfs += 0x80L;
+ iqmRcRateOfs = iqmRcRateOfs >> 7;
+ /* ((SysFreq / BandWidth) * (2^21)) - (2^23) */
+ iqmRcRateOfs = iqmRcRateOfs - (1 << 23);
+ }
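+ /* Worked example with purely illustrative numbers: if
+ SysFreq / BandWidth were 6.25, the 2^28-scaled ratio is
+ 0x64000000; rounding and the >> 7 give 6.25 * 2^21 = 0x00C80000,
+ and subtracting 2^23 leaves 0x00480000. */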
+
+ iqmRcRateOfs &=
+ ((((u32) IQM_RC_RATE_OFS_HI__M) <<
+ IQM_RC_RATE_OFS_LO__W) | IQM_RC_RATE_OFS_LO__M);
+ status = write32(state, IQM_RC_RATE_OFS_LO__A, iqmRcRateOfs);
+ if (status < 0)
+ goto error;
+
+ /* Bandwidth setting done */
+
+#if 0
+ status = DVBTSetFrequencyShift(demod, channel, tunerOffset);
+ if (status < 0)
+ goto error;
+#endif
+ status = SetFrequencyShifter(state, IntermediateFreqkHz, tunerFreqOffset, true);
+ if (status < 0)
+ goto error;
+
+ /*== Start SC, write channel settings to SC ===============================*/
+
+ /* Activate SCU to enable SCU commands */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+
+ /* Enable SC after setting all other parameters */
+ status = write16(state, OFDM_SC_COMM_STATE__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_COMM_EXEC__A, 1);
+ if (status < 0)
+ goto error;
+
+
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_START, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ /* Write SC parameter registers, set all AUTO flags in operation mode */
+ param1 = (OFDM_SC_RA_RAM_OP_AUTO_MODE__M |
+ OFDM_SC_RA_RAM_OP_AUTO_GUARD__M |
+ OFDM_SC_RA_RAM_OP_AUTO_CONST__M |
+ OFDM_SC_RA_RAM_OP_AUTO_HIER__M |
+ OFDM_SC_RA_RAM_OP_AUTO_RATE__M);
+ status = DVBTScCommand(state, OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM,
+ 0, transmissionParams, param1, 0, 0, 0);
+ if (status < 0)
+ goto error;
+
+ if (!state->m_DRXK_A3_ROM_CODE)
+ status = DVBTCtrlSetSqiSpeed(state, &state->m_sqiSpeed);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+
+/*============================================================================*/
+
+/**
+* \brief Retrieve lock status.
+* \param demod Pointer to demodulator instance.
+* \param lockStat Pointer to lock status structure.
+* \return DRXStatus_t.
+*
+*/
+static int GetDVBTLockStatus(struct drxk_state *state, u32 *pLockStatus)
+{
+ int status;
+ const u16 mpeg_lock_mask = (OFDM_SC_RA_RAM_LOCK_MPEG__M |
+ OFDM_SC_RA_RAM_LOCK_FEC__M);
+ const u16 fec_lock_mask = (OFDM_SC_RA_RAM_LOCK_FEC__M);
+ const u16 demod_lock_mask = OFDM_SC_RA_RAM_LOCK_DEMOD__M;
+
+ u16 ScRaRamLock = 0;
+ u16 ScCommExec = 0;
+
+ dprintk(1, "\n");
+
+ *pLockStatus = NOT_LOCKED;
+ /* driver 0.9.0 */
+ /* Check if SC is running */
+ status = read16(state, OFDM_SC_COMM_EXEC__A, &ScCommExec);
+ if (status < 0)
+ goto end;
+ if (ScCommExec == OFDM_SC_COMM_EXEC_STOP)
+ goto end;
+
+ status = read16(state, OFDM_SC_RA_RAM_LOCK__A, &ScRaRamLock);
+ if (status < 0)
+ goto end;
+
+ if ((ScRaRamLock & mpeg_lock_mask) == mpeg_lock_mask)
+ *pLockStatus = MPEG_LOCK;
+ else if ((ScRaRamLock & fec_lock_mask) == fec_lock_mask)
+ *pLockStatus = FEC_LOCK;
+ else if ((ScRaRamLock & demod_lock_mask) == demod_lock_mask)
+ *pLockStatus = DEMOD_LOCK;
+ else if (ScRaRamLock & OFDM_SC_RA_RAM_LOCK_NODVBT__M)
+ *pLockStatus = NEVER_LOCK;
+end:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int PowerUpQAM(struct drxk_state *state)
+{
+ enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM;
+ int status;
+
+ dprintk(1, "\n");
+ status = CtrlPowerMode(state, &powerMode);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+
+/** Power Down QAM */
+static int PowerDownQAM(struct drxk_state *state)
+{
+ u16 data = 0;
+ u16 cmdResult;
+ int status = 0;
+
+ dprintk(1, "\n");
+ status = read16(state, SCU_COMM_EXEC__A, &data);
+ if (status < 0)
+ goto error;
+ if (data == SCU_COMM_EXEC_ACTIVE) {
+ /*
+ STOP demodulator
+ QAM and HW blocks
+ */
+ /* stop all comstate->m_exec */
+ status = write16(state, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+ }
+ /* powerdown AFE */
+ status = SetIqmAf(state, false);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Setup of the QAM Measurement intervals for signal quality
+* \param demod instance of demod.
+* \param constellation current constellation.
+* \return DRXStatus_t.
+*
+* NOTE:
+* Take into account that for certain settings the error counters can overflow.
+* The implementation does not check this.
+*
+*/
+static int SetQAMMeasurement(struct drxk_state *state,
+ enum EDrxkConstellation constellation,
+ u32 symbolRate)
+{
+ u32 fecBitsDesired = 0; /* BER accounting period */
+ u32 fecRsPeriodTotal = 0; /* Total period */
+ u16 fecRsPrescale = 0; /* ReedSolomon Measurement Prescale */
+ u16 fecRsPeriod = 0; /* Value for corresponding I2C register */
+ int status = 0;
+
+ dprintk(1, "\n");
+
+ fecRsPrescale = 1;
+ /* fecBitsDesired = symbolRate [kHz] *
+ FrameLength [ms] *
+ (constellation + 1) *
+ SyncLoss (== 1) *
+ ViterbiLoss (==1)
+ */
+ switch (constellation) {
+ case DRX_CONSTELLATION_QAM16:
+ fecBitsDesired = 4 * symbolRate;
+ break;
+ case DRX_CONSTELLATION_QAM32:
+ fecBitsDesired = 5 * symbolRate;
+ break;
+ case DRX_CONSTELLATION_QAM64:
+ fecBitsDesired = 6 * symbolRate;
+ break;
+ case DRX_CONSTELLATION_QAM128:
+ fecBitsDesired = 7 * symbolRate;
+ break;
+ case DRX_CONSTELLATION_QAM256:
+ fecBitsDesired = 8 * symbolRate;
+ break;
+ default:
+ status = -EINVAL;
+ }
+ if (status < 0)
+ goto error;
+
+ fecBitsDesired /= 1000; /* symbolRate [Hz] -> symbolRate [kHz] */
+ fecBitsDesired *= 500; /* meas. period [ms] */
+
+ /* Annex A/C: bits/RsPeriod = 204 * 8 = 1632 */
+ /* fecRsPeriodTotal = fecBitsDesired / 1632 */
+ fecRsPeriodTotal = (fecBitsDesired / 1632UL) + 1; /* roughly ceil */
+
+ /* fecRsPeriodTotal = fecRsPrescale * fecRsPeriod */
+ fecRsPrescale = 1 + (u16) (fecRsPeriodTotal >> 16);
+ if (fecRsPrescale == 0) {
+ /* Divide by zero (though impossible) */
+ status = -EINVAL;
+ if (status < 0)
+ goto error;
+ }
+ fecRsPeriod =
+ ((u16) fecRsPeriodTotal +
+ (fecRsPrescale >> 1)) / fecRsPrescale;
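+ /* Illustrative numbers: QAM64 at a hypothetical 6.875 Msym/s gives
+ fecBitsDesired = 6 * 6875 * 500 = 20625000 bits per 500 ms,
+ fecRsPeriodTotal = 20625000 / 1632 + 1 = 12638, fecRsPrescale = 1
+ and fecRsPeriod = 12638. */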
+
+ /* write corresponding registers */
+ status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, fecRsPeriod);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_RS_MEASUREMENT_PRESCALE__A, fecRsPrescale);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_SNC_FAIL_PERIOD__A, fecRsPeriod);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SetQAM16(struct drxk_state *state)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+ /* QAM Equalizer Setup */
+ /* Equalizer */
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 13517);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 13517);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 13517);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 13517);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 13517);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 13517);
+ if (status < 0)
+ goto error;
+ /* Decision Feedback Equalizer */
+ status = write16(state, QAM_DQ_QUAL_FUN0__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN1__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN2__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN3__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN4__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_SY_SYNC_HWM__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_AWM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_LWM__A, 3);
+ if (status < 0)
+ goto error;
+
+ /* QAM Slicer Settings */
+ status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM16);
+ if (status < 0)
+ goto error;
+
+	/* QAM Loop Controller Coefficients */
+ status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 20);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 20);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 32);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 10);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM State Machine (FSM) Thresholds */
+
+ status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 140);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 95);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 120);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 230);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 105);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 24);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM FSM Tracking Parameters */
+
+ status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 220);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 25);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -65);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -127);
+ if (status < 0)
+ goto error;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief QAM32 specific setup
+* \param demod instance of demod.
+* \return DRXStatus_t.
+*/
+static int SetQAM32(struct drxk_state *state)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+
+ /* QAM Equalizer Setup */
+ /* Equalizer */
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 6707);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 6707);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 6707);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 6707);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 6707);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 6707);
+ if (status < 0)
+ goto error;
+
+ /* Decision Feedback Equalizer */
+ status = write16(state, QAM_DQ_QUAL_FUN0__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN1__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN2__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN3__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN4__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_SY_SYNC_HWM__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_AWM__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_LWM__A, 3);
+ if (status < 0)
+ goto error;
+
+ /* QAM Slicer Settings */
+
+ status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM32);
+ if (status < 0)
+ goto error;
+
+
+	/* QAM Loop Controller Coefficients */
+
+ status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 20);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 20);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 0);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM State Machine (FSM) Thresholds */
+
+ status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 90);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 100);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 170);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 100);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 10);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM FSM Tracking Parameters */
+
+ status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 140);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) -8);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) -16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -26);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -56);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -86);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief QAM64 specific setup
+* \param demod instance of demod.
+* \return DRXStatus_t.
+*/
+static int SetQAM64(struct drxk_state *state)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+ /* QAM Equalizer Setup */
+ /* Equalizer */
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 13336);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 12618);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 11988);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 13809);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 13809);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 15609);
+ if (status < 0)
+ goto error;
+
+ /* Decision Feedback Equalizer */
+ status = write16(state, QAM_DQ_QUAL_FUN0__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN1__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN2__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN3__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN4__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_SY_SYNC_HWM__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_AWM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_LWM__A, 3);
+ if (status < 0)
+ goto error;
+
+ /* QAM Slicer Settings */
+ status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM64);
+ if (status < 0)
+ goto error;
+
+
+	/* QAM Loop Controller Coefficients */
+
+ status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 30);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 100);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 30);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 25);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 48);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 10);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM State Machine (FSM) Thresholds */
+
+ status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 100);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 60);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 110);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 200);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 95);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 15);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM FSM Tracking Parameters */
+
+ status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 141);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 7);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -45);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -80);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief QAM128 specific setup
+* \param demod: instance of demod.
+* \return DRXStatus_t.
+*/
+static int SetQAM128(struct drxk_state *state)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+ /* QAM Equalizer Setup */
+ /* Equalizer */
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 6564);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 6598);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 6394);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 6409);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 6656);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 7238);
+ if (status < 0)
+ goto error;
+
+ /* Decision Feedback Equalizer */
+ status = write16(state, QAM_DQ_QUAL_FUN0__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN1__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN2__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN3__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN4__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_SY_SYNC_HWM__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_AWM__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_LWM__A, 3);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM Slicer Settings */
+
+ status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM128);
+ if (status < 0)
+ goto error;
+
+
+	/* QAM Loop Controller Coefficients */
+
+ status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 120);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 60);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 25);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 64);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 0);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM State Machine (FSM) Thresholds */
+
+ status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 60);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 100);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 140);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 100);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 5);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 12);
+ if (status < 0)
+ goto error;
+
+ /* QAM FSM Tracking Parameters */
+
+ status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 65);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -1);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -23);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief QAM256 specific setup
+* \param demod: instance of demod.
+* \return DRXStatus_t.
+*/
+static int SetQAM256(struct drxk_state *state)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+ /* QAM Equalizer Setup */
+ /* Equalizer */
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 11502);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 12084);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 12543);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 12931);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 13629);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 15385);
+ if (status < 0)
+ goto error;
+
+ /* Decision Feedback Equalizer */
+ status = write16(state, QAM_DQ_QUAL_FUN0__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN1__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN2__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN3__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN4__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_SY_SYNC_HWM__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_AWM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_LWM__A, 3);
+ if (status < 0)
+ goto error;
+
+ /* QAM Slicer Settings */
+
+ status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM256);
+ if (status < 0)
+ goto error;
+
+
+	/* QAM Loop Controller Coefficients */
+
+ status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 250);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 125);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 25);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 48);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 10);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM State Machine (FSM) Thresholds */
+
+ status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 60);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 100);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 150);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 110);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 12);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM FSM Tracking Parameters */
+
+ status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 74);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 18);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 13);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) 7);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -8);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+
+/*============================================================================*/
+/**
+* \brief Reset QAM block.
+* \param demod: instance of demod.
+* \param channel: pointer to channel data.
+* \return DRXStatus_t.
+*/
+static int QAMResetQAM(struct drxk_state *state)
+{
+ int status;
+ u16 cmdResult;
+
+ dprintk(1, "\n");
+	/* Stop QAM COMM_EXEC */
+ status = write16(state, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Set QAM symbolrate.
+* \param demod: instance of demod.
+* \param channel: pointer to channel data.
+* \return DRXStatus_t.
+*/
+static int QAMSetSymbolrate(struct drxk_state *state)
+{
+ u32 adcFrequency = 0;
+ u32 symbFreq = 0;
+ u32 iqmRcRate = 0;
+ u16 ratesel = 0;
+ u32 lcSymbRate = 0;
+ int status;
+
+ dprintk(1, "\n");
+ /* Select & calculate correct IQM rate */
+ adcFrequency = (state->m_sysClockFreq * 1000) / 3;
+ ratesel = 0;
+ /* printk(KERN_DEBUG "drxk: SR %d\n", state->param.u.qam.symbol_rate); */
+ if (state->param.u.qam.symbol_rate <= 1188750)
+ ratesel = 3;
+ else if (state->param.u.qam.symbol_rate <= 2377500)
+ ratesel = 2;
+ else if (state->param.u.qam.symbol_rate <= 4755000)
+ ratesel = 1;
+ status = write16(state, IQM_FD_RATESEL__A, ratesel);
+ if (status < 0)
+ goto error;
+
+ /*
+ IqmRcRate = ((Fadc / (symbolrate * (4<<ratesel))) - 1) * (1<<23)
+ */
+ symbFreq = state->param.u.qam.symbol_rate * (1 << ratesel);
+ if (symbFreq == 0) {
+ /* Divide by zero */
+ status = -EINVAL;
+ goto error;
+ }
+ iqmRcRate = (adcFrequency / symbFreq) * (1 << 21) +
+ (Frac28a((adcFrequency % symbFreq), symbFreq) >> 7) -
+ (1 << 23);
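+	/*
+	 * Fixed-point evaluation of the formula above: the integer and
+	 * fractional parts of Fadc / symbFreq are both scaled by 2^21
+	 * (Frac28a() returns a 2^28-scaled fraction, hence the >> 7), and
+	 * subtracting (1 << 23) supplies the "- 1" term in the same scale.
+	 */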
+ status = write32(state, IQM_RC_RATE_OFS_LO__A, iqmRcRate);
+ if (status < 0)
+ goto error;
+ state->m_iqmRcRate = iqmRcRate;
+ /*
+ LcSymbFreq = round (.125 * symbolrate / adcFreq * (1<<15))
+ */
+ symbFreq = state->param.u.qam.symbol_rate;
+ if (adcFrequency == 0) {
+ /* Divide by zero */
+ status = -EINVAL;
+ goto error;
+ }
+ lcSymbRate = (symbFreq / adcFrequency) * (1 << 12) +
+ (Frac28a((symbFreq % adcFrequency), adcFrequency) >>
+ 16);
+ if (lcSymbRate > 511)
+ lcSymbRate = 511;
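+	/*
+	 * Same fixed-point scheme as for iqmRcRate: the 2^28-scaled
+	 * fraction from Frac28a() is shifted down by 16, matching the
+	 * 0.125 * (1 << 15) == (1 << 12) factor of the formula; the result
+	 * is clamped to 511 before being written to QAM_LC_SYMBOL_FREQ.
+	 */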
+ status = write16(state, QAM_LC_SYMBOL_FREQ__A, (u16) lcSymbRate);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Get QAM lock status.
+* \param demod: instance of demod.
+* \param channel: pointer to channel data.
+* \return DRXStatus_t.
+*/
+
+static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus)
+{
+ int status;
+ u16 Result[2] = { 0, 0 };
+
+ dprintk(1, "\n");
+ *pLockStatus = NOT_LOCKED;
+ status = scu_command(state,
+ SCU_RAM_COMMAND_STANDARD_QAM |
+ SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK, 0, NULL, 2,
+ Result);
+ if (status < 0)
+ printk(KERN_ERR "drxk: %s status = %08x\n", __func__, status);
+
+ if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED) {
+ /* 0x0000 NOT LOCKED */
+ } else if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_LOCKED) {
+ /* 0x4000 DEMOD LOCKED */
+ *pLockStatus = DEMOD_LOCK;
+ } else if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_NEVER_LOCK) {
+ /* 0x8000 DEMOD + FEC LOCKED (system lock) */
+ *pLockStatus = MPEG_LOCK;
+ } else {
+ /* 0xC000 NEVER LOCKED */
+ /* (system will never be able to lock to the signal) */
+ /* TODO: check this, intermediate & standard specific lock states are not
+ taken into account here */
+ *pLockStatus = NEVER_LOCK;
+ }
+ return status;
+}
+
+#define QAM_MIRROR__M 0x03
+#define QAM_MIRROR_NORMAL 0x00
+#define QAM_MIRRORED 0x01
+#define QAM_MIRROR_AUTO_ON 0x02
+#define QAM_LOCKRANGE__M 0x10
+#define QAM_LOCKRANGE_NORMAL 0x10
+
+static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
+ s32 tunerFreqOffset)
+{
+ int status;
+ u16 setParamParameters[4] = { 0, 0, 0, 0 };
+ u16 cmdResult;
+
+ dprintk(1, "\n");
+ /*
+ * STEP 1: reset demodulator
+ * resets FEC DI and FEC RS
+ * resets QAM block
+ * resets SCU variables
+ */
+ status = write16(state, FEC_DI_COMM_EXEC__A, FEC_DI_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_RS_COMM_EXEC__A, FEC_RS_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = QAMResetQAM(state);
+ if (status < 0)
+ goto error;
+
+ /*
+ * STEP 2: configure demodulator
+	 * - set params; resets IQM, QAM and FEC HW; initializes some
+ * SCU variables
+ */
+ status = QAMSetSymbolrate(state);
+ if (status < 0)
+ goto error;
+
+ /* Set params */
+ switch (state->param.u.qam.modulation) {
+ case QAM_256:
+ state->m_Constellation = DRX_CONSTELLATION_QAM256;
+ break;
+ case QAM_AUTO:
+ case QAM_64:
+ state->m_Constellation = DRX_CONSTELLATION_QAM64;
+ break;
+ case QAM_16:
+ state->m_Constellation = DRX_CONSTELLATION_QAM16;
+ break;
+ case QAM_32:
+ state->m_Constellation = DRX_CONSTELLATION_QAM32;
+ break;
+ case QAM_128:
+ state->m_Constellation = DRX_CONSTELLATION_QAM128;
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ if (status < 0)
+ goto error;
+ setParamParameters[0] = state->m_Constellation; /* constellation */
+ setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
+ if (state->m_OperationMode == OM_QAM_ITU_C)
+ setParamParameters[2] = QAM_TOP_ANNEX_C;
+ else
+ setParamParameters[2] = QAM_TOP_ANNEX_A;
+ setParamParameters[3] |= (QAM_MIRROR_AUTO_ON);
+ /* Env parameters */
+	/* check for LOCKRANGE Extended */
+ /* setParamParameters[3] |= QAM_LOCKRANGE_NORMAL; */
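+	/*
+	 * SET_PARAM argument layout used above: word 0 = constellation,
+	 * word 1 = interleave mode, word 2 = annex (A or C), word 3 =
+	 * environment flags such as the spectrum mirror mode.
+	 */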
+
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, 4, setParamParameters, 1, &cmdResult);
+ if (status < 0) {
+ /* Fall-back to the simpler call */
+ if (state->m_OperationMode == OM_QAM_ITU_C)
+ setParamParameters[0] = QAM_TOP_ANNEX_C;
+ else
+ setParamParameters[0] = QAM_TOP_ANNEX_A;
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, 1, setParamParameters, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ setParamParameters[0] = state->m_Constellation; /* constellation */
+ setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, 2, setParamParameters, 1, &cmdResult);
+ }
+ if (status < 0)
+ goto error;
+
+ /*
+	 * STEP 3: enable the system in a mode where the ADC provides a valid
+	 * signal; set up the constellation-independent registers
+ */
+#if 0
+ status = SetFrequency(channel, tunerFreqOffset));
+ if (status < 0)
+ goto error;
+#endif
+ status = SetFrequencyShifter(state, IntermediateFreqkHz, tunerFreqOffset, true);
+ if (status < 0)
+ goto error;
+
+ /* Setup BER measurement */
+	status = SetQAMMeasurement(state, state->m_Constellation, state->param.u.qam.symbol_rate);
+ if (status < 0)
+ goto error;
+
+ /* Reset default values */
+ status = write16(state, IQM_CF_SCALE_SH__A, IQM_CF_SCALE_SH__PRE);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_TIMEOUT__A, QAM_SY_TIMEOUT__PRE);
+ if (status < 0)
+ goto error;
+
+ /* Reset default LC values */
+ status = write16(state, QAM_LC_RATE_LIMIT__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_LPF_FACTORP__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_LPF_FACTORI__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_MODE__A, 7);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_LC_QUAL_TAB0__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB1__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB2__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB3__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB4__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB5__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB6__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB8__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB9__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB10__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB12__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB15__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB16__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB20__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB25__A, 4);
+ if (status < 0)
+ goto error;
+
+ /* Mirroring, QAM-block starting point not inverted */
+ status = write16(state, QAM_SY_SP_INV__A, QAM_SY_SP_INV_SPECTRUM_INV_DIS);
+ if (status < 0)
+ goto error;
+
+ /* Halt SCU to enable safe non-atomic accesses */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
+ if (status < 0)
+ goto error;
+
+ /* STEP 4: constellation specific setup */
+ switch (state->param.u.qam.modulation) {
+ case QAM_16:
+ status = SetQAM16(state);
+ break;
+ case QAM_32:
+ status = SetQAM32(state);
+ break;
+ case QAM_AUTO:
+ case QAM_64:
+ status = SetQAM64(state);
+ break;
+ case QAM_128:
+ status = SetQAM128(state);
+ break;
+ case QAM_256:
+ status = SetQAM256(state);
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ if (status < 0)
+ goto error;
+
+ /* Activate SCU to enable SCU commands */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+
+ /* Re-configure MPEG output, requires knowledge of channel bitrate */
+ /* extAttr->currentChannel.constellation = channel->constellation; */
+ /* extAttr->currentChannel.symbolrate = channel->symbolrate; */
+ status = MPEGTSDtoSetup(state, state->m_OperationMode);
+ if (status < 0)
+ goto error;
+
+ /* Start processes */
+ status = MPEGTSStart(state);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_COMM_EXEC__A, QAM_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_ACTIVE);
+ if (status < 0)
+ goto error;
+
+ /* STEP 5: start QAM demodulator (starts FEC, QAM and IQM HW) */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_START, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ /* update global DRXK data container */
+/*? extAttr->qamInterleaveMode = DRXK_QAM_I12_J17; */
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SetQAMStandard(struct drxk_state *state,
+ enum OperationMode oMode)
+{
+ int status;
+#ifdef DRXK_QAM_TAPS
+#define DRXK_QAMA_TAPS_SELECT
+#include "drxk_filters.h"
+#undef DRXK_QAMA_TAPS_SELECT
+#endif
+
+ dprintk(1, "\n");
+
+ /* added antenna switch */
+ SwitchAntennaToQAM(state);
+
+ /* Ensure correct power-up mode */
+ status = PowerUpQAM(state);
+ if (status < 0)
+ goto error;
+ /* Reset QAM block */
+ status = QAMResetQAM(state);
+ if (status < 0)
+ goto error;
+
+ /* Setup IQM */
+
+ status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_AMUX__A, IQM_AF_AMUX_SIGNAL2ADC);
+ if (status < 0)
+ goto error;
+
+	/* Upload IQM Channel Filter settings by
+	   the boot loader from the ROM table */
+ switch (oMode) {
+ case OM_QAM_ITU_A:
+ status = BLChainCmd(state, DRXK_BL_ROM_OFFSET_TAPS_ITU_A, DRXK_BLCC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
+ break;
+ case OM_QAM_ITU_C:
+ status = BLDirectCmd(state, IQM_CF_TAP_RE0__A, DRXK_BL_ROM_OFFSET_TAPS_ITU_C, DRXK_BLDC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
+ if (status < 0)
+ goto error;
+ status = BLDirectCmd(state, IQM_CF_TAP_IM0__A, DRXK_BL_ROM_OFFSET_TAPS_ITU_C, DRXK_BLDC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_CF_OUT_ENA__A, (1 << IQM_CF_OUT_ENA_QAM__B));
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_SYMMETRIC__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_MIDTAP__A, ((1 << IQM_CF_MIDTAP_RE__B) | (1 << IQM_CF_MIDTAP_IM__B)));
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_RC_STRETCH__A, 21);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_CLP_LEN__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_CLP_TH__A, 448);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_SNS_LEN__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_POW_MEAS_LEN__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_FS_ADJ_SEL__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_RC_ADJ_SEL__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_ADJ_SEL__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_UPD_SEL__A, 0);
+ if (status < 0)
+ goto error;
+
+ /* IQM Impulse Noise Processing Unit */
+ status = write16(state, IQM_CF_CLP_VAL__A, 500);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_DATATH__A, 1000);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_BYPASSDET__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_DET_LCT__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_WND_LEN__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_PKDTH__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_INC_BYPASS__A, 1);
+ if (status < 0)
+ goto error;
+
+ /* turn on IQMAF. Must be done before setAgc**() */
+ status = SetIqmAf(state, true);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_START_LOCK__A, 0x01);
+ if (status < 0)
+ goto error;
+
+ /* IQM will not be reset from here, sync ADC and update/init AGC */
+ status = ADCSynchronization(state);
+ if (status < 0)
+ goto error;
+
+ /* Set the FSM step period */
+ status = write16(state, SCU_RAM_QAM_FSM_STEP_PERIOD__A, 2000);
+ if (status < 0)
+ goto error;
+
+ /* Halt SCU to enable safe non-atomic accesses */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
+ if (status < 0)
+ goto error;
+
+ /* No more resets of the IQM, current standard correctly set =>
+ now AGCs can be configured. */
+
+ status = InitAGC(state, true);
+ if (status < 0)
+ goto error;
+ status = SetPreSaw(state, &(state->m_qamPreSawCfg));
+ if (status < 0)
+ goto error;
+
+	/* Configure AGCs */
+ status = SetAgcRf(state, &(state->m_qamRfAgcCfg), true);
+ if (status < 0)
+ goto error;
+ status = SetAgcIf(state, &(state->m_qamIfAgcCfg), true);
+ if (status < 0)
+ goto error;
+
+ /* Activate SCU to enable SCU commands */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int WriteGPIO(struct drxk_state *state)
+{
+ int status;
+ u16 value = 0;
+
+ dprintk(1, "\n");
+ /* stop lock indicator process */
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+
+ /* Write magic word to enable pdr reg write */
+ status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
+ if (status < 0)
+ goto error;
+
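+	/*
+	 * Each UIO pin enabled in state->UIO_mask is driven from the
+	 * corresponding bit of state->m_GPIO: UIO-1 (0x0001), UIO-2 (0x0002)
+	 * and UIO-3 (0x0004) map to bits 15, 14 and 2 of SIO_PDR_UIO_OUT_LO,
+	 * as handled below.
+	 */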
+ if (state->m_hasSAWSW) {
+ if (state->UIO_mask & 0x0001) { /* UIO-1 */
+ /* write to io pad configuration register - output mode */
+ status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
+ if (status < 0)
+ goto error;
+
+			/* use corresponding bit in io data output register */
+ status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value);
+ if (status < 0)
+ goto error;
+ if ((state->m_GPIO & 0x0001) == 0)
+ value &= 0x7FFF; /* write zero to 15th bit - 1st UIO */
+ else
+ value |= 0x8000; /* write one to 15th bit - 1st UIO */
+ /* write back to io data output register */
+ status = write16(state, SIO_PDR_UIO_OUT_LO__A, value);
+ if (status < 0)
+ goto error;
+ }
+ if (state->UIO_mask & 0x0002) { /* UIO-2 */
+ /* write to io pad configuration register - output mode */
+ status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
+ if (status < 0)
+ goto error;
+
+			/* use corresponding bit in io data output register */
+ status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value);
+ if (status < 0)
+ goto error;
+ if ((state->m_GPIO & 0x0002) == 0)
+				value &= 0xBFFF; /* write zero to 14th bit - 2nd UIO */
+			else
+				value |= 0x4000; /* write one to 14th bit - 2nd UIO */
+ /* write back to io data output register */
+ status = write16(state, SIO_PDR_UIO_OUT_LO__A, value);
+ if (status < 0)
+ goto error;
+ }
+ if (state->UIO_mask & 0x0004) { /* UIO-3 */
+ /* write to io pad configuration register - output mode */
+ status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
+ if (status < 0)
+ goto error;
+
+			/* use corresponding bit in io data output register */
+ status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value);
+ if (status < 0)
+ goto error;
+ if ((state->m_GPIO & 0x0004) == 0)
+ value &= 0xFFFB; /* write zero to 2nd bit - 3rd UIO */
+ else
+ value |= 0x0004; /* write one to 2nd bit - 3rd UIO */
+ /* write back to io data output register */
+ status = write16(state, SIO_PDR_UIO_OUT_LO__A, value);
+ if (status < 0)
+ goto error;
+ }
+ }
+ /* Write magic word to disable pdr reg write */
+ status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SwitchAntennaToQAM(struct drxk_state *state)
+{
+ int status = 0;
+ bool gpio_state;
+
+ dprintk(1, "\n");
+
+ if (!state->antenna_gpio)
+ return 0;
+
+ gpio_state = state->m_GPIO & state->antenna_gpio;
+
+ if (state->antenna_dvbt ^ gpio_state) {
+		/* Antenna is in DVB-T mode. Switch it. */
+ if (state->antenna_dvbt)
+ state->m_GPIO &= ~state->antenna_gpio;
+ else
+ state->m_GPIO |= state->antenna_gpio;
+ status = WriteGPIO(state);
+ }
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SwitchAntennaToDVBT(struct drxk_state *state)
+{
+ int status = 0;
+ bool gpio_state;
+
+ dprintk(1, "\n");
+
+ if (!state->antenna_gpio)
+ return 0;
+
+ gpio_state = state->m_GPIO & state->antenna_gpio;
+
+ if (!(state->antenna_dvbt ^ gpio_state)) {
+		/* Antenna is in DVB-C mode. Switch it. */
+ if (state->antenna_dvbt)
+ state->m_GPIO |= state->antenna_gpio;
+ else
+ state->m_GPIO &= ~state->antenna_gpio;
+ status = WriteGPIO(state);
+ }
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+
+static int PowerDownDevice(struct drxk_state *state)
+{
+ /* Power down to requested mode */
+ /* Backup some register settings */
+ /* Set pins with possible pull-ups connected to them in input mode */
+ /* Analog power down */
+ /* ADC power down */
+ /* Power down device */
+ int status;
+
+ dprintk(1, "\n");
+ if (state->m_bPDownOpenBridge) {
+ /* Open I2C bridge before power down of DRXK */
+ status = ConfigureI2CBridge(state, true);
+ if (status < 0)
+ goto error;
+ }
+ /* driver 0.9.0 */
+ status = DVBTEnableOFDMTokenRing(state, false);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SIO_CC_PWD_MODE__A, SIO_CC_PWD_MODE_LEVEL_CLOCK);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
+ if (status < 0)
+ goto error;
+ state->m_HICfgCtrl |= SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ;
+ status = HI_CfgCommand(state);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int load_microcode(struct drxk_state *state, const char *mc_name)
+{
+ const struct firmware *fw = NULL;
+ int err = 0;
+
+ dprintk(1, "\n");
+
+ err = request_firmware(&fw, mc_name, state->i2c->dev.parent);
+ if (err < 0) {
+ printk(KERN_ERR
+ "drxk: Could not load firmware file %s.\n", mc_name);
+ printk(KERN_INFO
+ "drxk: Copy %s to your hotplug directory!\n", mc_name);
+ return err;
+ }
+ err = DownloadMicrocode(state, fw->data, fw->size);
+ release_firmware(fw);
+ return err;
+}
+
+static int init_drxk(struct drxk_state *state)
+{
+ int status = 0;
+ enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM;
+ u16 driverVersion;
+
+ dprintk(1, "\n");
+	if (state->m_DrxkState == DRXK_UNINITIALIZED) {
+ status = PowerUpDevice(state);
+ if (status < 0)
+ goto error;
+ status = DRXX_Open(state);
+ if (status < 0)
+ goto error;
+ /* Soft reset of OFDM-, sys- and osc-clockdomain */
+ status = write16(state, SIO_CC_SOFT_RST__A, SIO_CC_SOFT_RST_OFDM__M | SIO_CC_SOFT_RST_SYS__M | SIO_CC_SOFT_RST_OSC__M);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
+ if (status < 0)
+ goto error;
+		/* TODO: is this needed? If so, how much delay is required in the worst case? */
+ msleep(1);
+ state->m_DRXK_A3_PATCH_CODE = true;
+ status = GetDeviceCapabilities(state);
+ if (status < 0)
+ goto error;
+
+		/* Bridge delay, uses oscillator clock */
+		/* Delay = (delay (nanoseconds) * oscclk (kHz)) / 1000 */
+		/* SDA bridge delay */
+ state->m_HICfgBridgeDelay =
+ (u16) ((state->m_oscClockFreq / 1000) *
+ HI_I2C_BRIDGE_DELAY) / 1000;
+ /* Clipping */
+ if (state->m_HICfgBridgeDelay >
+ SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M) {
+ state->m_HICfgBridgeDelay =
+ SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M;
+ }
+ /* SCL bridge delay, same as SDA for now */
+ state->m_HICfgBridgeDelay +=
+ state->m_HICfgBridgeDelay <<
+ SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B;
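+		/*
+		 * Illustrative example (hypothetical 20250 kHz oscillator
+		 * clock, not a value taken from this driver):
+		 *   (20250 / 1000) * 350 / 1000 = 7
+		 * The same value is then shifted into the SCL field, so SDA
+		 * and SCL use the same bridge delay.
+		 */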
+
+ status = InitHI(state);
+ if (status < 0)
+ goto error;
+ /* disable various processes */
+#if NOA1ROM
+ if (!(state->m_DRXK_A1_ROM_CODE)
+ && !(state->m_DRXK_A2_ROM_CODE))
+#endif
+ {
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+ }
+
+ /* disable MPEG port */
+ status = MPEGTSDisable(state);
+ if (status < 0)
+ goto error;
+
+ /* Stop AUD and SCU */
+ status = write16(state, AUD_COMM_EXEC__A, AUD_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+
+ /* enable token-ring bus through OFDM block for possible ucode upload */
+ status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, SIO_OFDM_SH_OFDM_RING_ENABLE_ON);
+ if (status < 0)
+ goto error;
+
+ /* include boot loader section */
+ status = write16(state, SIO_BL_COMM_EXEC__A, SIO_BL_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+ status = BLChainCmd(state, 0, 6, 100);
+ if (status < 0)
+ goto error;
+
+ if (!state->microcode_name)
+ load_microcode(state, "drxk_a3.mc");
+ else
+ load_microcode(state, state->microcode_name);
+
+		/* disable token-ring bus through OFDM block after the possible ucode upload */
+ status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, SIO_OFDM_SH_OFDM_RING_ENABLE_OFF);
+ if (status < 0)
+ goto error;
+
+ /* Run SCU for a little while to initialize microcode version numbers */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+ status = DRXX_Open(state);
+ if (status < 0)
+ goto error;
+ /* added for test */
+ msleep(30);
+
+ powerMode = DRXK_POWER_DOWN_OFDM;
+ status = CtrlPowerMode(state, &powerMode);
+ if (status < 0)
+ goto error;
+
+		/* Stamp driver version number in SCU data RAM in BCD code.
+		   Done to enable field application engineers to retrieve the drx
+		   driver version via I2C from SCU RAM.
+		   Not using the SCU command interface for SCU register access,
+		   since no microcode may be present. */
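+		/*
+		 * With the version numbers from drxk_hard.h (0.9.4300), the
+		 * two BCD words written below come out as VER_HI = 0x0009 and
+		 * VER_LO = 0x4300, one decimal digit per nibble.
+		 */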
+ driverVersion =
+ (((DRXK_VERSION_MAJOR / 100) % 10) << 12) +
+ (((DRXK_VERSION_MAJOR / 10) % 10) << 8) +
+ ((DRXK_VERSION_MAJOR % 10) << 4) +
+ (DRXK_VERSION_MINOR % 10);
+ status = write16(state, SCU_RAM_DRIVER_VER_HI__A, driverVersion);
+ if (status < 0)
+ goto error;
+ driverVersion =
+ (((DRXK_VERSION_PATCH / 1000) % 10) << 12) +
+ (((DRXK_VERSION_PATCH / 100) % 10) << 8) +
+ (((DRXK_VERSION_PATCH / 10) % 10) << 4) +
+ (DRXK_VERSION_PATCH % 10);
+ status = write16(state, SCU_RAM_DRIVER_VER_LO__A, driverVersion);
+ if (status < 0)
+ goto error;
+
+ printk(KERN_INFO "DRXK driver version %d.%d.%d\n",
+ DRXK_VERSION_MAJOR, DRXK_VERSION_MINOR,
+ DRXK_VERSION_PATCH);
+
+		/* Dirty fix of default values for ROM/PATCH microcode.
+		   Dirty because this fix makes it impossible to set up suitable
+		   values before calling DRX_Open. This solution requires changes
+		   to the RF AGC speed to be done via the CTRL function after
+		   calling DRX_Open. */
+
+ /* m_dvbtRfAgcCfg.speed = 3; */
+
+ /* Reset driver debug flags to 0 */
+ status = write16(state, SCU_RAM_DRIVER_DEBUG__A, 0);
+ if (status < 0)
+ goto error;
+ /* driver 0.9.0 */
+		/* Set up FEC OC.
+		   NOTE: no more full FEC resets are allowed afterwards! */
+ status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ /* MPEGTS functions are still the same */
+ status = MPEGTSDtoInit(state);
+ if (status < 0)
+ goto error;
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = MPEGTSConfigurePolarity(state);
+ if (status < 0)
+ goto error;
+ status = MPEGTSConfigurePins(state, state->m_enableMPEGOutput);
+ if (status < 0)
+ goto error;
+ /* added: configure GPIO */
+ status = WriteGPIO(state);
+ if (status < 0)
+ goto error;
+
+ state->m_DrxkState = DRXK_STOPPED;
+
+ if (state->m_bPowerDown) {
+ status = PowerDownDevice(state);
+ if (status < 0)
+ goto error;
+ state->m_DrxkState = DRXK_POWERED_DOWN;
+ } else
+ state->m_DrxkState = DRXK_STOPPED;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static void drxk_c_release(struct dvb_frontend *fe)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "\n");
+ kfree(state);
+}
+
+static int drxk_c_init(struct dvb_frontend *fe)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "\n");
+ if (mutex_trylock(&state->ctlock) == 0)
+ return -EBUSY;
+ SetOperationMode(state, OM_QAM_ITU_A);
+ return 0;
+}
+
+static int drxk_c_sleep(struct dvb_frontend *fe)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "\n");
+ ShutDown(state);
+ mutex_unlock(&state->ctlock);
+ return 0;
+}
+
+static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "%s\n", enable ? "enable" : "disable");
+ return ConfigureI2CBridge(state, enable ? true : false);
+}
+
+static int drxk_set_parameters(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+ u32 IF;
+
+ dprintk(1, "\n");
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe, p);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ state->param = *p;
+ fe->ops.tuner_ops.get_frequency(fe, &IF);
+ Start(state, 0, IF);
+
+ /* printk(KERN_DEBUG "drxk: %s IF=%d done\n", __func__, IF); */
+
+ return 0;
+}
+
+static int drxk_c_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ dprintk(1, "\n");
+ return 0;
+}
+
+static int drxk_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+ u32 stat;
+
+ dprintk(1, "\n");
+ *status = 0;
+ GetLockStatus(state, &stat, 0);
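+	/*
+	 * Map the internal lock state onto fe_status_t bits: 0x1f sets
+	 * SIGNAL|CARRIER|VITERBI|SYNC|LOCK, 0x0f drops FE_HAS_LOCK and
+	 * 0x07 additionally drops FE_HAS_SYNC.
+	 */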
+ if (stat == MPEG_LOCK)
+ *status |= 0x1f;
+ if (stat == FEC_LOCK)
+ *status |= 0x0f;
+ if (stat == DEMOD_LOCK)
+ *status |= 0x07;
+ return 0;
+}
+
+static int drxk_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ dprintk(1, "\n");
+
+ *ber = 0;
+ return 0;
+}
+
+static int drxk_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+ u32 val = 0;
+
+ dprintk(1, "\n");
+ ReadIFAgc(state, &val);
+ *strength = val & 0xffff;
+ return 0;
+}
+
+static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+ s32 snr2;
+
+ dprintk(1, "\n");
+ GetSignalToNoise(state, &snr2);
+ *snr = snr2 & 0xffff;
+ return 0;
+}
+
+static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+ u16 err;
+
+ dprintk(1, "\n");
+ DVBTQAMGetAccPktErr(state, &err);
+ *ucblocks = (u32) err;
+ return 0;
+}
+
+static int drxk_c_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings
+ *sets)
+{
+ dprintk(1, "\n");
+ sets->min_delay_ms = 3000;
+ sets->max_drift = 0;
+ sets->step_size = 0;
+ return 0;
+}
+
+static void drxk_t_release(struct dvb_frontend *fe)
+{
+ /*
+ * There's nothing to release here, as the state struct
+ * is already freed by drxk_c_release.
+ */
+}
+
+static int drxk_t_init(struct dvb_frontend *fe)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "\n");
+ if (mutex_trylock(&state->ctlock) == 0)
+ return -EBUSY;
+ SetOperationMode(state, OM_DVBT);
+ return 0;
+}
+
+static int drxk_t_sleep(struct dvb_frontend *fe)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "\n");
+ mutex_unlock(&state->ctlock);
+ return 0;
+}
+
+static int drxk_t_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ dprintk(1, "\n");
+
+ return 0;
+}
+
+static struct dvb_frontend_ops drxk_c_ops = {
+ .info = {
+ .name = "DRXK DVB-C",
+ .type = FE_QAM,
+ .frequency_stepsize = 62500,
+ .frequency_min = 47000000,
+ .frequency_max = 862000000,
+ .symbol_rate_min = 870000,
+ .symbol_rate_max = 11700000,
+ .caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO},
+ .release = drxk_c_release,
+ .init = drxk_c_init,
+ .sleep = drxk_c_sleep,
+ .i2c_gate_ctrl = drxk_gate_ctrl,
+
+ .set_frontend = drxk_set_parameters,
+ .get_frontend = drxk_c_get_frontend,
+ .get_tune_settings = drxk_c_get_tune_settings,
+
+ .read_status = drxk_read_status,
+ .read_ber = drxk_read_ber,
+ .read_signal_strength = drxk_read_signal_strength,
+ .read_snr = drxk_read_snr,
+ .read_ucblocks = drxk_read_ucblocks,
+};
+
+static struct dvb_frontend_ops drxk_t_ops = {
+ .info = {
+ .name = "DRXK DVB-T",
+ .type = FE_OFDM,
+ .frequency_min = 47125000,
+ .frequency_max = 865000000,
+ .frequency_stepsize = 166667,
+ .frequency_tolerance = 0,
+ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_16 | FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS},
+ .release = drxk_t_release,
+ .init = drxk_t_init,
+ .sleep = drxk_t_sleep,
+ .i2c_gate_ctrl = drxk_gate_ctrl,
+
+ .set_frontend = drxk_set_parameters,
+ .get_frontend = drxk_t_get_frontend,
+
+ .read_status = drxk_read_status,
+ .read_ber = drxk_read_ber,
+ .read_signal_strength = drxk_read_signal_strength,
+ .read_snr = drxk_read_snr,
+ .read_ucblocks = drxk_read_ucblocks,
+};
+
+struct dvb_frontend *drxk_attach(const struct drxk_config *config,
+ struct i2c_adapter *i2c,
+ struct dvb_frontend **fe_t)
+{
+ struct drxk_state *state = NULL;
+ u8 adr = config->adr;
+
+ dprintk(1, "\n");
+ state = kzalloc(sizeof(struct drxk_state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ state->i2c = i2c;
+ state->demod_address = adr;
+ state->single_master = config->single_master;
+ state->microcode_name = config->microcode_name;
+ state->no_i2c_bridge = config->no_i2c_bridge;
+ state->antenna_gpio = config->antenna_gpio;
+ state->antenna_dvbt = config->antenna_dvbt;
+
+ /* NOTE: as more UIO bits will be used, add them to the mask */
+ state->UIO_mask = config->antenna_gpio;
+
+ /* Default gpio to DVB-C */
+ if (!state->antenna_dvbt && state->antenna_gpio)
+ state->m_GPIO |= state->antenna_gpio;
+ else
+ state->m_GPIO &= ~state->antenna_gpio;
+
+ mutex_init(&state->mutex);
+ mutex_init(&state->ctlock);
+
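+ /*
+ * A single drxk_state backs two frontends: the DVB-C frontend is
+ * the return value, the DVB-T frontend is handed back through fe_t,
+ * and both share this state via demodulator_priv.
+ */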
+ memcpy(&state->c_frontend.ops, &drxk_c_ops,
+ sizeof(struct dvb_frontend_ops));
+ memcpy(&state->t_frontend.ops, &drxk_t_ops,
+ sizeof(struct dvb_frontend_ops));
+ state->c_frontend.demodulator_priv = state;
+ state->t_frontend.demodulator_priv = state;
+
+ init_state(state);
+ if (init_drxk(state) < 0)
+ goto error;
+ *fe_t = &state->t_frontend;
+
+ return &state->c_frontend;
+
+error:
+ printk(KERN_ERR "drxk: not found\n");
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(drxk_attach);
+
+MODULE_DESCRIPTION("DRX-K driver");
+MODULE_AUTHOR("Ralph Metzler");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/drxk_hard.h b/drivers/media/dvb/frontends/drxk_hard.h
new file mode 100644
index 0000000..a05c32e
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk_hard.h
@@ -0,0 +1,348 @@
+#include "drxk_map.h"
+
+#define DRXK_VERSION_MAJOR 0
+#define DRXK_VERSION_MINOR 9
+#define DRXK_VERSION_PATCH 4300
+
+#define HI_I2C_DELAY 42
+#define HI_I2C_BRIDGE_DELAY 350
+#define DRXK_MAX_RETRIES 100
+
+#define DRIVER_4400 1
+
+#define DRXX_JTAGID 0x039210D9
+#define DRXX_J_JTAGID 0x239310D9
+#define DRXX_K_JTAGID 0x039210D9
+
+#define DRX_UNKNOWN 254
+#define DRX_AUTO 255
+
+#define DRX_SCU_READY 0
+#define DRXK_MAX_WAITTIME (200)
+#define SCU_RESULT_OK 0
+#define SCU_RESULT_SIZE -4
+#define SCU_RESULT_INVPAR -3
+#define SCU_RESULT_UNKSTD -2
+#define SCU_RESULT_UNKCMD -1
+
+#ifndef DRXK_OFDM_TR_SHUTDOWN_TIMEOUT
+#define DRXK_OFDM_TR_SHUTDOWN_TIMEOUT (200)
+#endif
+
+#define DRXK_8VSB_MPEG_BIT_RATE 19392658UL /*bps*/
+#define DRXK_DVBT_MPEG_BIT_RATE 32000000UL /*bps*/
+#define DRXK_QAM16_MPEG_BIT_RATE 27000000UL /*bps*/
+#define DRXK_QAM32_MPEG_BIT_RATE 33000000UL /*bps*/
+#define DRXK_QAM64_MPEG_BIT_RATE 40000000UL /*bps*/
+#define DRXK_QAM128_MPEG_BIT_RATE 46000000UL /*bps*/
+#define DRXK_QAM256_MPEG_BIT_RATE 52000000UL /*bps*/
+#define DRXK_MAX_MPEG_BIT_RATE 52000000UL /*bps*/
+
+#define IQM_CF_OUT_ENA_OFDM__M 0x4
+#define IQM_FS_ADJ_SEL_B_QAM 0x1
+#define IQM_FS_ADJ_SEL_B_OFF 0x0
+#define IQM_FS_ADJ_SEL_B_VSB 0x2
+#define IQM_RC_ADJ_SEL_B_OFF 0x0
+#define IQM_RC_ADJ_SEL_B_QAM 0x1
+#define IQM_RC_ADJ_SEL_B_VSB 0x2
+
+enum OperationMode {
+ OM_NONE,
+ OM_QAM_ITU_A,
+ OM_QAM_ITU_B,
+ OM_QAM_ITU_C,
+ OM_DVBT
+};
+
+enum DRXPowerMode {
+ DRX_POWER_UP = 0,
+ DRX_POWER_MODE_1,
+ DRX_POWER_MODE_2,
+ DRX_POWER_MODE_3,
+ DRX_POWER_MODE_4,
+ DRX_POWER_MODE_5,
+ DRX_POWER_MODE_6,
+ DRX_POWER_MODE_7,
+ DRX_POWER_MODE_8,
+
+ DRX_POWER_MODE_9,
+ DRX_POWER_MODE_10,
+ DRX_POWER_MODE_11,
+ DRX_POWER_MODE_12,
+ DRX_POWER_MODE_13,
+ DRX_POWER_MODE_14,
+ DRX_POWER_MODE_15,
+ DRX_POWER_MODE_16,
+ DRX_POWER_DOWN = 255
+};
+
+
+/** \brief Intermediate power mode for DRXK, power down OFDM clock domain */
+#ifndef DRXK_POWER_DOWN_OFDM
+#define DRXK_POWER_DOWN_OFDM DRX_POWER_MODE_1
+#endif
+
+/** \brief Intermediate power mode for DRXK, power down core (sysclk) */
+#ifndef DRXK_POWER_DOWN_CORE
+#define DRXK_POWER_DOWN_CORE DRX_POWER_MODE_9
+#endif
+
+/** \brief Intermediate power mode for DRXK, power down pll (only osc runs) */
+#ifndef DRXK_POWER_DOWN_PLL
+#define DRXK_POWER_DOWN_PLL DRX_POWER_MODE_10
+#endif
+
+
+enum AGC_CTRL_MODE { DRXK_AGC_CTRL_AUTO = 0, DRXK_AGC_CTRL_USER, DRXK_AGC_CTRL_OFF };
+enum EDrxkState { DRXK_UNINITIALIZED = 0, DRXK_STOPPED, DRXK_DTV_STARTED, DRXK_ATV_STARTED, DRXK_POWERED_DOWN };
+enum EDrxkCoefArrayIndex {
+ DRXK_COEF_IDX_MN = 0,
+ DRXK_COEF_IDX_FM,
+ DRXK_COEF_IDX_L,
+ DRXK_COEF_IDX_LP,
+ DRXK_COEF_IDX_BG,
+ DRXK_COEF_IDX_DK,
+ DRXK_COEF_IDX_I,
+ DRXK_COEF_IDX_MAX
+};
+enum EDrxkSifAttenuation {
+ DRXK_SIF_ATTENUATION_0DB,
+ DRXK_SIF_ATTENUATION_3DB,
+ DRXK_SIF_ATTENUATION_6DB,
+ DRXK_SIF_ATTENUATION_9DB
+};
+enum EDrxkConstellation {
+ DRX_CONSTELLATION_BPSK = 0,
+ DRX_CONSTELLATION_QPSK,
+ DRX_CONSTELLATION_PSK8,
+ DRX_CONSTELLATION_QAM16,
+ DRX_CONSTELLATION_QAM32,
+ DRX_CONSTELLATION_QAM64,
+ DRX_CONSTELLATION_QAM128,
+ DRX_CONSTELLATION_QAM256,
+ DRX_CONSTELLATION_QAM512,
+ DRX_CONSTELLATION_QAM1024,
+ DRX_CONSTELLATION_UNKNOWN = DRX_UNKNOWN,
+ DRX_CONSTELLATION_AUTO = DRX_AUTO
+};
+enum EDrxkInterleaveMode {
+ DRXK_QAM_I12_J17 = 16,
+ DRXK_QAM_I_UNKNOWN = DRX_UNKNOWN
+};
+enum {
+ DRXK_SPIN_A1 = 0,
+ DRXK_SPIN_A2,
+ DRXK_SPIN_A3,
+ DRXK_SPIN_UNKNOWN
+};
+
+enum DRXKCfgDvbtSqiSpeed {
+ DRXK_DVBT_SQI_SPEED_FAST = 0,
+ DRXK_DVBT_SQI_SPEED_MEDIUM,
+ DRXK_DVBT_SQI_SPEED_SLOW,
+ DRXK_DVBT_SQI_SPEED_UNKNOWN = DRX_UNKNOWN
+};
+
+enum DRXFftmode_t {
+ DRX_FFTMODE_2K = 0,
+ DRX_FFTMODE_4K,
+ DRX_FFTMODE_8K,
+ DRX_FFTMODE_UNKNOWN = DRX_UNKNOWN,
+ DRX_FFTMODE_AUTO = DRX_AUTO
+};
+
+enum DRXMPEGStrWidth_t {
+ DRX_MPEG_STR_WIDTH_1,
+ DRX_MPEG_STR_WIDTH_8
+};
+
+enum DRXQamLockRange_t {
+ DRX_QAM_LOCKRANGE_NORMAL,
+ DRX_QAM_LOCKRANGE_EXTENDED
+};
+
+struct DRXKCfgDvbtEchoThres_t {
+ u16 threshold;
+ enum DRXFftmode_t fftMode;
+};
+
+struct SCfgAgc {
+ enum AGC_CTRL_MODE ctrlMode; /* off, user, auto */
+ u16 outputLevel; /* range dependent on AGC */
+ u16 minOutputLevel; /* range dependent on AGC */
+ u16 maxOutputLevel; /* range dependent on AGC */
+ u16 speed; /* range dependent on AGC */
+ u16 top; /* rf-agc take over point */
+ u16 cutOffCurrent; /* rf-agc is accelerated if output current
+ is below cut-off current */
+ u16 IngainTgtMax;
+ u16 FastClipCtrlDelay;
+};
+
+struct SCfgPreSaw {
+ u16 reference; /* pre SAW reference value, range 0 .. 31 */
+ bool usePreSaw; /* TRUE if algorithms must use the pre-SAW sense */
+};
+
+struct DRXKOfdmScCmd_t {
+ u16 cmd; /**< Command number */
+ u16 subcmd; /**< Sub-command parameter*/
+ u16 param0; /**< General purpose param */
+ u16 param1; /**< General purpose param */
+ u16 param2; /**< General purpose param */
+ u16 param3; /**< General purpose param */
+ u16 param4; /**< General purpose param */
+};
+
+struct drxk_state {
+ struct dvb_frontend c_frontend;
+ struct dvb_frontend t_frontend;
+ struct dvb_frontend_parameters param;
+ struct device *dev;
+
+ struct i2c_adapter *i2c;
+ u8 demod_address;
+ void *priv;
+
+ struct mutex mutex;
+ struct mutex ctlock;
+
+ u32 m_Instance; /**< Channel 1,2,3 or 4 */
+
+ int m_ChunkSize;
+ u8 Chunk[256];
+
+ bool m_hasLNA;
+ bool m_hasDVBT;
+ bool m_hasDVBC;
+ bool m_hasAudio;
+ bool m_hasATV;
+ bool m_hasOOB;
+ bool m_hasSAWSW; /**< TRUE if mat_tx is available */
+ bool m_hasGPIO1; /**< TRUE if mat_rx is available */
+ bool m_hasGPIO2; /**< TRUE if GPIO is available */
+ bool m_hasIRQN; /**< TRUE if IRQN is available */
+ u16 m_oscClockFreq;
+ u16 m_HICfgTimingDiv;
+ u16 m_HICfgBridgeDelay;
+ u16 m_HICfgWakeUpKey;
+ u16 m_HICfgTimeout;
+ u16 m_HICfgCtrl;
+ s32 m_sysClockFreq; /**< system clock frequency in kHz */
+
+ enum EDrxkState m_DrxkState; /**< State of Drxk (init,stopped,started) */
+ enum OperationMode m_OperationMode; /**< digital standards */
+ struct SCfgAgc m_vsbRfAgcCfg; /**< settings for VSB RF-AGC */
+ struct SCfgAgc m_vsbIfAgcCfg; /**< settings for VSB IF-AGC */
+ u16 m_vsbPgaCfg; /**< settings for VSB PGA */
+ struct SCfgPreSaw m_vsbPreSawCfg; /**< settings for pre SAW sense */
+ s32 m_Quality83percent; /**< MER level (*0.1 dB) for 83% quality indication */
+ s32 m_Quality93percent; /**< MER level (*0.1 dB) for 93% quality indication */
+ bool m_smartAntInverted;
+ bool m_bDebugEnableBridge;
+ bool m_bPDownOpenBridge; /**< only open DRXK bridge before power-down once it has been accessed */
+ bool m_bPowerDown; /**< Power down when not used */
+
+ u32 m_IqmFsRateOfs; /**< frequency shift as written to DRXK register (28bit fixpoint) */
+
+ bool m_enableMPEGOutput; /**< If TRUE, enable MPEG output */
+ bool m_insertRSByte; /**< If TRUE, insert RS byte */
+ bool m_enableParallel; /**< If TRUE, parallel out otherwise serial */
+ bool m_invertDATA; /**< If TRUE, invert DATA signals */
+ bool m_invertERR; /**< If TRUE, invert ERR signal */
+ bool m_invertSTR; /**< If TRUE, invert STR signals */
+ bool m_invertVAL; /**< If TRUE, invert VAL signals */
+ bool m_invertCLK; /**< If TRUE, invert CLK signals */
+ bool m_DVBCStaticCLK;
+ bool m_DVBTStaticCLK; /**< If TRUE, static MPEG clockrate will
+ be used, otherwise clockrate will
+ adapt to the bitrate of the TS */
+ u32 m_DVBTBitrate;
+ u32 m_DVBCBitrate;
+
+ u8 m_TSDataStrength;
+ u8 m_TSClockkStrength;
+
+ enum DRXMPEGStrWidth_t m_widthSTR; /**< MPEG start width */
+ u32 m_mpegTsStaticBitrate; /**< Maximum bitrate in b/s in case
+ static clockrate is selected */
+
+ /* LARGE_INTEGER m_StartTime; */ /**< Contains the time of the last demod start */
+ s32 m_MpegLockTimeOut; /**< WaitForLockStatus Timeout (counts from start time) */
+ s32 m_DemodLockTimeOut; /**< WaitForLockStatus Timeout (counts from start time) */
+
+ bool m_disableTEIhandling;
+
+ bool m_RfAgcPol;
+ bool m_IfAgcPol;
+
+ struct SCfgAgc m_atvRfAgcCfg; /**< settings for ATV RF-AGC */
+ struct SCfgAgc m_atvIfAgcCfg; /**< settings for ATV IF-AGC */
+ struct SCfgPreSaw m_atvPreSawCfg; /**< settings for ATV pre SAW sense */
+ bool m_phaseCorrectionBypass;
+ s16 m_atvTopVidPeak;
+ u16 m_atvTopNoiseTh;
+ enum EDrxkSifAttenuation m_sifAttenuation;
+ bool m_enableCVBSOutput;
+ bool m_enableSIFOutput;
+ bool m_bMirrorFreqSpect;
+ enum EDrxkConstellation m_Constellation; /**< Constellation type of the channel */
+ u32 m_CurrSymbolRate; /**< Current QAM symbol rate */
+ struct SCfgAgc m_qamRfAgcCfg; /**< settings for QAM RF-AGC */
+ struct SCfgAgc m_qamIfAgcCfg; /**< settings for QAM IF-AGC */
+ u16 m_qamPgaCfg; /**< settings for QAM PGA */
+ struct SCfgPreSaw m_qamPreSawCfg; /**< settings for QAM pre SAW sense */
+ enum EDrxkInterleaveMode m_qamInterleaveMode; /**< QAM Interleave mode */
+ u16 m_fecRsPlen;
+ u16 m_fecRsPrescale;
+
+ enum DRXKCfgDvbtSqiSpeed m_sqiSpeed;
+
+ u16 m_GPIO;
+ u16 m_GPIOCfg;
+
+ struct SCfgAgc m_dvbtRfAgcCfg; /**< settings for DVB-T RF-AGC */
+ struct SCfgAgc m_dvbtIfAgcCfg; /**< settings for DVB-T IF-AGC */
+ struct SCfgPreSaw m_dvbtPreSawCfg; /**< settings for DVB-T pre SAW sense */
+
+ u16 m_agcFastClipCtrlDelay;
+ bool m_adcCompPassed;
+ u16 m_adcCompCoef[64];
+ u16 m_adcState;
+
+ u8 *m_microcode;
+ int m_microcode_length;
+ bool m_DRXK_A1_PATCH_CODE;
+ bool m_DRXK_A1_ROM_CODE;
+ bool m_DRXK_A2_ROM_CODE;
+ bool m_DRXK_A3_ROM_CODE;
+ bool m_DRXK_A2_PATCH_CODE;
+ bool m_DRXK_A3_PATCH_CODE;
+
+ bool m_rfmirror;
+ u8 m_deviceSpin;
+ u32 m_iqmRcRate;
+
+ enum DRXPowerMode m_currentPowerMode;
+
+ /*
+ * Configurable parameters of the driver. They store the values found
+ * in struct drxk_config.
+ */
+
+ u16 UIO_mask; /* Bits used by UIO */
+
+ bool single_master;
+ bool no_i2c_bridge;
+ bool antenna_dvbt;
+ u16 antenna_gpio;
+
+ const char *microcode_name;
+};
+
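+/* Lock states reported by GetLockStatus() and tested in drxk_read_status() */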
+#define NEVER_LOCK 0
+#define NOT_LOCKED 1
+#define DEMOD_LOCK 2
+#define FEC_LOCK 3
+#define MPEG_LOCK 4
+
diff --git a/drivers/media/dvb/frontends/drxk_map.h b/drivers/media/dvb/frontends/drxk_map.h
new file mode 100644
index 0000000..9b11a83
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk_map.h
@@ -0,0 +1,449 @@
+#define AUD_COMM_EXEC__A 0x1000000
+#define AUD_COMM_EXEC_STOP 0x0
+#define FEC_COMM_EXEC__A 0x1C00000
+#define FEC_COMM_EXEC_STOP 0x0
+#define FEC_COMM_EXEC_ACTIVE 0x1
+#define FEC_DI_COMM_EXEC__A 0x1C20000
+#define FEC_DI_COMM_EXEC_STOP 0x0
+#define FEC_DI_INPUT_CTL__A 0x1C20016
+#define FEC_RS_COMM_EXEC__A 0x1C30000
+#define FEC_RS_COMM_EXEC_STOP 0x0
+#define FEC_RS_MEASUREMENT_PERIOD__A 0x1C30012
+#define FEC_RS_MEASUREMENT_PRESCALE__A 0x1C30013
+#define FEC_OC_MODE__A 0x1C40011
+#define FEC_OC_MODE_PARITY__M 0x1
+#define FEC_OC_DTO_MODE__A 0x1C40014
+#define FEC_OC_DTO_MODE_DYNAMIC__M 0x1
+#define FEC_OC_DTO_MODE_OFFSET_ENABLE__M 0x4
+#define FEC_OC_DTO_PERIOD__A 0x1C40015
+#define FEC_OC_DTO_BURST_LEN__A 0x1C40018
+#define FEC_OC_FCT_MODE__A 0x1C4001A
+#define FEC_OC_FCT_MODE__PRE 0x0
+#define FEC_OC_FCT_MODE_RAT_ENA__M 0x1
+#define FEC_OC_FCT_MODE_VIRT_ENA__M 0x2
+#define FEC_OC_TMD_MODE__A 0x1C4001E
+#define FEC_OC_TMD_COUNT__A 0x1C4001F
+#define FEC_OC_TMD_HI_MARGIN__A 0x1C40020
+#define FEC_OC_TMD_LO_MARGIN__A 0x1C40021
+#define FEC_OC_TMD_INT_UPD_RATE__A 0x1C40023
+#define FEC_OC_AVR_PARM_A__A 0x1C40026
+#define FEC_OC_AVR_PARM_B__A 0x1C40027
+#define FEC_OC_RCN_GAIN__A 0x1C4002E
+#define FEC_OC_RCN_CTL_RATE_LO__A 0x1C40030
+#define FEC_OC_RCN_CTL_STEP_LO__A 0x1C40032
+#define FEC_OC_RCN_CTL_STEP_HI__A 0x1C40033
+#define FEC_OC_SNC_MODE__A 0x1C40040
+#define FEC_OC_SNC_MODE_SHUTDOWN__M 0x10
+#define FEC_OC_SNC_LWM__A 0x1C40041
+#define FEC_OC_SNC_HWM__A 0x1C40042
+#define FEC_OC_SNC_UNLOCK__A 0x1C40043
+#define FEC_OC_SNC_FAIL_PERIOD__A 0x1C40046
+#define FEC_OC_IPR_MODE__A 0x1C40048
+#define FEC_OC_IPR_MODE_SERIAL__M 0x1
+#define FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__M 0x4
+#define FEC_OC_IPR_MODE_MVAL_DIS_PAR__M 0x10
+#define FEC_OC_IPR_INVERT__A 0x1C40049
+#define FEC_OC_IPR_INVERT_MD0__M 0x1
+#define FEC_OC_IPR_INVERT_MD1__M 0x2
+#define FEC_OC_IPR_INVERT_MD2__M 0x4
+#define FEC_OC_IPR_INVERT_MD3__M 0x8
+#define FEC_OC_IPR_INVERT_MD4__M 0x10
+#define FEC_OC_IPR_INVERT_MD5__M 0x20
+#define FEC_OC_IPR_INVERT_MD6__M 0x40
+#define FEC_OC_IPR_INVERT_MD7__M 0x80
+#define FEC_OC_IPR_INVERT_MERR__M 0x100
+#define FEC_OC_IPR_INVERT_MSTRT__M 0x200
+#define FEC_OC_IPR_INVERT_MVAL__M 0x400
+#define FEC_OC_IPR_INVERT_MCLK__M 0x800
+#define FEC_OC_OCR_INVERT__A 0x1C40052
+#define IQM_COMM_EXEC__A 0x1800000
+#define IQM_COMM_EXEC_B_STOP 0x0
+#define IQM_COMM_EXEC_B_ACTIVE 0x1
+#define IQM_FS_RATE_OFS_LO__A 0x1820010
+#define IQM_FS_ADJ_SEL__A 0x1820014
+#define IQM_FS_ADJ_SEL_B_OFF 0x0
+#define IQM_FS_ADJ_SEL_B_QAM 0x1
+#define IQM_FS_ADJ_SEL_B_VSB 0x2
+#define IQM_FD_RATESEL__A 0x1830010
+#define IQM_RC_RATE_OFS_LO__A 0x1840010
+#define IQM_RC_RATE_OFS_LO__W 16
+#define IQM_RC_RATE_OFS_LO__M 0xFFFF
+#define IQM_RC_RATE_OFS_HI__M 0xFF
+#define IQM_RC_ADJ_SEL__A 0x1840014
+#define IQM_RC_ADJ_SEL_B_OFF 0x0
+#define IQM_RC_ADJ_SEL_B_QAM 0x1
+#define IQM_RC_ADJ_SEL_B_VSB 0x2
+#define IQM_RC_STRETCH__A 0x1840016
+#define IQM_CF_COMM_INT_MSK__A 0x1860006
+#define IQM_CF_SYMMETRIC__A 0x1860010
+#define IQM_CF_MIDTAP__A 0x1860011
+#define IQM_CF_MIDTAP_RE__B 0
+#define IQM_CF_MIDTAP_IM__B 1
+#define IQM_CF_OUT_ENA__A 0x1860012
+#define IQM_CF_OUT_ENA_QAM__B 1
+#define IQM_CF_OUT_ENA_OFDM__M 0x4
+#define IQM_CF_ADJ_SEL__A 0x1860013
+#define IQM_CF_SCALE__A 0x1860014
+#define IQM_CF_SCALE_SH__A 0x1860015
+#define IQM_CF_SCALE_SH__PRE 0x0
+#define IQM_CF_POW_MEAS_LEN__A 0x1860017
+#define IQM_CF_DS_ENA__A 0x1860019
+#define IQM_CF_TAP_RE0__A 0x1860020
+#define IQM_CF_TAP_IM0__A 0x1860040
+#define IQM_CF_CLP_VAL__A 0x1860060
+#define IQM_CF_DATATH__A 0x1860061
+#define IQM_CF_PKDTH__A 0x1860062
+#define IQM_CF_WND_LEN__A 0x1860063
+#define IQM_CF_DET_LCT__A 0x1860064
+#define IQM_CF_BYPASSDET__A 0x1860067
+#define IQM_AF_COMM_EXEC__A 0x1870000
+#define IQM_AF_COMM_EXEC_ACTIVE 0x1
+#define IQM_AF_CLKNEG__A 0x1870012
+#define IQM_AF_CLKNEG_CLKNEGDATA__M 0x2
+#define IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS 0x0
+#define IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_NEG 0x2
+#define IQM_AF_START_LOCK__A 0x187001B
+#define IQM_AF_PHASE0__A 0x187001C
+#define IQM_AF_PHASE1__A 0x187001D
+#define IQM_AF_PHASE2__A 0x187001E
+#define IQM_AF_CLP_LEN__A 0x1870023
+#define IQM_AF_CLP_TH__A 0x1870024
+#define IQM_AF_SNS_LEN__A 0x1870026
+#define IQM_AF_AGC_IF__A 0x1870028
+#define IQM_AF_AGC_RF__A 0x1870029
+#define IQM_AF_PDREF__A 0x187002B
+#define IQM_AF_PDREF__M 0x1F
+#define IQM_AF_STDBY__A 0x187002C
+#define IQM_AF_STDBY_STDBY_ADC_STANDBY 0x2
+#define IQM_AF_STDBY_STDBY_AMP_STANDBY 0x4
+#define IQM_AF_STDBY_STDBY_PD_STANDBY 0x8
+#define IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY 0x10
+#define IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY 0x20
+#define IQM_AF_AMUX__A 0x187002D
+#define IQM_AF_AMUX_SIGNAL2ADC 0x1
+#define IQM_AF_UPD_SEL__A 0x187002F
+#define IQM_AF_INC_LCT__A 0x1870034
+#define IQM_AF_INC_BYPASS__A 0x1870036
+#define OFDM_CP_COMM_EXEC__A 0x2800000
+#define OFDM_CP_COMM_EXEC_STOP 0x0
+#define OFDM_EC_SB_PRIOR__A 0x3410013
+#define OFDM_EC_SB_PRIOR_HI 0x0
+#define OFDM_EC_SB_PRIOR_LO 0x1
+#define OFDM_EQ_TOP_TD_TPS_CONST__A 0x3010054
+#define OFDM_EQ_TOP_TD_TPS_CONST__M 0x3
+#define OFDM_EQ_TOP_TD_TPS_CONST_64QAM 0x2
+#define OFDM_EQ_TOP_TD_TPS_CODE_HP__A 0x3010056
+#define OFDM_EQ_TOP_TD_TPS_CODE_HP__M 0x7
+#define OFDM_EQ_TOP_TD_TPS_CODE_LP_7_8 0x4
+#define OFDM_EQ_TOP_TD_SQR_ERR_I__A 0x301005E
+#define OFDM_EQ_TOP_TD_SQR_ERR_Q__A 0x301005F
+#define OFDM_EQ_TOP_TD_SQR_ERR_EXP__A 0x3010060
+#define OFDM_EQ_TOP_TD_REQ_SMB_CNT__A 0x3010061
+#define OFDM_EQ_TOP_TD_TPS_PWR_OFS__A 0x3010062
+#define OFDM_LC_COMM_EXEC__A 0x3800000
+#define OFDM_LC_COMM_EXEC_STOP 0x0
+#define OFDM_SC_COMM_EXEC__A 0x3C00000
+#define OFDM_SC_COMM_EXEC_STOP 0x0
+#define OFDM_SC_COMM_STATE__A 0x3C00001
+#define OFDM_SC_RA_RAM_PARAM0__A 0x3C20040
+#define OFDM_SC_RA_RAM_PARAM1__A 0x3C20041
+#define OFDM_SC_RA_RAM_CMD_ADDR__A 0x3C20042
+#define OFDM_SC_RA_RAM_CMD__A 0x3C20043
+#define OFDM_SC_RA_RAM_CMD_NULL 0x0
+#define OFDM_SC_RA_RAM_CMD_PROC_START 0x1
+#define OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM 0x3
+#define OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM 0x4
+#define OFDM_SC_RA_RAM_CMD_GET_OP_PARAM 0x5
+#define OFDM_SC_RA_RAM_CMD_USER_IO 0x6
+#define OFDM_SC_RA_RAM_CMD_SET_TIMER 0x7
+#define OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING 0x8
+#define OFDM_SC_RA_RAM_SW_EVENT_RUN_NMASK__M 0x1
+#define OFDM_SC_RA_RAM_LOCKTRACK_MIN 0x1
+#define OFDM_SC_RA_RAM_OP_PARAM__A 0x3C20048
+#define OFDM_SC_RA_RAM_OP_PARAM_MODE__M 0x3
+#define OFDM_SC_RA_RAM_OP_PARAM_MODE_2K 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_MODE_8K 0x1
+#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_32 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_16 0x4
+#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_8 0x8
+#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_4 0xC
+#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QPSK 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM16 0x10
+#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64 0x20
+#define OFDM_SC_RA_RAM_OP_PARAM_HIER_NO 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A1 0x40
+#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A2 0x80
+#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A4 0xC0
+#define OFDM_SC_RA_RAM_OP_PARAM_RATE_1_2 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3 0x200
+#define OFDM_SC_RA_RAM_OP_PARAM_RATE_3_4 0x400
+#define OFDM_SC_RA_RAM_OP_PARAM_RATE_5_6 0x600
+#define OFDM_SC_RA_RAM_OP_PARAM_RATE_7_8 0x800
+#define OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_PRIO_LO 0x1000
+#define OFDM_SC_RA_RAM_OP_AUTO_MODE__M 0x1
+#define OFDM_SC_RA_RAM_OP_AUTO_GUARD__M 0x2
+#define OFDM_SC_RA_RAM_OP_AUTO_CONST__M 0x4
+#define OFDM_SC_RA_RAM_OP_AUTO_HIER__M 0x8
+#define OFDM_SC_RA_RAM_OP_AUTO_RATE__M 0x10
+#define OFDM_SC_RA_RAM_LOCK__A 0x3C2004B
+#define OFDM_SC_RA_RAM_LOCK_DEMOD__M 0x1
+#define OFDM_SC_RA_RAM_LOCK_FEC__M 0x2
+#define OFDM_SC_RA_RAM_LOCK_MPEG__M 0x4
+#define OFDM_SC_RA_RAM_LOCK_NODVBT__M 0x8
+#define OFDM_SC_RA_RAM_BE_OPT_DELAY__A 0x3C2004D
+#define OFDM_SC_RA_RAM_BE_OPT_INIT_DELAY__A 0x3C2004E
+#define OFDM_SC_RA_RAM_ECHO_THRES__A 0x3C2004F
+#define OFDM_SC_RA_RAM_ECHO_THRES_8K__B 0
+#define OFDM_SC_RA_RAM_ECHO_THRES_8K__M 0xFF
+#define OFDM_SC_RA_RAM_ECHO_THRES_2K__B 8
+#define OFDM_SC_RA_RAM_ECHO_THRES_2K__M 0xFF00
+#define OFDM_SC_RA_RAM_CONFIG__A 0x3C20050
+#define OFDM_SC_RA_RAM_CONFIG_NE_FIX_ENABLE__M 0x800
+#define OFDM_SC_RA_RAM_FR_THRES_8K__A 0x3C2007D
+#define OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A 0x3C200E0
+#define OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A 0x3C200E1
+#define OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A 0x3C200E3
+#define OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A 0x3C200E4
+#define OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A 0x3C200F8
+#define QAM_COMM_EXEC__A 0x1400000
+#define QAM_COMM_EXEC_STOP 0x0
+#define QAM_COMM_EXEC_ACTIVE 0x1
+#define QAM_TOP_ANNEX_A 0x0
+#define QAM_TOP_ANNEX_C 0x2
+#define QAM_SL_ERR_POWER__A 0x1430017
+#define QAM_DQ_QUAL_FUN0__A 0x1440018
+#define QAM_DQ_QUAL_FUN1__A 0x1440019
+#define QAM_DQ_QUAL_FUN2__A 0x144001A
+#define QAM_DQ_QUAL_FUN3__A 0x144001B
+#define QAM_DQ_QUAL_FUN4__A 0x144001C
+#define QAM_DQ_QUAL_FUN5__A 0x144001D
+#define QAM_LC_MODE__A 0x1450010
+#define QAM_LC_QUAL_TAB0__A 0x1450018
+#define QAM_LC_QUAL_TAB1__A 0x1450019
+#define QAM_LC_QUAL_TAB2__A 0x145001A
+#define QAM_LC_QUAL_TAB3__A 0x145001B
+#define QAM_LC_QUAL_TAB4__A 0x145001C
+#define QAM_LC_QUAL_TAB5__A 0x145001D
+#define QAM_LC_QUAL_TAB6__A 0x145001E
+#define QAM_LC_QUAL_TAB8__A 0x145001F
+#define QAM_LC_QUAL_TAB9__A 0x1450020
+#define QAM_LC_QUAL_TAB10__A 0x1450021
+#define QAM_LC_QUAL_TAB12__A 0x1450022
+#define QAM_LC_QUAL_TAB15__A 0x1450023
+#define QAM_LC_QUAL_TAB16__A 0x1450024
+#define QAM_LC_QUAL_TAB20__A 0x1450025
+#define QAM_LC_QUAL_TAB25__A 0x1450026
+#define QAM_LC_LPF_FACTORP__A 0x1450028
+#define QAM_LC_LPF_FACTORI__A 0x1450029
+#define QAM_LC_RATE_LIMIT__A 0x145002A
+#define QAM_LC_SYMBOL_FREQ__A 0x145002B
+#define QAM_SY_TIMEOUT__A 0x1470011
+#define QAM_SY_TIMEOUT__PRE 0x3A98
+#define QAM_SY_SYNC_LWM__A 0x1470012
+#define QAM_SY_SYNC_AWM__A 0x1470013
+#define QAM_SY_SYNC_HWM__A 0x1470014
+#define QAM_SY_SP_INV__A 0x1470017
+#define QAM_SY_SP_INV_SPECTRUM_INV_DIS 0x0
+#define SCU_COMM_EXEC__A 0x800000
+#define SCU_COMM_EXEC_STOP 0x0
+#define SCU_COMM_EXEC_ACTIVE 0x1
+#define SCU_COMM_EXEC_HOLD 0x2
+#define SCU_RAM_DRIVER_DEBUG__A 0x831EBF
+#define SCU_RAM_QAM_FSM_STEP_PERIOD__A 0x831EC4
+#define SCU_RAM_GPIO__A 0x831EC7
+#define SCU_RAM_GPIO_HW_LOCK_IND_DISABLE 0x0
+#define SCU_RAM_AGC_CLP_CTRL_MODE__A 0x831EC8
+#define SCU_RAM_FEC_ACCUM_PKT_FAILURES__A 0x831ECB
+#define SCU_RAM_FEC_PRE_RS_BER_FILTER_SH__A 0x831F05
+#define SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__A 0x831F15
+#define SCU_RAM_AGC_KI_CYCLEN__A 0x831F17
+#define SCU_RAM_AGC_SNS_CYCLEN__A 0x831F18
+#define SCU_RAM_AGC_RF_SNS_DEV_MAX__A 0x831F19
+#define SCU_RAM_AGC_RF_SNS_DEV_MIN__A 0x831F1A
+#define SCU_RAM_AGC_RF_MAX__A 0x831F1B
+#define SCU_RAM_AGC_CONFIG__A 0x831F24
+#define SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M 0x1
+#define SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M 0x2
+#define SCU_RAM_AGC_CONFIG_INV_IF_POL__M 0x100
+#define SCU_RAM_AGC_CONFIG_INV_RF_POL__M 0x200
+#define SCU_RAM_AGC_KI__A 0x831F25
+#define SCU_RAM_AGC_KI_RF__B 4
+#define SCU_RAM_AGC_KI_RF__M 0xF0
+#define SCU_RAM_AGC_KI_IF__B 8
+#define SCU_RAM_AGC_KI_IF__M 0xF00
+#define SCU_RAM_AGC_KI_RED__A 0x831F26
+#define SCU_RAM_AGC_KI_RED_RAGC_RED__B 2
+#define SCU_RAM_AGC_KI_RED_RAGC_RED__M 0xC
+#define SCU_RAM_AGC_KI_RED_IAGC_RED__B 4
+#define SCU_RAM_AGC_KI_RED_IAGC_RED__M 0x30
+#define SCU_RAM_AGC_KI_INNERGAIN_MIN__A 0x831F27
+#define SCU_RAM_AGC_KI_MINGAIN__A 0x831F28
+#define SCU_RAM_AGC_KI_MAXGAIN__A 0x831F29
+#define SCU_RAM_AGC_KI_MAXMINGAIN_TH__A 0x831F2A
+#define SCU_RAM_AGC_KI_MIN__A 0x831F2B
+#define SCU_RAM_AGC_KI_MAX__A 0x831F2C
+#define SCU_RAM_AGC_CLP_SUM__A 0x831F2D
+#define SCU_RAM_AGC_CLP_SUM_MIN__A 0x831F2E
+#define SCU_RAM_AGC_CLP_SUM_MAX__A 0x831F2F
+#define SCU_RAM_AGC_CLP_CYCLEN__A 0x831F30
+#define SCU_RAM_AGC_CLP_CYCCNT__A 0x831F31
+#define SCU_RAM_AGC_CLP_DIR_TO__A 0x831F32
+#define SCU_RAM_AGC_CLP_DIR_WD__A 0x831F33
+#define SCU_RAM_AGC_CLP_DIR_STP__A 0x831F34
+#define SCU_RAM_AGC_SNS_SUM__A 0x831F35
+#define SCU_RAM_AGC_SNS_SUM_MIN__A 0x831F36
+#define SCU_RAM_AGC_SNS_SUM_MAX__A 0x831F37
+#define SCU_RAM_AGC_SNS_CYCCNT__A 0x831F38
+#define SCU_RAM_AGC_SNS_DIR_TO__A 0x831F39
+#define SCU_RAM_AGC_SNS_DIR_WD__A 0x831F3A
+#define SCU_RAM_AGC_SNS_DIR_STP__A 0x831F3B
+#define SCU_RAM_AGC_INGAIN_TGT__A 0x831F3D
+#define SCU_RAM_AGC_INGAIN_TGT_MIN__A 0x831F3E
+#define SCU_RAM_AGC_INGAIN_TGT_MAX__A 0x831F3F
+#define SCU_RAM_AGC_IF_IACCU_HI__A 0x831F40
+#define SCU_RAM_AGC_IF_IACCU_LO__A 0x831F41
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT__A 0x831F42
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A 0x831F43
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A 0x831F44
+#define SCU_RAM_AGC_RF_IACCU_HI__A 0x831F45
+#define SCU_RAM_AGC_RF_IACCU_LO__A 0x831F46
+#define SCU_RAM_AGC_RF_IACCU_HI_CO__A 0x831F47
+#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A 0x831F84
+#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A 0x831F85
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A 0x831F86
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A 0x831F87
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A 0x831F88
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A 0x831F89
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A 0x831F8A
+#define SCU_RAM_QAM_FSM_RTH__A 0x831F8E
+#define SCU_RAM_QAM_FSM_FTH__A 0x831F8F
+#define SCU_RAM_QAM_FSM_PTH__A 0x831F90
+#define SCU_RAM_QAM_FSM_MTH__A 0x831F91
+#define SCU_RAM_QAM_FSM_CTH__A 0x831F92
+#define SCU_RAM_QAM_FSM_QTH__A 0x831F93
+#define SCU_RAM_QAM_FSM_RATE_LIM__A 0x831F94
+#define SCU_RAM_QAM_FSM_FREQ_LIM__A 0x831F95
+#define SCU_RAM_QAM_FSM_COUNT_LIM__A 0x831F96
+#define SCU_RAM_QAM_LC_CA_COARSE__A 0x831F97
+#define SCU_RAM_QAM_LC_CA_FINE__A 0x831F99
+#define SCU_RAM_QAM_LC_CP_COARSE__A 0x831F9A
+#define SCU_RAM_QAM_LC_CP_MEDIUM__A 0x831F9B
+#define SCU_RAM_QAM_LC_CP_FINE__A 0x831F9C
+#define SCU_RAM_QAM_LC_CI_COARSE__A 0x831F9D
+#define SCU_RAM_QAM_LC_CI_MEDIUM__A 0x831F9E
+#define SCU_RAM_QAM_LC_CI_FINE__A 0x831F9F
+#define SCU_RAM_QAM_LC_EP_COARSE__A 0x831FA0
+#define SCU_RAM_QAM_LC_EP_MEDIUM__A 0x831FA1
+#define SCU_RAM_QAM_LC_EP_FINE__A 0x831FA2
+#define SCU_RAM_QAM_LC_EI_COARSE__A 0x831FA3
+#define SCU_RAM_QAM_LC_EI_MEDIUM__A 0x831FA4
+#define SCU_RAM_QAM_LC_EI_FINE__A 0x831FA5
+#define SCU_RAM_QAM_LC_CF_COARSE__A 0x831FA6
+#define SCU_RAM_QAM_LC_CF_MEDIUM__A 0x831FA7
+#define SCU_RAM_QAM_LC_CF_FINE__A 0x831FA8
+#define SCU_RAM_QAM_LC_CF1_COARSE__A 0x831FA9
+#define SCU_RAM_QAM_LC_CF1_MEDIUM__A 0x831FAA
+#define SCU_RAM_QAM_LC_CF1_FINE__A 0x831FAB
+#define SCU_RAM_QAM_SL_SIG_POWER__A 0x831FAC
+#define SCU_RAM_QAM_EQ_CMA_RAD0__A 0x831FAD
+#define SCU_RAM_QAM_EQ_CMA_RAD1__A 0x831FAE
+#define SCU_RAM_QAM_EQ_CMA_RAD2__A 0x831FAF
+#define SCU_RAM_QAM_EQ_CMA_RAD3__A 0x831FB0
+#define SCU_RAM_QAM_EQ_CMA_RAD4__A 0x831FB1
+#define SCU_RAM_QAM_EQ_CMA_RAD5__A 0x831FB2
+#define SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED 0x4000
+#define SCU_RAM_QAM_LOCKED_LOCKED_LOCKED 0x8000
+#define SCU_RAM_QAM_LOCKED_LOCKED_NEVER_LOCK 0xC000
+#define SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A 0x831FEA
+#define SCU_RAM_DRIVER_VER_HI__A 0x831FEB
+#define SCU_RAM_DRIVER_VER_LO__A 0x831FEC
+#define SCU_RAM_PARAM_15__A 0x831FED
+#define SCU_RAM_PARAM_0__A 0x831FFC
+#define SCU_RAM_COMMAND__A 0x831FFD
+#define SCU_RAM_COMMAND_CMD_DEMOD_RESET 0x1
+#define SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV 0x2
+#define SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM 0x3
+#define SCU_RAM_COMMAND_CMD_DEMOD_START 0x4
+#define SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK 0x5
+#define SCU_RAM_COMMAND_CMD_DEMOD_STOP 0x9
+#define SCU_RAM_COMMAND_STANDARD_QAM 0x200
+#define SCU_RAM_COMMAND_STANDARD_OFDM 0x400
+#define SIO_TOP_COMM_KEY__A 0x41000F
+#define SIO_TOP_COMM_KEY_KEY 0xFABA
+#define SIO_TOP_JTAGID_LO__A 0x410012
+#define SIO_HI_RA_RAM_RES__A 0x420031
+#define SIO_HI_RA_RAM_CMD__A 0x420032
+#define SIO_HI_RA_RAM_CMD_RESET 0x2
+#define SIO_HI_RA_RAM_CMD_CONFIG 0x3
+#define SIO_HI_RA_RAM_CMD_BRDCTRL 0x7
+#define SIO_HI_RA_RAM_PAR_1__A 0x420033
+#define SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY 0x3945
+#define SIO_HI_RA_RAM_PAR_2__A 0x420034
+#define SIO_HI_RA_RAM_PAR_2_CFG_DIV__M 0x7F
+#define SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN 0x0
+#define SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED 0x4
+#define SIO_HI_RA_RAM_PAR_3__A 0x420035
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M 0x7F
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B 7
+#define SIO_HI_RA_RAM_PAR_3_ACP_RW_READ 0x0
+#define SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE 0x8
+#define SIO_HI_RA_RAM_PAR_4__A 0x420036
+#define SIO_HI_RA_RAM_PAR_5__A 0x420037
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE 0x1
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M 0x8
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ 0x8
+#define SIO_HI_RA_RAM_PAR_6__A 0x420038
+#define SIO_CC_PLL_LOCK__A 0x450012
+#define SIO_CC_PWD_MODE__A 0x450015
+#define SIO_CC_PWD_MODE_LEVEL_NONE 0x0
+#define SIO_CC_PWD_MODE_LEVEL_OFDM 0x1
+#define SIO_CC_PWD_MODE_LEVEL_CLOCK 0x2
+#define SIO_CC_PWD_MODE_LEVEL_PLL 0x3
+#define SIO_CC_PWD_MODE_LEVEL_OSC 0x4
+#define SIO_CC_SOFT_RST__A 0x450016
+#define SIO_CC_SOFT_RST_OFDM__M 0x1
+#define SIO_CC_SOFT_RST_SYS__M 0x2
+#define SIO_CC_SOFT_RST_OSC__M 0x4
+#define SIO_CC_UPDATE__A 0x450017
+#define SIO_CC_UPDATE_KEY 0xFABA
+#define SIO_OFDM_SH_OFDM_RING_ENABLE__A 0x470010
+#define SIO_OFDM_SH_OFDM_RING_ENABLE_OFF 0x0
+#define SIO_OFDM_SH_OFDM_RING_ENABLE_ON 0x1
+#define SIO_OFDM_SH_OFDM_RING_STATUS__A 0x470012
+#define SIO_OFDM_SH_OFDM_RING_STATUS_DOWN 0x0
+#define SIO_OFDM_SH_OFDM_RING_STATUS_ENABLED 0x1
+#define SIO_BL_COMM_EXEC__A 0x480000
+#define SIO_BL_COMM_EXEC_ACTIVE 0x1
+#define SIO_BL_STATUS__A 0x480010
+#define SIO_BL_MODE__A 0x480011
+#define SIO_BL_MODE_DIRECT 0x0
+#define SIO_BL_MODE_CHAIN 0x1
+#define SIO_BL_ENABLE__A 0x480012
+#define SIO_BL_ENABLE_ON 0x1
+#define SIO_BL_TGT_HDR__A 0x480014
+#define SIO_BL_TGT_ADDR__A 0x480015
+#define SIO_BL_SRC_ADDR__A 0x480016
+#define SIO_BL_SRC_LEN__A 0x480017
+#define SIO_BL_CHAIN_ADDR__A 0x480018
+#define SIO_BL_CHAIN_LEN__A 0x480019
+#define SIO_PDR_MON_CFG__A 0x7F0010
+#define SIO_PDR_UIO_IN_HI__A 0x7F0015
+#define SIO_PDR_UIO_OUT_LO__A 0x7F0016
+#define SIO_PDR_OHW_CFG__A 0x7F001F
+#define SIO_PDR_OHW_CFG_FREF_SEL__M 0x3
+#define SIO_PDR_MSTRT_CFG__A 0x7F0025
+#define SIO_PDR_MERR_CFG__A 0x7F0026
+#define SIO_PDR_MCLK_CFG__A 0x7F0028
+#define SIO_PDR_MCLK_CFG_DRIVE__B 3
+#define SIO_PDR_MVAL_CFG__A 0x7F0029
+#define SIO_PDR_MD0_CFG__A 0x7F002A
+#define SIO_PDR_MD0_CFG_DRIVE__B 3
+#define SIO_PDR_MD1_CFG__A 0x7F002B
+#define SIO_PDR_MD2_CFG__A 0x7F002C
+#define SIO_PDR_MD3_CFG__A 0x7F002D
+#define SIO_PDR_MD4_CFG__A 0x7F002F
+#define SIO_PDR_MD5_CFG__A 0x7F0030
+#define SIO_PDR_MD6_CFG__A 0x7F0031
+#define SIO_PDR_MD7_CFG__A 0x7F0032
+#define SIO_PDR_SMA_TX_CFG__A 0x7F0038
diff --git a/drivers/media/dvb/frontends/itd1000.c b/drivers/media/dvb/frontends/itd1000.c
index f7a40a1..aa9ccb8 100644
--- a/drivers/media/dvb/frontends/itd1000.c
+++ b/drivers/media/dvb/frontends/itd1000.c
@@ -35,21 +35,18 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
-#define deb(args...) do { \
+#define itd_dbg(args...) do { \
if (debug) { \
printk(KERN_DEBUG "ITD1000: " args);\
- printk("\n"); \
} \
} while (0)
-#define warn(args...) do { \
+#define itd_warn(args...) do { \
printk(KERN_WARNING "ITD1000: " args); \
- printk("\n"); \
} while (0)
-#define info(args...) do { \
+#define itd_info(args...) do { \
printk(KERN_INFO "ITD1000: " args); \
- printk("\n"); \
} while (0)
/* don't write more than one byte with flexcop behind */
@@ -62,7 +59,7 @@ static int itd1000_write_regs(struct itd1000_state *state, u8 reg, u8 v[], u8 le
buf[0] = reg;
memcpy(&buf[1], v, len);
- /* deb("wr %02x: %02x", reg, v[0]); */
+ /* itd_dbg("wr %02x: %02x\n", reg, v[0]); */
if (i2c_transfer(state->i2c, &msg, 1) != 1) {
printk(KERN_WARNING "itd1000 I2C write failed\n");
@@ -83,7 +80,7 @@ static int itd1000_read_reg(struct itd1000_state *state, u8 reg)
itd1000_write_regs(state, (reg - 1) & 0xff, &state->shadow[(reg - 1) & 0xff], 1);
if (i2c_transfer(state->i2c, msg, 2) != 2) {
- warn("itd1000 I2C read failed");
+ itd_warn("itd1000 I2C read failed\n");
return -EREMOTEIO;
}
return val;
@@ -127,14 +124,14 @@ static void itd1000_set_lpf_bw(struct itd1000_state *state, u32 symbol_rate)
u8 bbgvmin = itd1000_read_reg(state, BBGVMIN) & 0xf0;
u8 bw = itd1000_read_reg(state, BW) & 0xf0;
- deb("symbol_rate = %d", symbol_rate);
+ itd_dbg("symbol_rate = %d\n", symbol_rate);
/* not sure what is that ? - starting to download the table */
itd1000_write_reg(state, CON1, con1 | (1 << 1));
for (i = 0; i < ARRAY_SIZE(itd1000_lpf_pga); i++)
if (symbol_rate < itd1000_lpf_pga[i].symbol_rate) {
- deb("symrate: index: %d pgaext: %x, bbgvmin: %x", i, itd1000_lpf_pga[i].pgaext, itd1000_lpf_pga[i].bbgvmin);
+ itd_dbg("symrate: index: %d pgaext: %x, bbgvmin: %x\n", i, itd1000_lpf_pga[i].pgaext, itd1000_lpf_pga[i].bbgvmin);
itd1000_write_reg(state, PLLFH, pllfh | (itd1000_lpf_pga[i].pgaext << 4));
itd1000_write_reg(state, BBGVMIN, bbgvmin | (itd1000_lpf_pga[i].bbgvmin));
itd1000_write_reg(state, BW, bw | (i & 0x0f));
@@ -182,7 +179,7 @@ static void itd1000_set_vco(struct itd1000_state *state, u32 freq_khz)
adcout = itd1000_read_reg(state, PLLLOCK) & 0x0f;
- deb("VCO: %dkHz: %d -> ADCOUT: %d %02x", freq_khz, itd1000_vcorg[i].vcorg, adcout, vco_chp1_i2c);
+ itd_dbg("VCO: %dkHz: %d -> ADCOUT: %d %02x\n", freq_khz, itd1000_vcorg[i].vcorg, adcout, vco_chp1_i2c);
if (adcout > 13) {
if (!(itd1000_vcorg[i].vcorg == 7 || itd1000_vcorg[i].vcorg == 15))
@@ -232,7 +229,7 @@ static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz)
pllf = (u32) tmp;
state->frequency = ((plln * 1000) + (pllf * 1000)/1048576) * 2*FREF;
- deb("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d", freq_khz, state->frequency, pllf, plln);
+ itd_dbg("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d\n", freq_khz, state->frequency, pllf, plln);
itd1000_write_reg(state, PLLNH, 0x80); /* PLLNH */;
itd1000_write_reg(state, PLLNL, plln & 0xff);
@@ -242,7 +239,7 @@ static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz)
for (i = 0; i < ARRAY_SIZE(itd1000_fre_values); i++) {
if (freq_khz <= itd1000_fre_values[i].freq) {
- deb("fre_values: %d", i);
+ itd_dbg("fre_values: %d\n", i);
itd1000_write_reg(state, RFTR, itd1000_fre_values[i].values[0]);
for (j = 0; j < 9; j++)
itd1000_write_reg(state, RFST1+j, itd1000_fre_values[i].values[j+1]);
@@ -382,7 +379,7 @@ struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter
kfree(state);
return NULL;
}
- info("successfully identified (ID: %d)", i);
+ itd_info("successfully identified (ID: %d)\n", i);
memset(state->shadow, 0xff, sizeof(state->shadow));
for (i = 0x65; i < 0x9c; i++)
diff --git a/drivers/media/dvb/frontends/nxt6000.c b/drivers/media/dvb/frontends/nxt6000.c
index a763ec75..6599b8f 100644
--- a/drivers/media/dvb/frontends/nxt6000.c
+++ b/drivers/media/dvb/frontends/nxt6000.c
@@ -50,7 +50,7 @@ static int nxt6000_writereg(struct nxt6000_state* state, u8 reg, u8 data)
if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1)
dprintk("nxt6000: nxt6000_write error (reg: 0x%02X, data: 0x%02X, ret: %d)\n", reg, data, ret);
- return (ret != 1) ? -EFAULT : 0;
+ return (ret != 1) ? -EIO : 0;
}
static u8 nxt6000_readreg(struct nxt6000_state* state, u8 reg)
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 17f8cdf..3879d2e 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -634,7 +634,7 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
struct s5h1420_state* state = fe->demodulator_priv;
int frequency_delta;
struct dvb_frontend_tune_settings fesettings;
- uint8_t clock_settting;
+ uint8_t clock_setting;
dprintk("enter %s\n", __func__);
@@ -684,19 +684,19 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
switch (state->fclk) {
default:
case 88000000:
- clock_settting = 80;
+ clock_setting = 80;
break;
case 86000000:
- clock_settting = 78;
+ clock_setting = 78;
break;
case 80000000:
- clock_settting = 72;
+ clock_setting = 72;
break;
case 59000000:
- clock_settting = 51;
+ clock_setting = 51;
break;
case 44000000:
- clock_settting = 36;
+ clock_setting = 36;
break;
}
dprintk("pll01: %d, ToneFreq: %d\n", state->fclk/1000000 - 8, (state->fclk + (TONE_FREQ * 32) - 1) / (TONE_FREQ * 32));
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.c b/drivers/media/dvb/frontends/tda18271c2dd.c
new file mode 100644
index 0000000..0384e8d
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -0,0 +1,1251 @@
+/*
+ * tda18271c2dd: Driver for the TDA18271C2 tuner
+ *
+ * Copyright (C) 2010 Digital Devices GmbH
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/version.h>
+#include <asm/div64.h>
+
+#include "dvb_frontend.h"
+
+struct SStandardParam {
+ s32 m_IFFrequency;
+ u32 m_BandWidth;
+ u8 m_EP3_4_0;
+ u8 m_EB22;
+};
+
+struct SMap {
+ u32 m_Frequency;
+ u8 m_Param;
+};
+
+struct SMapI {
+ u32 m_Frequency;
+ s32 m_Param;
+};
+
+struct SMap2 {
+ u32 m_Frequency;
+ u8 m_Param1;
+ u8 m_Param2;
+};
+
+struct SRFBandMap {
+ u32 m_RF_max;
+ u32 m_RF1_Default;
+ u32 m_RF2_Default;
+ u32 m_RF3_Default;
+};
+
+enum ERegister {
+ ID = 0,
+ TM,
+ PL,
+ EP1, EP2, EP3, EP4, EP5,
+ CPD, CD1, CD2, CD3,
+ MPD, MD1, MD2, MD3,
+ EB1, EB2, EB3, EB4, EB5, EB6, EB7, EB8, EB9, EB10,
+ EB11, EB12, EB13, EB14, EB15, EB16, EB17, EB18, EB19, EB20,
+ EB21, EB22, EB23,
+ NUM_REGS
+};
+
+struct tda_state {
+ struct i2c_adapter *i2c;
+ u8 adr;
+
+ u32 m_Frequency;
+ u32 IF;
+
+ u8 m_IFLevelAnalog;
+ u8 m_IFLevelDigital;
+ u8 m_IFLevelDVBC;
+ u8 m_IFLevelDVBT;
+
+ u8 m_EP4;
+ u8 m_EP3_Standby;
+
+ bool m_bMaster;
+
+ s32 m_SettlingTime;
+
+ u8 m_Regs[NUM_REGS];
+
+ /* Tracking filter settings for band 0..6 */
+ u32 m_RF1[7];
+ s32 m_RF_A1[7];
+ s32 m_RF_B1[7];
+ u32 m_RF2[7];
+ s32 m_RF_A2[7];
+ s32 m_RF_B2[7];
+ u32 m_RF3[7];
+
+ u8 m_TMValue_RFCal; /* Calibration temperature */
+
+ bool m_bFMInput; /* true to use Pin 8 for FM Radio */
+
+};
+
+static int PowerScan(struct tda_state *state,
+ u8 RFBand, u32 RF_in,
+ u32 *pRF_Out, bool *pbcal);
+
+static int i2c_readn(struct i2c_adapter *adapter, u8 adr, u8 *data, int len)
+{
+ struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
+ .buf = data, .len = len} };
+ return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
+}
+
+static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
+{
+ struct i2c_msg msg = {.addr = adr, .flags = 0,
+ .buf = data, .len = len};
+
+ if (i2c_transfer(adap, &msg, 1) != 1) {
+ printk(KERN_ERR "tda18271c2dd: i2c write error at addr %i\n", adr);
+ return -1;
+ }
+ return 0;
+}
+
+static int WriteRegs(struct tda_state *state,
+ u8 SubAddr, u8 *Regs, u16 nRegs)
+{
+ u8 data[nRegs+1];
+
+ data[0] = SubAddr;
+ memcpy(data + 1, Regs, nRegs);
+ return i2c_write(state->i2c, state->adr, data, nRegs+1);
+}
+
+static int WriteReg(struct tda_state *state, u8 SubAddr, u8 Reg)
+{
+ u8 msg[2] = {SubAddr, Reg};
+
+ return i2c_write(state->i2c, state->adr, msg, 2);
+}
+
+static int Read(struct tda_state *state, u8 * Regs)
+{
+ return i2c_readn(state->i2c, state->adr, Regs, 16);
+}
+
+static int ReadExtented(struct tda_state *state, u8 * Regs)
+{
+ return i2c_readn(state->i2c, state->adr, Regs, NUM_REGS);
+}
+
+static int UpdateRegs(struct tda_state *state, u8 RegFrom, u8 RegTo)
+{
+ return WriteRegs(state, RegFrom,
+ &state->m_Regs[RegFrom], RegTo-RegFrom+1);
+}
+static int UpdateReg(struct tda_state *state, u8 Reg)
+{
+ return WriteReg(state, Reg, state->m_Regs[Reg]);
+}
+
+#include "tda18271c2dd_maps.h"
+
+static void reset(struct tda_state *state)
+{
+ u32 ulIFLevelAnalog = 0;
+ u32 ulIFLevelDigital = 2;
+ u32 ulIFLevelDVBC = 7;
+ u32 ulIFLevelDVBT = 6;
+ u32 ulXTOut = 0;
+ u32 ulStandbyMode = 0x06; /* Put into standby, but leave osc on */
+ u32 ulSlave = 0;
+ u32 ulFMInput = 0;
+ u32 ulSettlingTime = 100;
+
+ state->m_Frequency = 0;
+ state->m_SettlingTime = 100;
+ state->m_IFLevelAnalog = (ulIFLevelAnalog & 0x07) << 2;
+ state->m_IFLevelDigital = (ulIFLevelDigital & 0x07) << 2;
+ state->m_IFLevelDVBC = (ulIFLevelDVBC & 0x07) << 2;
+ state->m_IFLevelDVBT = (ulIFLevelDVBT & 0x07) << 2;
+
+ state->m_EP4 = 0x20;
+ if (ulXTOut != 0)
+ state->m_EP4 |= 0x40;
+
+ state->m_EP3_Standby = ((ulStandbyMode & 0x07) << 5) | 0x0F;
+ state->m_bMaster = (ulSlave == 0);
+
+ state->m_SettlingTime = ulSettlingTime;
+
+ state->m_bFMInput = (ulFMInput == 2);
+}
+
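+/*
+ * The m_*_Map frequency tables are sorted by ascending frequency and end
+ * with a zero entry.  SearchMap1..3 return the parameter(s) of the first
+ * entry whose frequency is not below the requested one; SearchMap4 returns
+ * the index of the RF band whose upper limit covers the requested frequency.
+ */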
+static bool SearchMap1(struct SMap Map[],
+ u32 Frequency, u8 *pParam)
+{
+ int i = 0;
+
+ while ((Map[i].m_Frequency != 0) && (Frequency > Map[i].m_Frequency))
+ i += 1;
+ if (Map[i].m_Frequency == 0)
+ return false;
+ *pParam = Map[i].m_Param;
+ return true;
+}
+
+static bool SearchMap2(struct SMapI Map[],
+ u32 Frequency, s32 *pParam)
+{
+ int i = 0;
+
+ while ((Map[i].m_Frequency != 0) &&
+ (Frequency > Map[i].m_Frequency))
+ i += 1;
+ if (Map[i].m_Frequency == 0)
+ return false;
+ *pParam = Map[i].m_Param;
+ return true;
+}
+
+static bool SearchMap3(struct SMap2 Map[], u32 Frequency,
+ u8 *pParam1, u8 *pParam2)
+{
+ int i = 0;
+
+ while ((Map[i].m_Frequency != 0) &&
+ (Frequency > Map[i].m_Frequency))
+ i += 1;
+ if (Map[i].m_Frequency == 0)
+ return false;
+ *pParam1 = Map[i].m_Param1;
+ *pParam2 = Map[i].m_Param2;
+ return true;
+}
+
+static bool SearchMap4(struct SRFBandMap Map[],
+ u32 Frequency, u8 *pRFBand)
+{
+ int i = 0;
+
+ while (i < 7 && (Frequency > Map[i].m_RF_max))
+ i += 1;
+ if (i == 7)
+ return false;
+ *pRFBand = i;
+ return true;
+}
+
+static int ThermometerRead(struct tda_state *state, u8 *pTM_Value)
+{
+ int status = 0;
+
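+ /*
+ * The on-chip thermometer has two ranges selected by bit 0x20 of TM.
+ * If the raw 4-bit reading sits at the edge of the current range
+ * (0 with the bit set, 8 with it cleared) the range is toggled and
+ * the value re-read, then translated through the matching map.
+ */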
+ do {
+ u8 Regs[16];
+ state->m_Regs[TM] |= 0x10;
+ status = UpdateReg(state, TM);
+ if (status < 0)
+ break;
+ status = Read(state, Regs);
+ if (status < 0)
+ break;
+ if (((Regs[TM] & 0x0F) == 0 && (Regs[TM] & 0x20) == 0x20) ||
+ ((Regs[TM] & 0x0F) == 8 && (Regs[TM] & 0x20) == 0x00)) {
+ state->m_Regs[TM] ^= 0x20;
+ status = UpdateReg(state, TM);
+ if (status < 0)
+ break;
+ msleep(10);
+ status = Read(state, Regs);
+ if (status < 0)
+ break;
+ }
+ *pTM_Value = (Regs[TM] & 0x20)
+ ? m_Thermometer_Map_2[Regs[TM] & 0x0F]
+ : m_Thermometer_Map_1[Regs[TM] & 0x0F];
+ state->m_Regs[TM] &= ~0x10; /* Thermometer off */
+ status = UpdateReg(state, TM);
+ if (status < 0)
+ break;
+ state->m_Regs[EP4] &= ~0x03; /* CAL_mode = 0 ????????? */
+ status = UpdateReg(state, EP4);
+ if (status < 0)
+ break;
+ } while (0);
+
+ return status;
+}
+
+static int StandBy(struct tda_state *state)
+{
+ int status = 0;
+ do {
+ state->m_Regs[EB12] &= ~0x20; /* PD_AGC1_Det = 0 */
+ status = UpdateReg(state, EB12);
+ if (status < 0)
+ break;
+ state->m_Regs[EB18] &= ~0x83; /* AGC1_loop_off = 0, AGC1_Gain = 6 dB */
+ status = UpdateReg(state, EB18);
+ if (status < 0)
+ break;
+ state->m_Regs[EB21] |= 0x03; /* AGC2_Gain = -6 dB */
+ state->m_Regs[EP3] = state->m_EP3_Standby;
+ status = UpdateReg(state, EP3);
+ if (status < 0)
+ break;
+ state->m_Regs[EB23] &= ~0x06; /* ForceLP_Fc2_En = 0, LP_Fc[2] = 0 */
+ status = UpdateRegs(state, EB21, EB23);
+ if (status < 0)
+ break;
+ } while (0);
+ return status;
+}
+
+static int CalcMainPLL(struct tda_state *state, u32 freq)
+{
+
+ u8 PostDiv;
+ u8 Div;
+ u64 OscFreq;
+ u32 MainDiv;
+
+ if (!SearchMap3(m_Main_PLL_Map, freq, &PostDiv, &Div))
+ return -EINVAL;
+
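+ /*
+ * MainDiv = freq * Div * 16384 / 16 MHz, split over the MD1..MD3
+ * register bytes with the post divider in MPD (same scheme as the
+ * commented formula in CalcCalPLL below).
+ */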
+ OscFreq = (u64) freq * (u64) Div;
+ OscFreq *= (u64) 16384;
+ do_div(OscFreq, (u64)16000000);
+ MainDiv = OscFreq;
+
+ state->m_Regs[MPD] = PostDiv & 0x77;
+ state->m_Regs[MD1] = ((MainDiv >> 16) & 0x7F);
+ state->m_Regs[MD2] = ((MainDiv >> 8) & 0xFF);
+ state->m_Regs[MD3] = (MainDiv & 0xFF);
+
+ return UpdateRegs(state, MPD, MD3);
+}
+
+static int CalcCalPLL(struct tda_state *state, u32 freq)
+{
+ u8 PostDiv;
+ u8 Div;
+ u64 OscFreq;
+ u32 CalDiv;
+
+ if (!SearchMap3(m_Cal_PLL_Map, freq, &PostDiv, &Div))
+ return -EINVAL;
+
+ OscFreq = (u64)freq * (u64)Div;
+ /* CalDiv = u32( OscFreq * 16384 / 16000000 ); */
+ OscFreq *= (u64)16384;
+ do_div(OscFreq, (u64)16000000);
+ CalDiv = OscFreq;
+
+ state->m_Regs[CPD] = PostDiv;
+ state->m_Regs[CD1] = ((CalDiv >> 16) & 0xFF);
+ state->m_Regs[CD2] = ((CalDiv >> 8) & 0xFF);
+ state->m_Regs[CD3] = (CalDiv & 0xFF);
+
+ return UpdateRegs(state, CPD, CD3);
+}
+
+static int CalibrateRF(struct tda_state *state,
+ u8 RFBand, u32 freq, s32 *pCprog)
+{
+ int status = 0;
+ u8 Regs[NUM_REGS];
+ do {
+ u8 BP_Filter = 0;
+ u8 GainTaper = 0;
+ u8 RFC_K = 0;
+ u8 RFC_M = 0;
+
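+ /*
+ * Calibration sequence: program the band/gain/RFC registers, run the
+ * cal PLL at the target frequency and the main PLL 1 MHz above it
+ * with forced LO/CAL sources, let the loop settle, then read the
+ * calibrated capacitor value (RFC_Cprog) back from EB14.
+ */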
+ state->m_Regs[EP4] &= ~0x03; /* CAL_mode = 0 */
+ status = UpdateReg(state, EP4);
+ if (status < 0)
+ break;
+ state->m_Regs[EB18] |= 0x03; /* AGC1_Gain = 3 */
+ status = UpdateReg(state, EB18);
+ if (status < 0)
+ break;
+
+ /* Switching off LT (as datasheet says) causes calibration on C1 to fail */
+ /* (Readout of Cprog is always 255) */
+ if (state->m_Regs[ID] != 0x83) /* C1: ID == 83, C2: ID == 84 */
+ state->m_Regs[EP3] |= 0x40; /* SM_LT = 1 */
+
+ if (!(SearchMap1(m_BP_Filter_Map, freq, &BP_Filter) &&
+ SearchMap1(m_GainTaper_Map, freq, &GainTaper) &&
+ SearchMap3(m_KM_Map, freq, &RFC_K, &RFC_M)))
+ return -EINVAL;
+
+ state->m_Regs[EP1] = (state->m_Regs[EP1] & ~0x07) | BP_Filter;
+ state->m_Regs[EP2] = (RFBand << 5) | GainTaper;
+
+ state->m_Regs[EB13] = (state->m_Regs[EB13] & ~0x7C) | (RFC_K << 4) | (RFC_M << 2);
+
+ status = UpdateRegs(state, EP1, EP3);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EB13);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB4] |= 0x20; /* LO_ForceSrce = 1 */
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB7] |= 0x20; /* CAL_ForceSrce = 1 */
+ status = UpdateReg(state, EB7);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB14] = 0; /* RFC_Cprog = 0 */
+ status = UpdateReg(state, EB14);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB20] &= ~0x20; /* ForceLock = 0; */
+ status = UpdateReg(state, EB20);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EP4] |= 0x03; /* CAL_Mode = 3 */
+ status = UpdateRegs(state, EP4, EP5);
+ if (status < 0)
+ break;
+
+ status = CalcCalPLL(state, freq);
+ if (status < 0)
+ break;
+ status = CalcMainPLL(state, freq + 1000000);
+ if (status < 0)
+ break;
+
+ msleep(5);
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB4] &= ~0x20; /* LO_ForceSrce = 0 */
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB7] &= ~0x20; /* CAL_ForceSrce = 0 */
+ status = UpdateReg(state, EB7);
+ if (status < 0)
+ break;
+ msleep(10);
+
+ state->m_Regs[EB20] |= 0x20; /* ForceLock = 1; */
+ status = UpdateReg(state, EB20);
+ if (status < 0)
+ break;
+ msleep(60);
+
+ state->m_Regs[EP4] &= ~0x03; /* CAL_Mode = 0 */
+ state->m_Regs[EP3] &= ~0x40; /* SM_LT = 0 */
+ state->m_Regs[EB18] &= ~0x03; /* AGC1_Gain = 0 */
+ status = UpdateReg(state, EB18);
+ if (status < 0)
+ break;
+ status = UpdateRegs(state, EP3, EP4);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+
+ status = ReadExtented(state, Regs);
+ if (status < 0)
+ break;
+
+ *pCprog = Regs[EB14];
+
+ } while (0);
+ return status;
+}
+
+static int RFTrackingFiltersInit(struct tda_state *state,
+ u8 RFBand)
+{
+ int status = 0;
+
+ u32 RF1 = m_RF_Band_Map[RFBand].m_RF1_Default;
+ u32 RF2 = m_RF_Band_Map[RFBand].m_RF2_Default;
+ u32 RF3 = m_RF_Band_Map[RFBand].m_RF3_Default;
+ bool bcal = false;
+
+ s32 Cprog_cal1 = 0;
+ s32 Cprog_table1 = 0;
+ s32 Cprog_cal2 = 0;
+ s32 Cprog_table2 = 0;
+ s32 Cprog_cal3 = 0;
+ s32 Cprog_table3 = 0;
+
+ state->m_RF_A1[RFBand] = 0;
+ state->m_RF_B1[RFBand] = 0;
+ state->m_RF_A2[RFBand] = 0;
+ state->m_RF_B2[RFBand] = 0;
+
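+ /*
+ * Calibrate at up to three reference frequencies of this band and
+ * derive a piecewise-linear correction: B1/B2 are the offsets between
+ * calibrated and table Cprog at RF1/RF2, A1/A2 the slopes between
+ * neighbouring reference points.
+ */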
+ do {
+ status = PowerScan(state, RFBand, RF1, &RF1, &bcal);
+ if (status < 0)
+ break;
+ if (bcal) {
+ status = CalibrateRF(state, RFBand, RF1, &Cprog_cal1);
+ if (status < 0)
+ break;
+ }
+ SearchMap2(m_RF_Cal_Map, RF1, &Cprog_table1);
+ if (!bcal)
+ Cprog_cal1 = Cprog_table1;
+ state->m_RF_B1[RFBand] = Cprog_cal1 - Cprog_table1;
+ /* state->m_RF_A1[RF_Band] = ???? */
+
+ if (RF2 == 0)
+ break;
+
+ status = PowerScan(state, RFBand, RF2, &RF2, &bcal);
+ if (status < 0)
+ break;
+ if (bcal) {
+ status = CalibrateRF(state, RFBand, RF2, &Cprog_cal2);
+ if (status < 0)
+ break;
+ }
+ SearchMap2(m_RF_Cal_Map, RF2, &Cprog_table2);
+ if (!bcal)
+ Cprog_cal2 = Cprog_table2;
+
+ state->m_RF_A1[RFBand] =
+ (Cprog_cal2 - Cprog_table2 - Cprog_cal1 + Cprog_table1) /
+ ((s32)(RF2) - (s32)(RF1));
+
+ if (RF3 == 0)
+ break;
+
+ status = PowerScan(state, RFBand, RF3, &RF3, &bcal);
+ if (status < 0)
+ break;
+ if (bcal) {
+ status = CalibrateRF(state, RFBand, RF3, &Cprog_cal3);
+ if (status < 0)
+ break;
+ }
+ SearchMap2(m_RF_Cal_Map, RF3, &Cprog_table3);
+ if (!bcal)
+ Cprog_cal3 = Cprog_table3;
+ state->m_RF_A2[RFBand] = (Cprog_cal3 - Cprog_table3 - Cprog_cal2 + Cprog_table2) / ((s32)(RF3) - (s32)(RF2));
+ state->m_RF_B2[RFBand] = Cprog_cal2 - Cprog_table2;
+
+ } while (0);
+
+ state->m_RF1[RFBand] = RF1;
+ state->m_RF2[RFBand] = RF2;
+ state->m_RF3[RFBand] = RF3;
+
+#if 0
+ printk(KERN_ERR "tda18271c2dd: %s %d RF1 = %d A1 = %d B1 = %d RF2 = %d A2 = %d B2 = %d RF3 = %d\n", __func__,
+ RFBand, RF1, state->m_RF_A1[RFBand], state->m_RF_B1[RFBand], RF2,
+ state->m_RF_A2[RFBand], state->m_RF_B2[RFBand], RF3);
+#endif
+
+ return status;
+}
+
+static int PowerScan(struct tda_state *state,
+ u8 RFBand, u32 RF_in, u32 *pRF_Out, bool *pbcal)
+{
+ int status = 0;
+ do {
+ u8 Gain_Taper = 0;
+ s32 RFC_Cprog = 0;
+ u8 CID_Target = 0;
+ u8 CountLimit = 0;
+ u32 freq_MainPLL;
+ u8 Regs[NUM_REGS];
+ u8 CID_Gain;
+ s32 Count = 0;
+ int sign = 1;
+ bool wait = false;
+
+ if (!(SearchMap2(m_RF_Cal_Map, RF_in, &RFC_Cprog) &&
+ SearchMap1(m_GainTaper_Map, RF_in, &Gain_Taper) &&
+ SearchMap3(m_CID_Target_Map, RF_in, &CID_Target, &CountLimit))) {
+
+ printk(KERN_ERR "tda18271c2dd: %s Search map failed\n", __func__);
+ return -EINVAL;
+ }
+
+ state->m_Regs[EP2] = (RFBand << 5) | Gain_Taper;
+ state->m_Regs[EB14] = (RFC_Cprog);
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EB14);
+ if (status < 0)
+ break;
+
+ freq_MainPLL = RF_in + 1000000;
+ status = CalcMainPLL(state, freq_MainPLL);
+ if (status < 0)
+ break;
+ msleep(5);
+ state->m_Regs[EP4] = (state->m_Regs[EP4] & ~0x03) | 1; /* CAL_mode = 1 */
+ status = UpdateReg(state, EP4);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP2); /* Launch power measurement */
+ if (status < 0)
+ break;
+ status = ReadExtented(state, Regs);
+ if (status < 0)
+ break;
+ CID_Gain = Regs[EB10] & 0x3F;
+ state->m_Regs[ID] = Regs[ID]; /* Chip version (needed for the C1 workaround in CalibrateRF) */
+
+ *pRF_Out = RF_in;
+
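+ /*
+ * Step the main PLL away from RF_in in 200 kHz increments, first
+ * upwards and then downwards, until the measured CID gain reaches
+ * CID_Target or both directions have hit the count limit.
+ */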
+ while (CID_Gain < CID_Target) {
+ freq_MainPLL = RF_in + sign * Count + 1000000;
+ status = CalcMainPLL(state, freq_MainPLL);
+ if (status < 0)
+ break;
+ msleep(wait ? 5 : 1);
+ wait = false;
+ status = UpdateReg(state, EP2); /* Launch power measurement */
+ if (status < 0)
+ break;
+ status = ReadExtented(state, Regs);
+ if (status < 0)
+ break;
+ CID_Gain = Regs[EB10] & 0x3F;
+ Count += 200000;
+
+ if (Count < CountLimit * 100000)
+ continue;
+ if (sign < 0)
+ break;
+
+ sign = -sign;
+ Count = 200000;
+ wait = true;
+ }
+ if (status < 0)
+ break;
+ if (CID_Gain >= CID_Target) {
+ *pbcal = true;
+ *pRF_Out = freq_MainPLL - 1000000;
+ } else
+ *pbcal = false;
+ } while (0);
+
+ return status;
+}
+
+static int PowerScanInit(struct tda_state *state)
+{
+ int status = 0;
+ do {
+ state->m_Regs[EP3] = (state->m_Regs[EP3] & ~0x1F) | 0x12;
+ state->m_Regs[EP4] = (state->m_Regs[EP4] & ~0x1F); /* IF level = 0, Cal mode = 0 */
+ status = UpdateRegs(state, EP3, EP4);
+ if (status < 0)
+ break;
+ state->m_Regs[EB18] = (state->m_Regs[EB18] & ~0x03); /* AGC 1 Gain = 0 */
+ status = UpdateReg(state, EB18);
+ if (status < 0)
+ break;
+ state->m_Regs[EB21] = (state->m_Regs[EB21] & ~0x03); /* AGC 2 Gain = 0 (Datasheet = 3) */
+ state->m_Regs[EB23] = (state->m_Regs[EB23] | 0x06); /* ForceLP_Fc2_En = 1, LPFc[2] = 1 */
+ status = UpdateRegs(state, EB21, EB23);
+ if (status < 0)
+ break;
+ } while (0);
+ return status;
+}
+
+static int CalcRFFilterCurve(struct tda_state *state)
+{
+ int status = 0;
+ do {
+ msleep(200); /* Temperature stabilisation */
+ status = PowerScanInit(state);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 0);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 1);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 2);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 3);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 4);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 5);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 6);
+ if (status < 0)
+ break;
+ status = ThermometerRead(state, &state->m_TMValue_RFCal); /* also switches off Cal mode !!! */
+ if (status < 0)
+ break;
+ } while (0);
+
+ return status;
+}
+
+static int FixedContentsI2CUpdate(struct tda_state *state)
+{
+ static u8 InitRegs[] = {
+ 0x08, 0x80, 0xC6,
+ 0xDF, 0x16, 0x60, 0x80,
+ 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xFC, 0x01, 0x84, 0x41,
+ 0x01, 0x84, 0x40, 0x07,
+ 0x00, 0x00, 0x96, 0x3F,
+ 0xC1, 0x00, 0x8F, 0x00,
+ 0x00, 0x8C, 0x00, 0x20,
+ 0xB3, 0x48, 0xB0,
+ };
+ int status = 0;
+ memcpy(&state->m_Regs[TM], InitRegs, EB23 - TM + 1);
+ do {
+ status = UpdateRegs(state, TM, EB23);
+ if (status < 0)
+ break;
+
+ /* AGC1 gain setup */
+ state->m_Regs[EB17] = 0x00;
+ status = UpdateReg(state, EB17);
+ if (status < 0)
+ break;
+ state->m_Regs[EB17] = 0x03;
+ status = UpdateReg(state, EB17);
+ if (status < 0)
+ break;
+ state->m_Regs[EB17] = 0x43;
+ status = UpdateReg(state, EB17);
+ if (status < 0)
+ break;
+ state->m_Regs[EB17] = 0x4C;
+ status = UpdateReg(state, EB17);
+ if (status < 0)
+ break;
+
+ /* IRC Cal Low band */
+ state->m_Regs[EP3] = 0x1F;
+ state->m_Regs[EP4] = 0x66;
+ state->m_Regs[EP5] = 0x81;
+ state->m_Regs[CPD] = 0xCC;
+ state->m_Regs[CD1] = 0x6C;
+ state->m_Regs[CD2] = 0x00;
+ state->m_Regs[CD3] = 0x00;
+ state->m_Regs[MPD] = 0xC5;
+ state->m_Regs[MD1] = 0x77;
+ state->m_Regs[MD2] = 0x08;
+ state->m_Regs[MD3] = 0x00;
+ status = UpdateRegs(state, EP2, MD3); /* diff between sw and datasheet (ep3-md3) */
+ if (status < 0)
+ break;
+
+#if 0
+ state->m_Regs[EB4] = 0x61; /* missing in sw */
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+ msleep(1);
+ state->m_Regs[EB4] = 0x41;
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+#endif
+
+ msleep(5);
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+ msleep(5);
+
+ state->m_Regs[EP5] = 0x85;
+ state->m_Regs[CPD] = 0xCB;
+ state->m_Regs[CD1] = 0x66;
+ state->m_Regs[CD2] = 0x70;
+ status = UpdateRegs(state, EP3, CD3);
+ if (status < 0)
+ break;
+ msleep(5);
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ msleep(30);
+
+ /* IRC Cal mid band */
+ state->m_Regs[EP5] = 0x82;
+ state->m_Regs[CPD] = 0xA8;
+ state->m_Regs[CD2] = 0x00;
+ state->m_Regs[MPD] = 0xA1; /* Datasheet = 0xA9 */
+ state->m_Regs[MD1] = 0x73;
+ state->m_Regs[MD2] = 0x1A;
+ status = UpdateRegs(state, EP3, MD3);
+ if (status < 0)
+ break;
+
+ msleep(5);
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+ msleep(5);
+
+ state->m_Regs[EP5] = 0x86;
+ state->m_Regs[CPD] = 0xA8;
+ state->m_Regs[CD1] = 0x66;
+ state->m_Regs[CD2] = 0xA0;
+ status = UpdateRegs(state, EP3, CD3);
+ if (status < 0)
+ break;
+ msleep(5);
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ msleep(30);
+
+ /* IRC Cal high band */
+ state->m_Regs[EP5] = 0x83;
+ state->m_Regs[CPD] = 0x98;
+ state->m_Regs[CD1] = 0x65;
+ state->m_Regs[CD2] = 0x00;
+ state->m_Regs[MPD] = 0x91; /* Datasheet = 0x91 */
+ state->m_Regs[MD1] = 0x71;
+ state->m_Regs[MD2] = 0xCD;
+ status = UpdateRegs(state, EP3, MD3);
+ if (status < 0)
+ break;
+ msleep(5);
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+ msleep(5);
+ state->m_Regs[EP5] = 0x87;
+ state->m_Regs[CD1] = 0x65;
+ state->m_Regs[CD2] = 0x50;
+ status = UpdateRegs(state, EP3, CD3);
+ if (status < 0)
+ break;
+ msleep(5);
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ msleep(30);
+
+ /* Back to normal */
+ state->m_Regs[EP4] = 0x64;
+ status = UpdateReg(state, EP4);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+
+ } while (0);
+ return status;
+}
+
+static int InitCal(struct tda_state *state)
+{
+ int status = 0;
+
+ do {
+ status = FixedContentsI2CUpdate(state);
+ if (status < 0)
+ break;
+ status = CalcRFFilterCurve(state);
+ if (status < 0)
+ break;
+ status = StandBy(state);
+ if (status < 0)
+ break;
+ /* m_bInitDone = true; */
+ } while (0);
+ return status;
+}
+
+static int RFTrackingFiltersCorrection(struct tda_state *state,
+ u32 Frequency)
+{
+ int status = 0;
+ s32 Cprog_table;
+ u8 RFBand;
+ u8 dCoverdT;
+
+ if (!SearchMap2(m_RF_Cal_Map, Frequency, &Cprog_table) ||
+ !SearchMap4(m_RF_Band_Map, Frequency, &RFBand) ||
+ !SearchMap1(m_RF_Cal_DC_Over_DT_Map, Frequency, &dCoverdT))
+
+ return -EINVAL;
+
+ do {
+ u8 TMValue_Current;
+ u32 RF1 = state->m_RF1[RFBand];
+		u32 RF2 = state->m_RF2[RFBand];
+		u32 RF3 = state->m_RF3[RFBand];
+ s32 RF_A1 = state->m_RF_A1[RFBand];
+ s32 RF_B1 = state->m_RF_B1[RFBand];
+ s32 RF_A2 = state->m_RF_A2[RFBand];
+ s32 RF_B2 = state->m_RF_B2[RFBand];
+ s32 Capprox = 0;
+ int TComp;
+
+ state->m_Regs[EP3] &= ~0xE0; /* Power up */
+ status = UpdateReg(state, EP3);
+ if (status < 0)
+ break;
+
+ status = ThermometerRead(state, &TMValue_Current);
+ if (status < 0)
+ break;
+
+ if (RF3 == 0 || Frequency < RF2)
+ Capprox = RF_A1 * ((s32)(Frequency) - (s32)(RF1)) + RF_B1 + Cprog_table;
+ else
+ Capprox = RF_A2 * ((s32)(Frequency) - (s32)(RF2)) + RF_B2 + Cprog_table;
+
+ TComp = (int)(dCoverdT) * ((int)(TMValue_Current) - (int)(state->m_TMValue_RFCal))/1000;
+
+ Capprox += TComp;
+
+ if (Capprox < 0)
+ Capprox = 0;
+ else if (Capprox > 255)
+ Capprox = 255;
+
+		/* TODO Temperature compensation. There is definitely a scale factor */
+		/* missing in the datasheet, so leave it out for now. */
+ state->m_Regs[EB14] = Capprox;
+
+ status = UpdateReg(state, EB14);
+ if (status < 0)
+ break;
+
+ } while (0);
+ return status;
+}
+
+static int ChannelConfiguration(struct tda_state *state,
+ u32 Frequency, int Standard)
+{
+
+ s32 IntermediateFrequency = m_StandardTable[Standard].m_IFFrequency;
+ int status = 0;
+
+ u8 BP_Filter = 0;
+ u8 RF_Band = 0;
+ u8 GainTaper = 0;
+ u8 IR_Meas = 0;
+
+ state->IF = IntermediateFrequency;
+ /* printk("tda18271c2dd: %s Freq = %d Standard = %d IF = %d\n", __func__, Frequency, Standard, IntermediateFrequency); */
+ /* get values from tables */
+
+ if (!(SearchMap1(m_BP_Filter_Map, Frequency, &BP_Filter) &&
+ SearchMap1(m_GainTaper_Map, Frequency, &GainTaper) &&
+ SearchMap1(m_IR_Meas_Map, Frequency, &IR_Meas) &&
+ SearchMap4(m_RF_Band_Map, Frequency, &RF_Band))) {
+
+ printk(KERN_ERR "tda18271c2dd: %s SearchMap failed\n", __func__);
+ return -EINVAL;
+ }
+
+ do {
+ state->m_Regs[EP3] = (state->m_Regs[EP3] & ~0x1F) | m_StandardTable[Standard].m_EP3_4_0;
+ state->m_Regs[EP3] &= ~0x04; /* switch RFAGC to high speed mode */
+
+ /* m_EP4 default for XToutOn, CAL_Mode (0) */
+ state->m_Regs[EP4] = state->m_EP4 | ((Standard > HF_AnalogMax) ? state->m_IFLevelDigital : state->m_IFLevelAnalog);
+ /* state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDigital; */
+ if (Standard <= HF_AnalogMax)
+ state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelAnalog;
+ else if (Standard <= HF_ATSC)
+ state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDVBT;
+ else if (Standard <= HF_DVBC)
+ state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDVBC;
+ else
+ state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDigital;
+
+ if ((Standard == HF_FM_Radio) && state->m_bFMInput)
+ state->m_Regs[EP4] |= 80;
+
+ state->m_Regs[MPD] &= ~0x80;
+ if (Standard > HF_AnalogMax)
+ state->m_Regs[MPD] |= 0x80; /* Add IF_notch for digital */
+
+ state->m_Regs[EB22] = m_StandardTable[Standard].m_EB22;
+
+ /* Note: This is missing from flowchart in TDA18271 specification ( 1.5 MHz cutoff for FM ) */
+ if (Standard == HF_FM_Radio)
+ state->m_Regs[EB23] |= 0x06; /* ForceLP_Fc2_En = 1, LPFc[2] = 1 */
+ else
+ state->m_Regs[EB23] &= ~0x06; /* ForceLP_Fc2_En = 0, LPFc[2] = 0 */
+
+ status = UpdateRegs(state, EB22, EB23);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EP1] = (state->m_Regs[EP1] & ~0x07) | 0x40 | BP_Filter; /* Dis_Power_level = 1, Filter */
+ state->m_Regs[EP5] = (state->m_Regs[EP5] & ~0x07) | IR_Meas;
+ state->m_Regs[EP2] = (RF_Band << 5) | GainTaper;
+
+ state->m_Regs[EB1] = (state->m_Regs[EB1] & ~0x07) |
+ (state->m_bMaster ? 0x04 : 0x00); /* CALVCO_FortLOn = MS */
+ /* AGC1_always_master = 0 */
+ /* AGC_firstn = 0 */
+ status = UpdateReg(state, EB1);
+ if (status < 0)
+ break;
+
+ if (state->m_bMaster) {
+ status = CalcMainPLL(state, Frequency + IntermediateFrequency);
+ if (status < 0)
+ break;
+ status = UpdateRegs(state, TM, EP5);
+ if (status < 0)
+ break;
+ state->m_Regs[EB4] |= 0x20; /* LO_forceSrce = 1 */
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+ msleep(1);
+ state->m_Regs[EB4] &= ~0x20; /* LO_forceSrce = 0 */
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+ } else {
+ u8 PostDiv = 0;
+ u8 Div;
+ status = CalcCalPLL(state, Frequency + IntermediateFrequency);
+ if (status < 0)
+ break;
+
+ SearchMap3(m_Cal_PLL_Map, Frequency + IntermediateFrequency, &PostDiv, &Div);
+ state->m_Regs[MPD] = (state->m_Regs[MPD] & ~0x7F) | (PostDiv & 0x77);
+ status = UpdateReg(state, MPD);
+ if (status < 0)
+ break;
+ status = UpdateRegs(state, TM, EP5);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB7] |= 0x20; /* CAL_forceSrce = 1 */
+ status = UpdateReg(state, EB7);
+ if (status < 0)
+ break;
+ msleep(1);
+ state->m_Regs[EB7] &= ~0x20; /* CAL_forceSrce = 0 */
+ status = UpdateReg(state, EB7);
+ if (status < 0)
+ break;
+ }
+ msleep(20);
+ if (Standard != HF_FM_Radio)
+ state->m_Regs[EP3] |= 0x04; /* RFAGC to normal mode */
+ status = UpdateReg(state, EP3);
+ if (status < 0)
+ break;
+
+ } while (0);
+ return status;
+}
+
+static int sleep(struct dvb_frontend *fe)
+{
+ struct tda_state *state = fe->tuner_priv;
+
+ StandBy(state);
+ return 0;
+}
+
+static int init(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static int release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+/*
+ * As defined in EN 300 429 Annex A and in ITU-T J.83 Annex A, the DVB-C
+ * roll-off factor is 0.15.
+ * According to the specs, the required bandwidth is given by:
+ *	Bw = Symbol_rate * (1 + 0.15)
+ * As such, the maximum symbol rate supported by a 6 MHz channel is
+ *	max_symbol_rate = 6 MHz / 1.15 = 5217391 Baud
+ * NOTE: For ITU-T J.83 Annex C, the roll-off factor is 0.13, so:
+ *	max_symbol_rate = 6 MHz / 1.13 = 5309735 Baud
+ * That means an adjustment is needed for Japan but, as the DRX-K is
+ * currently hardcoded to Annex A, let's stick with the 0.15 roll-off
+ * factor.
+ */
+#define MAX_SYMBOL_RATE_6MHz 5217391
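+
+/*
+ * Illustrative sketch (not part of the original code): the same bound,
+ * generalised to an arbitrary channel bandwidth, assuming the Annex A
+ * roll-off of 0.15 discussed above. The helper name is hypothetical and
+ * is not referenced anywhere else in this driver, hence the #if 0.
+ */
+#if 0
+static u32 max_symbol_rate(u32 bandwidth_hz)
+{
+	/* Bw = SR * (1 + 0.15)  =>  SR_max = Bw * 100 / 115 */
+	/* 8 MHz * 100 still fits in a u32, so plain 32-bit math is enough */
+	return (bandwidth_hz * 100) / 115;
+}
+#endif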
+
+static int set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct tda_state *state = fe->tuner_priv;
+ int status = 0;
+ int Standard;
+
+ state->m_Frequency = params->frequency;
+
+ if (fe->ops.info.type == FE_OFDM)
+ switch (params->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ Standard = HF_DVBT_6MHZ;
+ break;
+ case BANDWIDTH_7_MHZ:
+ Standard = HF_DVBT_7MHZ;
+ break;
+ default:
+ case BANDWIDTH_8_MHZ:
+ Standard = HF_DVBT_8MHZ;
+ break;
+ }
+ else if (fe->ops.info.type == FE_QAM) {
+ if (params->u.qam.symbol_rate <= MAX_SYMBOL_RATE_6MHz)
+ Standard = HF_DVBC_6MHZ;
+ else
+ Standard = HF_DVBC_8MHZ;
+ } else
+ return -EINVAL;
+ do {
+ status = RFTrackingFiltersCorrection(state, params->frequency);
+ if (status < 0)
+ break;
+ status = ChannelConfiguration(state, params->frequency, Standard);
+ if (status < 0)
+ break;
+
+ msleep(state->m_SettlingTime); /* Allow AGC's to settle down */
+ } while (0);
+ return status;
+}
+
+#if 0
+static int GetSignalStrength(s32 *pSignalStrength, u32 RFAgc, u32 IFAgc)
+{
+ if (IFAgc < 500) {
+ /* Scale this from 0 to 50000 */
+ *pSignalStrength = IFAgc * 100;
+ } else {
+ /* Scale range 500-1500 to 50000-80000 */
+ *pSignalStrength = 50000 + (IFAgc - 500) * 30;
+ }
+
+ return 0;
+}
+#endif
+
+static int get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct tda_state *state = fe->tuner_priv;
+
+ *frequency = state->IF;
+ return 0;
+}
+
+static int get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ /* struct tda_state *state = fe->tuner_priv; */
+ /* *bandwidth = priv->bandwidth; */
+ return 0;
+}
+
+
+static struct dvb_tuner_ops tuner_ops = {
+ .info = {
+ .name = "NXP TDA18271C2D",
+ .frequency_min = 47125000,
+ .frequency_max = 865000000,
+ .frequency_step = 62500
+ },
+ .init = init,
+ .sleep = sleep,
+ .set_params = set_params,
+ .release = release,
+ .get_frequency = get_frequency,
+ .get_bandwidth = get_bandwidth,
+};
+
+struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 adr)
+{
+ struct tda_state *state;
+
+ state = kzalloc(sizeof(struct tda_state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ fe->tuner_priv = state;
+ state->adr = adr;
+ state->i2c = i2c;
+ memcpy(&fe->ops.tuner_ops, &tuner_ops, sizeof(struct dvb_tuner_ops));
+ reset(state);
+ InitCal(state);
+
+ return fe;
+}
+EXPORT_SYMBOL_GPL(tda18271c2dd_attach);
+
+MODULE_DESCRIPTION("TDA18271C2 driver");
+MODULE_AUTHOR("DD");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.h b/drivers/media/dvb/frontends/tda18271c2dd.h
new file mode 100644
index 0000000..1389c74
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda18271c2dd.h
@@ -0,0 +1,16 @@
+#ifndef _TDA18271C2DD_H_
+#define _TDA18271C2DD_H_
+#if defined(CONFIG_DVB_TDA18271C2DD) || (defined(CONFIG_DVB_TDA18271C2DD_MODULE) \
+ && defined(MODULE))
+struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 adr);
+#else
+static inline struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 adr)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb/frontends/tda18271c2dd_maps.h b/drivers/media/dvb/frontends/tda18271c2dd_maps.h
new file mode 100644
index 0000000..b87661b
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda18271c2dd_maps.h
@@ -0,0 +1,814 @@
+enum HF_S {
+ HF_None = 0, HF_B, HF_DK, HF_G, HF_I, HF_L, HF_L1, HF_MN, HF_FM_Radio,
+ HF_AnalogMax, HF_DVBT_6MHZ, HF_DVBT_7MHZ, HF_DVBT_8MHZ,
+ HF_DVBT, HF_ATSC, HF_DVBC_6MHZ, HF_DVBC_7MHZ,
+ HF_DVBC_8MHZ, HF_DVBC
+};
+
+static struct SStandardParam m_StandardTable[] = {
+ { 0, 0, 0x00, 0x00 }, /* HF_None */
+ { 6000000, 7000000, 0x1D, 0x2C }, /* HF_B, */
+ { 6900000, 8000000, 0x1E, 0x2C }, /* HF_DK, */
+ { 7100000, 8000000, 0x1E, 0x2C }, /* HF_G, */
+ { 7250000, 8000000, 0x1E, 0x2C }, /* HF_I, */
+ { 6900000, 8000000, 0x1E, 0x2C }, /* HF_L, */
+ { 1250000, 8000000, 0x1E, 0x2C }, /* HF_L1, */
+ { 5400000, 6000000, 0x1C, 0x2C }, /* HF_MN, */
+ { 1250000, 500000, 0x18, 0x2C }, /* HF_FM_Radio, */
+ { 0, 0, 0x00, 0x00 }, /* HF_AnalogMax (Unused) */
+ { 3300000, 6000000, 0x1C, 0x58 }, /* HF_DVBT_6MHZ */
+ { 3500000, 7000000, 0x1C, 0x37 }, /* HF_DVBT_7MHZ */
+ { 4000000, 8000000, 0x1D, 0x37 }, /* HF_DVBT_8MHZ */
+ { 0, 0, 0x00, 0x00 }, /* HF_DVBT (Unused) */
+ { 5000000, 6000000, 0x1C, 0x37 }, /* HF_ATSC (center = 3.25 MHz) */
+ { 4000000, 6000000, 0x1D, 0x58 }, /* HF_DVBC_6MHZ (Chicago) */
+ { 4500000, 7000000, 0x1E, 0x37 }, /* HF_DVBC_7MHZ (not documented by NXP) */
+ { 5000000, 8000000, 0x1F, 0x37 }, /* HF_DVBC_8MHZ */
+ { 0, 0, 0x00, 0x00 }, /* HF_DVBC (Unused) */
+};
+
+static struct SMap m_BP_Filter_Map[] = {
+ { 62000000, 0x00 },
+ { 84000000, 0x01 },
+ { 100000000, 0x02 },
+ { 140000000, 0x03 },
+ { 170000000, 0x04 },
+ { 180000000, 0x05 },
+ { 865000000, 0x06 },
+ { 0, 0x00 }, /* Table End */
+};
+
+static struct SMapI m_RF_Cal_Map[] = {
+ { 41000000, 0x0F },
+ { 43000000, 0x1C },
+ { 45000000, 0x2F },
+ { 46000000, 0x39 },
+ { 47000000, 0x40 },
+ { 47900000, 0x50 },
+ { 49100000, 0x16 },
+ { 50000000, 0x18 },
+ { 51000000, 0x20 },
+ { 53000000, 0x28 },
+ { 55000000, 0x2B },
+ { 56000000, 0x32 },
+ { 57000000, 0x35 },
+ { 58000000, 0x3E },
+ { 59000000, 0x43 },
+ { 60000000, 0x4E },
+ { 61100000, 0x55 },
+ { 63000000, 0x0F },
+ { 64000000, 0x11 },
+ { 65000000, 0x12 },
+ { 66000000, 0x15 },
+ { 67000000, 0x16 },
+ { 68000000, 0x17 },
+ { 70000000, 0x19 },
+ { 71000000, 0x1C },
+ { 72000000, 0x1D },
+ { 73000000, 0x1F },
+ { 74000000, 0x20 },
+ { 75000000, 0x21 },
+ { 76000000, 0x24 },
+ { 77000000, 0x25 },
+ { 78000000, 0x27 },
+ { 80000000, 0x28 },
+ { 81000000, 0x29 },
+ { 82000000, 0x2D },
+ { 83000000, 0x2E },
+ { 84000000, 0x2F },
+ { 85000000, 0x31 },
+ { 86000000, 0x33 },
+ { 87000000, 0x34 },
+ { 88000000, 0x35 },
+ { 89000000, 0x37 },
+ { 90000000, 0x38 },
+ { 91000000, 0x39 },
+ { 93000000, 0x3C },
+ { 94000000, 0x3E },
+ { 95000000, 0x3F },
+ { 96000000, 0x40 },
+ { 97000000, 0x42 },
+ { 99000000, 0x45 },
+ { 100000000, 0x46 },
+ { 102000000, 0x48 },
+ { 103000000, 0x4A },
+ { 105000000, 0x4D },
+ { 106000000, 0x4E },
+ { 107000000, 0x50 },
+ { 108000000, 0x51 },
+ { 110000000, 0x54 },
+ { 111000000, 0x56 },
+ { 112000000, 0x57 },
+ { 113000000, 0x58 },
+ { 114000000, 0x59 },
+ { 115000000, 0x5C },
+ { 116000000, 0x5D },
+ { 117000000, 0x5F },
+ { 119000000, 0x60 },
+ { 120000000, 0x64 },
+ { 121000000, 0x65 },
+ { 122000000, 0x66 },
+ { 123000000, 0x68 },
+ { 124000000, 0x69 },
+ { 125000000, 0x6C },
+ { 126000000, 0x6D },
+ { 127000000, 0x6E },
+ { 128000000, 0x70 },
+ { 129000000, 0x71 },
+ { 130000000, 0x75 },
+ { 131000000, 0x77 },
+ { 132000000, 0x78 },
+ { 133000000, 0x7B },
+ { 134000000, 0x7E },
+ { 135000000, 0x81 },
+ { 136000000, 0x82 },
+ { 137000000, 0x87 },
+ { 138000000, 0x88 },
+ { 139000000, 0x8D },
+ { 140000000, 0x8E },
+ { 141000000, 0x91 },
+ { 142000000, 0x95 },
+ { 143000000, 0x9A },
+ { 144000000, 0x9D },
+ { 145000000, 0xA1 },
+ { 146000000, 0xA2 },
+ { 147000000, 0xA4 },
+ { 148000000, 0xA9 },
+ { 149000000, 0xAE },
+ { 150000000, 0xB0 },
+ { 151000000, 0xB1 },
+ { 152000000, 0xB7 },
+ { 152600000, 0xBD },
+ { 154000000, 0x20 },
+ { 155000000, 0x22 },
+ { 156000000, 0x24 },
+ { 157000000, 0x25 },
+ { 158000000, 0x27 },
+ { 159000000, 0x29 },
+ { 160000000, 0x2C },
+ { 161000000, 0x2D },
+ { 163000000, 0x2E },
+ { 164000000, 0x2F },
+ { 164700000, 0x30 },
+ { 166000000, 0x11 },
+ { 167000000, 0x12 },
+ { 168000000, 0x13 },
+ { 169000000, 0x14 },
+ { 170000000, 0x15 },
+ { 172000000, 0x16 },
+ { 173000000, 0x17 },
+ { 174000000, 0x18 },
+ { 175000000, 0x1A },
+ { 176000000, 0x1B },
+ { 178000000, 0x1D },
+ { 179000000, 0x1E },
+ { 180000000, 0x1F },
+ { 181000000, 0x20 },
+ { 182000000, 0x21 },
+ { 183000000, 0x22 },
+ { 184000000, 0x24 },
+ { 185000000, 0x25 },
+ { 186000000, 0x26 },
+ { 187000000, 0x27 },
+ { 188000000, 0x29 },
+ { 189000000, 0x2A },
+ { 190000000, 0x2C },
+ { 191000000, 0x2D },
+ { 192000000, 0x2E },
+ { 193000000, 0x2F },
+ { 194000000, 0x30 },
+ { 195000000, 0x33 },
+ { 196000000, 0x35 },
+ { 198000000, 0x36 },
+ { 200000000, 0x38 },
+ { 201000000, 0x3C },
+ { 202000000, 0x3D },
+ { 203500000, 0x3E },
+ { 206000000, 0x0E },
+ { 208000000, 0x0F },
+ { 212000000, 0x10 },
+ { 216000000, 0x11 },
+ { 217000000, 0x12 },
+ { 218000000, 0x13 },
+ { 220000000, 0x14 },
+ { 222000000, 0x15 },
+ { 225000000, 0x16 },
+ { 228000000, 0x17 },
+ { 231000000, 0x18 },
+ { 234000000, 0x19 },
+ { 235000000, 0x1A },
+ { 236000000, 0x1B },
+ { 237000000, 0x1C },
+ { 240000000, 0x1D },
+ { 242000000, 0x1E },
+ { 244000000, 0x1F },
+ { 247000000, 0x20 },
+ { 249000000, 0x21 },
+ { 252000000, 0x22 },
+ { 253000000, 0x23 },
+ { 254000000, 0x24 },
+ { 256000000, 0x25 },
+ { 259000000, 0x26 },
+ { 262000000, 0x27 },
+ { 264000000, 0x28 },
+ { 267000000, 0x29 },
+ { 269000000, 0x2A },
+ { 271000000, 0x2B },
+ { 273000000, 0x2C },
+ { 275000000, 0x2D },
+ { 277000000, 0x2E },
+ { 279000000, 0x2F },
+ { 282000000, 0x30 },
+ { 284000000, 0x31 },
+ { 286000000, 0x32 },
+ { 287000000, 0x33 },
+ { 290000000, 0x34 },
+ { 293000000, 0x35 },
+ { 295000000, 0x36 },
+ { 297000000, 0x37 },
+ { 300000000, 0x38 },
+ { 303000000, 0x39 },
+ { 305000000, 0x3A },
+ { 306000000, 0x3B },
+ { 307000000, 0x3C },
+ { 310000000, 0x3D },
+ { 312000000, 0x3E },
+ { 315000000, 0x3F },
+ { 318000000, 0x40 },
+ { 320000000, 0x41 },
+ { 323000000, 0x42 },
+ { 324000000, 0x43 },
+ { 325000000, 0x44 },
+ { 327000000, 0x45 },
+ { 331000000, 0x46 },
+ { 334000000, 0x47 },
+ { 337000000, 0x48 },
+ { 339000000, 0x49 },
+ { 340000000, 0x4A },
+ { 341000000, 0x4B },
+ { 343000000, 0x4C },
+ { 345000000, 0x4D },
+ { 349000000, 0x4E },
+ { 352000000, 0x4F },
+ { 353000000, 0x50 },
+ { 355000000, 0x51 },
+ { 357000000, 0x52 },
+ { 359000000, 0x53 },
+ { 361000000, 0x54 },
+ { 362000000, 0x55 },
+ { 364000000, 0x56 },
+ { 368000000, 0x57 },
+ { 370000000, 0x58 },
+ { 372000000, 0x59 },
+ { 375000000, 0x5A },
+ { 376000000, 0x5B },
+ { 377000000, 0x5C },
+ { 379000000, 0x5D },
+ { 382000000, 0x5E },
+ { 384000000, 0x5F },
+ { 385000000, 0x60 },
+ { 386000000, 0x61 },
+ { 388000000, 0x62 },
+ { 390000000, 0x63 },
+ { 393000000, 0x64 },
+ { 394000000, 0x65 },
+ { 396000000, 0x66 },
+ { 397000000, 0x67 },
+ { 398000000, 0x68 },
+ { 400000000, 0x69 },
+ { 402000000, 0x6A },
+ { 403000000, 0x6B },
+ { 407000000, 0x6C },
+ { 408000000, 0x6D },
+ { 409000000, 0x6E },
+ { 410000000, 0x6F },
+ { 411000000, 0x70 },
+ { 412000000, 0x71 },
+ { 413000000, 0x72 },
+ { 414000000, 0x73 },
+ { 417000000, 0x74 },
+ { 418000000, 0x75 },
+ { 420000000, 0x76 },
+ { 422000000, 0x77 },
+ { 423000000, 0x78 },
+ { 424000000, 0x79 },
+ { 427000000, 0x7A },
+ { 428000000, 0x7B },
+ { 429000000, 0x7D },
+ { 432000000, 0x7F },
+ { 434000000, 0x80 },
+ { 435000000, 0x81 },
+ { 436000000, 0x83 },
+ { 437000000, 0x84 },
+ { 438000000, 0x85 },
+ { 439000000, 0x86 },
+ { 440000000, 0x87 },
+ { 441000000, 0x88 },
+ { 442000000, 0x89 },
+ { 445000000, 0x8A },
+ { 446000000, 0x8B },
+ { 447000000, 0x8C },
+ { 448000000, 0x8E },
+ { 449000000, 0x8F },
+ { 450000000, 0x90 },
+ { 452000000, 0x91 },
+ { 453000000, 0x93 },
+ { 454000000, 0x94 },
+ { 456000000, 0x96 },
+ { 457800000, 0x98 },
+ { 461000000, 0x11 },
+ { 468000000, 0x12 },
+ { 472000000, 0x13 },
+ { 473000000, 0x14 },
+ { 474000000, 0x15 },
+ { 481000000, 0x16 },
+ { 486000000, 0x17 },
+ { 491000000, 0x18 },
+ { 498000000, 0x19 },
+ { 499000000, 0x1A },
+ { 501000000, 0x1B },
+ { 506000000, 0x1C },
+ { 511000000, 0x1D },
+ { 516000000, 0x1E },
+ { 520000000, 0x1F },
+ { 521000000, 0x20 },
+ { 525000000, 0x21 },
+ { 529000000, 0x22 },
+ { 533000000, 0x23 },
+ { 539000000, 0x24 },
+ { 541000000, 0x25 },
+ { 547000000, 0x26 },
+ { 549000000, 0x27 },
+ { 551000000, 0x28 },
+ { 556000000, 0x29 },
+ { 561000000, 0x2A },
+ { 563000000, 0x2B },
+ { 565000000, 0x2C },
+ { 569000000, 0x2D },
+ { 571000000, 0x2E },
+ { 577000000, 0x2F },
+ { 580000000, 0x30 },
+ { 582000000, 0x31 },
+ { 584000000, 0x32 },
+ { 588000000, 0x33 },
+ { 591000000, 0x34 },
+ { 596000000, 0x35 },
+ { 598000000, 0x36 },
+ { 603000000, 0x37 },
+ { 604000000, 0x38 },
+ { 606000000, 0x39 },
+ { 612000000, 0x3A },
+ { 615000000, 0x3B },
+ { 617000000, 0x3C },
+ { 621000000, 0x3D },
+ { 622000000, 0x3E },
+ { 625000000, 0x3F },
+ { 632000000, 0x40 },
+ { 633000000, 0x41 },
+ { 634000000, 0x42 },
+ { 642000000, 0x43 },
+ { 643000000, 0x44 },
+ { 647000000, 0x45 },
+ { 650000000, 0x46 },
+ { 652000000, 0x47 },
+ { 657000000, 0x48 },
+ { 661000000, 0x49 },
+ { 662000000, 0x4A },
+ { 665000000, 0x4B },
+ { 667000000, 0x4C },
+ { 670000000, 0x4D },
+ { 673000000, 0x4E },
+ { 676000000, 0x4F },
+ { 677000000, 0x50 },
+ { 681000000, 0x51 },
+ { 683000000, 0x52 },
+ { 686000000, 0x53 },
+ { 688000000, 0x54 },
+ { 689000000, 0x55 },
+ { 691000000, 0x56 },
+ { 695000000, 0x57 },
+ { 698000000, 0x58 },
+ { 703000000, 0x59 },
+ { 704000000, 0x5A },
+ { 705000000, 0x5B },
+ { 707000000, 0x5C },
+ { 710000000, 0x5D },
+ { 712000000, 0x5E },
+ { 717000000, 0x5F },
+ { 718000000, 0x60 },
+ { 721000000, 0x61 },
+ { 722000000, 0x62 },
+ { 723000000, 0x63 },
+ { 725000000, 0x64 },
+ { 727000000, 0x65 },
+ { 730000000, 0x66 },
+ { 732000000, 0x67 },
+ { 735000000, 0x68 },
+ { 740000000, 0x69 },
+ { 741000000, 0x6A },
+ { 742000000, 0x6B },
+ { 743000000, 0x6C },
+ { 745000000, 0x6D },
+ { 747000000, 0x6E },
+ { 748000000, 0x6F },
+ { 750000000, 0x70 },
+ { 752000000, 0x71 },
+ { 754000000, 0x72 },
+ { 757000000, 0x73 },
+ { 758000000, 0x74 },
+ { 760000000, 0x75 },
+ { 763000000, 0x76 },
+ { 764000000, 0x77 },
+ { 766000000, 0x78 },
+ { 767000000, 0x79 },
+ { 768000000, 0x7A },
+ { 773000000, 0x7B },
+ { 774000000, 0x7C },
+ { 776000000, 0x7D },
+ { 777000000, 0x7E },
+ { 778000000, 0x7F },
+ { 779000000, 0x80 },
+ { 781000000, 0x81 },
+ { 783000000, 0x82 },
+ { 784000000, 0x83 },
+ { 785000000, 0x84 },
+ { 786000000, 0x85 },
+ { 793000000, 0x86 },
+ { 794000000, 0x87 },
+ { 795000000, 0x88 },
+ { 797000000, 0x89 },
+ { 799000000, 0x8A },
+ { 801000000, 0x8B },
+ { 802000000, 0x8C },
+ { 803000000, 0x8D },
+ { 804000000, 0x8E },
+ { 810000000, 0x90 },
+ { 811000000, 0x91 },
+ { 812000000, 0x92 },
+ { 814000000, 0x93 },
+ { 816000000, 0x94 },
+ { 817000000, 0x96 },
+ { 818000000, 0x97 },
+ { 820000000, 0x98 },
+ { 821000000, 0x99 },
+ { 822000000, 0x9A },
+ { 828000000, 0x9B },
+ { 829000000, 0x9D },
+ { 830000000, 0x9F },
+ { 831000000, 0xA0 },
+ { 833000000, 0xA1 },
+ { 835000000, 0xA2 },
+ { 836000000, 0xA3 },
+ { 837000000, 0xA4 },
+ { 838000000, 0xA6 },
+ { 840000000, 0xA8 },
+ { 842000000, 0xA9 },
+ { 845000000, 0xAA },
+ { 846000000, 0xAB },
+ { 847000000, 0xAD },
+ { 848000000, 0xAE },
+ { 852000000, 0xAF },
+ { 853000000, 0xB0 },
+ { 858000000, 0xB1 },
+ { 860000000, 0xB2 },
+ { 861000000, 0xB3 },
+ { 862000000, 0xB4 },
+ { 863000000, 0xB6 },
+ { 864000000, 0xB8 },
+ { 865000000, 0xB9 },
+ { 0, 0x00 }, /* Table End */
+};
+
+
+static struct SMap2 m_KM_Map[] = {
+ { 47900000, 3, 2 },
+ { 61100000, 3, 1 },
+ { 350000000, 3, 0 },
+ { 720000000, 2, 1 },
+ { 865000000, 3, 3 },
+ { 0, 0x00 }, /* Table End */
+};
+
+static struct SMap2 m_Main_PLL_Map[] = {
+ { 33125000, 0x57, 0xF0 },
+ { 35500000, 0x56, 0xE0 },
+ { 38188000, 0x55, 0xD0 },
+ { 41375000, 0x54, 0xC0 },
+ { 45125000, 0x53, 0xB0 },
+ { 49688000, 0x52, 0xA0 },
+ { 55188000, 0x51, 0x90 },
+ { 62125000, 0x50, 0x80 },
+ { 66250000, 0x47, 0x78 },
+ { 71000000, 0x46, 0x70 },
+ { 76375000, 0x45, 0x68 },
+ { 82750000, 0x44, 0x60 },
+ { 90250000, 0x43, 0x58 },
+ { 99375000, 0x42, 0x50 },
+ { 110375000, 0x41, 0x48 },
+ { 124250000, 0x40, 0x40 },
+ { 132500000, 0x37, 0x3C },
+ { 142000000, 0x36, 0x38 },
+ { 152750000, 0x35, 0x34 },
+ { 165500000, 0x34, 0x30 },
+ { 180500000, 0x33, 0x2C },
+ { 198750000, 0x32, 0x28 },
+ { 220750000, 0x31, 0x24 },
+ { 248500000, 0x30, 0x20 },
+ { 265000000, 0x27, 0x1E },
+ { 284000000, 0x26, 0x1C },
+ { 305500000, 0x25, 0x1A },
+ { 331000000, 0x24, 0x18 },
+ { 361000000, 0x23, 0x16 },
+ { 397500000, 0x22, 0x14 },
+ { 441500000, 0x21, 0x12 },
+ { 497000000, 0x20, 0x10 },
+ { 530000000, 0x17, 0x0F },
+ { 568000000, 0x16, 0x0E },
+ { 611000000, 0x15, 0x0D },
+ { 662000000, 0x14, 0x0C },
+ { 722000000, 0x13, 0x0B },
+ { 795000000, 0x12, 0x0A },
+ { 883000000, 0x11, 0x09 },
+ { 994000000, 0x10, 0x08 },
+ { 0, 0x00, 0x00 }, /* Table End */
+};
+
+static struct SMap2 m_Cal_PLL_Map[] = {
+ { 33813000, 0xDD, 0xD0 },
+ { 36625000, 0xDC, 0xC0 },
+ { 39938000, 0xDB, 0xB0 },
+ { 43938000, 0xDA, 0xA0 },
+ { 48813000, 0xD9, 0x90 },
+ { 54938000, 0xD8, 0x80 },
+ { 62813000, 0xD3, 0x70 },
+ { 67625000, 0xCD, 0x68 },
+ { 73250000, 0xCC, 0x60 },
+ { 79875000, 0xCB, 0x58 },
+ { 87875000, 0xCA, 0x50 },
+ { 97625000, 0xC9, 0x48 },
+ { 109875000, 0xC8, 0x40 },
+ { 125625000, 0xC3, 0x38 },
+ { 135250000, 0xBD, 0x34 },
+ { 146500000, 0xBC, 0x30 },
+ { 159750000, 0xBB, 0x2C },
+ { 175750000, 0xBA, 0x28 },
+ { 195250000, 0xB9, 0x24 },
+ { 219750000, 0xB8, 0x20 },
+ { 251250000, 0xB3, 0x1C },
+ { 270500000, 0xAD, 0x1A },
+ { 293000000, 0xAC, 0x18 },
+ { 319500000, 0xAB, 0x16 },
+ { 351500000, 0xAA, 0x14 },
+ { 390500000, 0xA9, 0x12 },
+ { 439500000, 0xA8, 0x10 },
+ { 502500000, 0xA3, 0x0E },
+ { 541000000, 0x9D, 0x0D },
+ { 586000000, 0x9C, 0x0C },
+ { 639000000, 0x9B, 0x0B },
+ { 703000000, 0x9A, 0x0A },
+ { 781000000, 0x99, 0x09 },
+ { 879000000, 0x98, 0x08 },
+ { 0, 0x00, 0x00 }, /* Table End */
+};
+
+static struct SMap m_GainTaper_Map[] = {
+ { 45400000, 0x1F },
+ { 45800000, 0x1E },
+ { 46200000, 0x1D },
+ { 46700000, 0x1C },
+ { 47100000, 0x1B },
+ { 47500000, 0x1A },
+ { 47900000, 0x19 },
+ { 49600000, 0x17 },
+ { 51200000, 0x16 },
+ { 52900000, 0x15 },
+ { 54500000, 0x14 },
+ { 56200000, 0x13 },
+ { 57800000, 0x12 },
+ { 59500000, 0x11 },
+ { 61100000, 0x10 },
+ { 67600000, 0x0D },
+ { 74200000, 0x0C },
+ { 80700000, 0x0B },
+ { 87200000, 0x0A },
+ { 93800000, 0x09 },
+ { 100300000, 0x08 },
+ { 106900000, 0x07 },
+ { 113400000, 0x06 },
+ { 119900000, 0x05 },
+ { 126500000, 0x04 },
+ { 133000000, 0x03 },
+ { 139500000, 0x02 },
+ { 146100000, 0x01 },
+ { 152600000, 0x00 },
+ { 154300000, 0x1F },
+ { 156100000, 0x1E },
+ { 157800000, 0x1D },
+ { 159500000, 0x1C },
+ { 161200000, 0x1B },
+ { 163000000, 0x1A },
+ { 164700000, 0x19 },
+ { 170200000, 0x17 },
+ { 175800000, 0x16 },
+ { 181300000, 0x15 },
+ { 186900000, 0x14 },
+ { 192400000, 0x13 },
+ { 198000000, 0x12 },
+ { 203500000, 0x11 },
+ { 216200000, 0x14 },
+ { 228900000, 0x13 },
+ { 241600000, 0x12 },
+ { 254400000, 0x11 },
+ { 267100000, 0x10 },
+ { 279800000, 0x0F },
+ { 292500000, 0x0E },
+ { 305200000, 0x0D },
+ { 317900000, 0x0C },
+ { 330700000, 0x0B },
+ { 343400000, 0x0A },
+ { 356100000, 0x09 },
+ { 368800000, 0x08 },
+ { 381500000, 0x07 },
+ { 394200000, 0x06 },
+ { 406900000, 0x05 },
+ { 419700000, 0x04 },
+ { 432400000, 0x03 },
+ { 445100000, 0x02 },
+ { 457800000, 0x01 },
+ { 476300000, 0x19 },
+ { 494800000, 0x18 },
+ { 513300000, 0x17 },
+ { 531800000, 0x16 },
+ { 550300000, 0x15 },
+ { 568900000, 0x14 },
+ { 587400000, 0x13 },
+ { 605900000, 0x12 },
+ { 624400000, 0x11 },
+ { 642900000, 0x10 },
+ { 661400000, 0x0F },
+ { 679900000, 0x0E },
+ { 698400000, 0x0D },
+ { 716900000, 0x0C },
+ { 735400000, 0x0B },
+ { 753900000, 0x0A },
+ { 772500000, 0x09 },
+ { 791000000, 0x08 },
+ { 809500000, 0x07 },
+ { 828000000, 0x06 },
+ { 846500000, 0x05 },
+ { 865000000, 0x04 },
+ { 0, 0x00 }, /* Table End */
+};
+
+static struct SMap m_RF_Cal_DC_Over_DT_Map[] = {
+ { 47900000, 0x00 },
+ { 55000000, 0x00 },
+ { 61100000, 0x0A },
+ { 64000000, 0x0A },
+ { 82000000, 0x14 },
+ { 84000000, 0x19 },
+ { 119000000, 0x1C },
+ { 124000000, 0x20 },
+ { 129000000, 0x2A },
+ { 134000000, 0x32 },
+ { 139000000, 0x39 },
+ { 144000000, 0x3E },
+ { 149000000, 0x3F },
+ { 152600000, 0x40 },
+ { 154000000, 0x40 },
+ { 164700000, 0x41 },
+ { 203500000, 0x32 },
+ { 353000000, 0x19 },
+ { 356000000, 0x1A },
+ { 359000000, 0x1B },
+ { 363000000, 0x1C },
+ { 366000000, 0x1D },
+ { 369000000, 0x1E },
+ { 373000000, 0x1F },
+ { 376000000, 0x20 },
+ { 379000000, 0x21 },
+ { 383000000, 0x22 },
+ { 386000000, 0x23 },
+ { 389000000, 0x24 },
+ { 393000000, 0x25 },
+ { 396000000, 0x26 },
+ { 399000000, 0x27 },
+ { 402000000, 0x28 },
+ { 404000000, 0x29 },
+ { 407000000, 0x2A },
+ { 409000000, 0x2B },
+ { 412000000, 0x2C },
+ { 414000000, 0x2D },
+ { 417000000, 0x2E },
+ { 419000000, 0x2F },
+ { 422000000, 0x30 },
+ { 424000000, 0x31 },
+ { 427000000, 0x32 },
+ { 429000000, 0x33 },
+ { 432000000, 0x34 },
+ { 434000000, 0x35 },
+ { 437000000, 0x36 },
+ { 439000000, 0x37 },
+ { 442000000, 0x38 },
+ { 444000000, 0x39 },
+ { 447000000, 0x3A },
+ { 449000000, 0x3B },
+ { 457800000, 0x3C },
+ { 465000000, 0x0F },
+ { 477000000, 0x12 },
+ { 483000000, 0x14 },
+ { 502000000, 0x19 },
+ { 508000000, 0x1B },
+ { 519000000, 0x1C },
+ { 522000000, 0x1D },
+ { 524000000, 0x1E },
+ { 534000000, 0x1F },
+ { 549000000, 0x20 },
+ { 554000000, 0x22 },
+ { 584000000, 0x24 },
+ { 589000000, 0x26 },
+ { 658000000, 0x27 },
+ { 664000000, 0x2C },
+ { 669000000, 0x2D },
+ { 699000000, 0x2E },
+ { 704000000, 0x30 },
+ { 709000000, 0x31 },
+ { 714000000, 0x32 },
+ { 724000000, 0x33 },
+ { 729000000, 0x36 },
+ { 739000000, 0x38 },
+ { 744000000, 0x39 },
+ { 749000000, 0x3B },
+ { 754000000, 0x3C },
+ { 759000000, 0x3D },
+ { 764000000, 0x3E },
+ { 769000000, 0x3F },
+ { 774000000, 0x40 },
+ { 779000000, 0x41 },
+ { 784000000, 0x43 },
+ { 789000000, 0x46 },
+ { 794000000, 0x48 },
+ { 799000000, 0x4B },
+ { 804000000, 0x4F },
+ { 809000000, 0x54 },
+ { 814000000, 0x59 },
+ { 819000000, 0x5D },
+ { 824000000, 0x61 },
+ { 829000000, 0x68 },
+ { 834000000, 0x6E },
+ { 839000000, 0x75 },
+ { 844000000, 0x7E },
+ { 849000000, 0x82 },
+ { 854000000, 0x84 },
+ { 859000000, 0x8F },
+ { 865000000, 0x9A },
+ { 0, 0x00 }, /* Table End */
+};
+
+
+static struct SMap m_IR_Meas_Map[] = {
+ { 200000000, 0x05 },
+ { 400000000, 0x06 },
+ { 865000000, 0x07 },
+ { 0, 0x00 }, /* Table End */
+};
+
+static struct SMap2 m_CID_Target_Map[] = {
+ { 46000000, 0x04, 18 },
+ { 52200000, 0x0A, 15 },
+ { 70100000, 0x01, 40 },
+ { 136800000, 0x18, 40 },
+ { 156700000, 0x18, 40 },
+ { 186250000, 0x0A, 40 },
+ { 230000000, 0x0A, 40 },
+ { 345000000, 0x18, 40 },
+ { 426000000, 0x0E, 40 },
+ { 489500000, 0x1E, 40 },
+ { 697500000, 0x32, 40 },
+ { 842000000, 0x3A, 40 },
+ { 0, 0x00, 0 }, /* Table End */
+};
+
+static struct SRFBandMap m_RF_Band_Map[7] = {
+ { 47900000, 46000000, 0, 0},
+ { 61100000, 52200000, 0, 0},
+ { 152600000, 70100000, 136800000, 0},
+ { 164700000, 156700000, 0, 0},
+ { 203500000, 186250000, 0, 0},
+ { 457800000, 230000000, 345000000, 426000000},
+ { 865000000, 489500000, 697500000, 842000000},
+};
+
+static u8 m_Thermometer_Map_1[16] = {
+ 60, 62, 66, 64,
+ 74, 72, 68, 70,
+ 90, 88, 84, 86,
+ 76, 78, 82, 80,
+};
+
+static u8 m_Thermometer_Map_2[16] = {
+ 92, 94, 98, 96,
+ 106, 104, 100, 102,
+ 122, 120, 116, 118,
+ 108, 110, 114, 112,
+};
diff --git a/drivers/media/dvb/ngene/Kconfig b/drivers/media/dvb/ngene/Kconfig
index cec242b..64c8470 100644
--- a/drivers/media/dvb/ngene/Kconfig
+++ b/drivers/media/dvb/ngene/Kconfig
@@ -5,6 +5,8 @@ config DVB_NGENE
select DVB_STV6110x if !DVB_FE_CUSTOMISE
select DVB_STV090x if !DVB_FE_CUSTOMISE
select DVB_LGDT330X if !DVB_FE_CUSTOMISE
+ select DVB_DRXK if !DVB_FE_CUSTOMISE
+ select DVB_TDA18271C2DD if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE
---help---
Support for Micronas PCI express cards with nGene bridge.
diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
index fcf4be9..0564192 100644
--- a/drivers/media/dvb/ngene/ngene-cards.c
+++ b/drivers/media/dvb/ngene/ngene-cards.c
@@ -40,6 +40,8 @@
#include "lnbh24.h"
#include "lgdt330x.h"
#include "mt2131.h"
+#include "tda18271c2dd.h"
+#include "drxk.h"
/****************************************************************************/
@@ -83,6 +85,49 @@ static int tuner_attach_stv6110(struct ngene_channel *chan)
}
+static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct ngene_channel *chan = fe->sec_priv;
+ int status;
+
+ if (enable) {
+ down(&chan->dev->pll_mutex);
+ status = chan->gate_ctrl(fe, 1);
+ } else {
+ status = chan->gate_ctrl(fe, 0);
+ up(&chan->dev->pll_mutex);
+ }
+ return status;
+}
+
+static int tuner_attach_tda18271(struct ngene_channel *chan)
+{
+ struct i2c_adapter *i2c;
+ struct dvb_frontend *fe;
+
+ i2c = &chan->dev->channel[0].i2c_adapter;
+ if (chan->fe->ops.i2c_gate_ctrl)
+ chan->fe->ops.i2c_gate_ctrl(chan->fe, 1);
+ fe = dvb_attach(tda18271c2dd_attach, chan->fe, i2c, 0x60);
+ if (chan->fe->ops.i2c_gate_ctrl)
+ chan->fe->ops.i2c_gate_ctrl(chan->fe, 0);
+ if (!fe) {
+ printk(KERN_ERR "No TDA18271 found!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int tuner_attach_probe(struct ngene_channel *chan)
+{
+ if (chan->demod_type == 0)
+ return tuner_attach_stv6110(chan);
+ if (chan->demod_type == 1)
+ return tuner_attach_tda18271(chan);
+ return -EINVAL;
+}
+
static int demod_attach_stv0900(struct ngene_channel *chan)
{
struct i2c_adapter *i2c;
@@ -130,6 +175,60 @@ static void cineS2_tuner_i2c_lock(struct dvb_frontend *fe, int lock)
up(&chan->dev->pll_mutex);
}
+static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
+{
+ struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1 } };
+ return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
+}
+
+static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
+ u16 reg, u8 *val)
+{
+ u8 msg[2] = {reg>>8, reg&0xff};
+ struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
+ .buf = msg, .len = 2},
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1} };
+ return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
+}
+
+static int port_has_stv0900(struct i2c_adapter *i2c, int port)
+{
+ u8 val;
+ if (i2c_read_reg16(i2c, 0x68+port/2, 0xf100, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static int port_has_drxk(struct i2c_adapter *i2c, int port)
+{
+ u8 val;
+
+ if (i2c_read(i2c, 0x29+port, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static int demod_attach_drxk(struct ngene_channel *chan,
+ struct i2c_adapter *i2c)
+{
+ struct drxk_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.adr = 0x29 + (chan->number ^ 2);
+
+ chan->fe = dvb_attach(drxk_attach, &config, i2c, &chan->fe2);
+ if (!chan->fe) {
+ printk(KERN_ERR "No DRXK found!\n");
+ return -ENODEV;
+ }
+ chan->fe->sec_priv = chan;
+ chan->gate_ctrl = chan->fe->ops.i2c_gate_ctrl;
+ chan->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+ return 0;
+}
+
static int cineS2_probe(struct ngene_channel *chan)
{
struct i2c_adapter *i2c;
@@ -144,43 +243,42 @@ static int cineS2_probe(struct ngene_channel *chan)
else
i2c = &chan->dev->channel[1].i2c_adapter;
- fe_conf = chan->dev->card_info->fe_config[chan->number];
- i2c_msg.addr = fe_conf->address;
-
- /* probe demod */
- i2c_msg.len = 2;
- buf[0] = 0xf1;
- buf[1] = 0x00;
- rc = i2c_transfer(i2c, &i2c_msg, 1);
- if (rc != 1)
- return -ENODEV;
-
- /* demod found, attach it */
- rc = demod_attach_stv0900(chan);
- if (rc < 0 || chan->number < 2)
- return rc;
-
- /* demod #2: reprogram outputs DPN1 & DPN2 */
- i2c_msg.len = 3;
- buf[0] = 0xf1;
- switch (chan->number) {
- case 2:
- buf[1] = 0x5c;
- buf[2] = 0xc2;
- break;
- case 3:
- buf[1] = 0x61;
- buf[2] = 0xcc;
- break;
- default:
+ if (port_has_stv0900(i2c, chan->number)) {
+ chan->demod_type = 0;
+ fe_conf = chan->dev->card_info->fe_config[chan->number];
+ /* demod found, attach it */
+ rc = demod_attach_stv0900(chan);
+ if (rc < 0 || chan->number < 2)
+ return rc;
+
+ /* demod #2: reprogram outputs DPN1 & DPN2 */
+ i2c_msg.addr = fe_conf->address;
+ i2c_msg.len = 3;
+ buf[0] = 0xf1;
+ switch (chan->number) {
+ case 2:
+ buf[1] = 0x5c;
+ buf[2] = 0xc2;
+ break;
+ case 3:
+ buf[1] = 0x61;
+ buf[2] = 0xcc;
+ break;
+ default:
+ return -ENODEV;
+ }
+ rc = i2c_transfer(i2c, &i2c_msg, 1);
+ if (rc != 1) {
+ printk(KERN_ERR DEVICE_NAME ": could not setup DPNx\n");
+ return -EIO;
+ }
+ } else if (port_has_drxk(i2c, chan->number^2)) {
+ chan->demod_type = 1;
+ demod_attach_drxk(chan, i2c);
+ } else {
+ printk(KERN_ERR "No demod found on chan %d\n", chan->number);
return -ENODEV;
}
- rc = i2c_transfer(i2c, &i2c_msg, 1);
- if (rc != 1) {
- printk(KERN_ERR DEVICE_NAME ": could not setup DPNx\n");
- return -EIO;
- }
-
return 0;
}
@@ -306,7 +404,7 @@ static struct ngene_info ngene_info_satixS2v2 = {
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
NGENE_IO_TSOUT},
.demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe},
.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0a, 0x08, 0x0b, 0x09},
@@ -321,7 +419,7 @@ static struct ngene_info ngene_info_cineS2v5 = {
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
NGENE_IO_TSOUT},
.demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe},
.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0a, 0x08, 0x0b, 0x09},
@@ -331,13 +429,13 @@ static struct ngene_info ngene_info_cineS2v5 = {
};
-static struct ngene_info ngene_info_duoFlexS2 = {
+static struct ngene_info ngene_info_duoFlex = {
.type = NGENE_SIDEWINDER,
- .name = "Digital Devices DuoFlex S2 miniPCIe",
+ .name = "Digital Devices DuoFlex PCIe or miniPCIe",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
NGENE_IO_TSOUT},
.demod_attach = {cineS2_probe, cineS2_probe, cineS2_probe, cineS2_probe},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110},
+ .tuner_attach = {tuner_attach_probe, tuner_attach_probe, tuner_attach_probe, tuner_attach_probe},
.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0a, 0x08, 0x0b, 0x09},
@@ -385,8 +483,8 @@ static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2),
NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5),
- NGENE_ID(0x18c3, 0xdd10, ngene_info_duoFlexS2),
- NGENE_ID(0x18c3, 0xdd20, ngene_info_duoFlexS2),
+ NGENE_ID(0x18c3, 0xdd10, ngene_info_duoFlex),
+ NGENE_ID(0x18c3, 0xdd20, ngene_info_duoFlex),
NGENE_ID(0x1461, 0x062e, ngene_info_m780),
{0}
};
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index 6927c72..f129a93 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -41,7 +41,7 @@
#include "ngene.h"
-static int one_adapter = 1;
+static int one_adapter;
module_param(one_adapter, int, 0444);
MODULE_PARM_DESC(one_adapter, "Use only one adapter.");
@@ -461,7 +461,7 @@ static u8 TSFeatureDecoderSetup[8 * 5] = {
0x42, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00,
0x40, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXH */
0x71, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXHser */
- 0x72, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* S2ser */
+ 0x72, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* S2ser */
0x40, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* LGDT3303 */
};
@@ -507,7 +507,7 @@ void FillTSBuffer(void *Buffer, int Length, u32 Flags)
{
u32 *ptr = Buffer;
- memset(Buffer, 0xff, Length);
+ memset(Buffer, TS_FILLER, Length);
while (Length > 0) {
if (Flags & DF_SWAP32)
*ptr = 0x471FFF10;
@@ -1443,6 +1443,9 @@ static void release_channel(struct ngene_channel *chan)
chan->ci_dev = NULL;
}
+ if (chan->fe2)
+ dvb_unregister_frontend(chan->fe2);
+
if (chan->fe) {
dvb_unregister_frontend(chan->fe);
dvb_frontend_detach(chan->fe);
@@ -1534,6 +1537,14 @@ static int init_channel(struct ngene_channel *chan)
goto err;
chan->has_demux = true;
}
+ if (chan->fe2) {
+ if (dvb_register_frontend(adapter, chan->fe2) < 0)
+ goto err;
+ chan->fe2->tuner_priv = chan->fe->tuner_priv;
+ memcpy(&chan->fe2->ops.tuner_ops,
+ &chan->fe->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ }
if (chan->has_demux) {
ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
@@ -1571,11 +1582,18 @@ static int init_channels(struct ngene *dev)
return 0;
}
+static struct cxd2099_cfg cxd_cfg = {
+ .bitrate = 62000,
+ .adr = 0x40,
+ .polarity = 0,
+ .clock_mode = 0,
+};
+
static void cxd_attach(struct ngene *dev)
{
struct ngene_ci *ci = &dev->ci;
- ci->en = cxd2099_attach(0x40, dev, &dev->channel[0].i2c_adapter);
+ ci->en = cxd2099_attach(&cxd_cfg, dev, &dev->channel[0].i2c_adapter);
ci->dev = dev;
return;
}
diff --git a/drivers/media/dvb/ngene/ngene-dvb.c b/drivers/media/dvb/ngene/ngene-dvb.c
index 0b49432..fcb16a6 100644
--- a/drivers/media/dvb/ngene/ngene-dvb.c
+++ b/drivers/media/dvb/ngene/ngene-dvb.c
@@ -118,6 +118,16 @@ static void swap_buffer(u32 *p, u32 len)
}
}
+/* start of filler packet */
+static u8 fill_ts[] = { 0x47, 0x1f, 0xff, 0x10, TS_FILLER };
+
+/* #define DEBUG_CI_XFER */
+#ifdef DEBUG_CI_XFER
+static u32 ok;
+static u32 overflow;
+static u32 stripped;
+#endif
+
void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
{
struct ngene_channel *chan = priv;
@@ -126,21 +136,41 @@ void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
if (flags & DF_SWAP32)
swap_buffer(buf, len);
+
if (dev->ci.en && chan->number == 2) {
- if (dvb_ringbuffer_free(&dev->tsin_rbuf) > len) {
- dvb_ringbuffer_write(&dev->tsin_rbuf, buf, len);
- wake_up_interruptible(&dev->tsin_rbuf.queue);
+ while (len >= 188) {
+ if (memcmp(buf, fill_ts, sizeof fill_ts) != 0) {
+ if (dvb_ringbuffer_free(&dev->tsin_rbuf) >= 188) {
+ dvb_ringbuffer_write(&dev->tsin_rbuf, buf, 188);
+ wake_up(&dev->tsin_rbuf.queue);
+#ifdef DEBUG_CI_XFER
+ ok++;
+#endif
+ }
+#ifdef DEBUG_CI_XFER
+ else
+ overflow++;
+#endif
+ }
+#ifdef DEBUG_CI_XFER
+ else
+ stripped++;
+
+ if (ok % 100 == 0 && overflow)
+ printk(KERN_WARNING "%s: ok %u overflow %u dropped %u\n", __func__, ok, overflow, stripped);
+#endif
+ buf += 188;
+ len -= 188;
}
- return 0;
+ return NULL;
}
- if (chan->users > 0) {
+
+ if (chan->users > 0)
dvb_dmx_swfilter(&chan->demux, buf, len);
- }
+
return NULL;
}
-u8 fill_ts[188] = { 0x47, 0x1f, 0xff, 0x10 };
-
void *tsout_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
{
struct ngene_channel *chan = priv;
diff --git a/drivers/media/dvb/ngene/ngene.h b/drivers/media/dvb/ngene/ngene.h
index 40fce9e..5443dc0 100644
--- a/drivers/media/dvb/ngene/ngene.h
+++ b/drivers/media/dvb/ngene/ngene.h
@@ -641,8 +641,11 @@ struct ngene_channel {
int mode;
bool has_adapter;
bool has_demux;
+ int demod_type;
+ int (*gate_ctrl)(struct dvb_frontend *, int);
struct dvb_frontend *fe;
+ struct dvb_frontend *fe2;
struct dmxdev dmxdev;
struct dvb_demux demux;
struct dvb_net dvbnet;
@@ -786,6 +789,8 @@ struct ngene {
u8 uart_rbuf[UART_RBUF_LEN];
int uart_rp, uart_wp;
+#define TS_FILLER 0x6f
+
u8 *tsout_buf;
#define TSOUT_BUF_SIZE (512*188*8)
struct dvb_ringbuffer tsout_rbuf;
@@ -852,7 +857,7 @@ struct ngene_info {
};
#ifdef NGENE_V4L
-struct ngene_format{
+struct ngene_format {
char *name;
int fourcc; /* video4linux 2 */
int btformat; /* BT848_COLOR_FMT_* */
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 78765ed..7331e84 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1147,7 +1147,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
if (!client) {
sms_err("bad parameter.");
- return -EFAULT;
+ return -EINVAL;
}
registered_client = smscore_find_client(coredev, data_type, id);
if (registered_client == client)
diff --git a/drivers/media/dvb/siano/smscoreapi.h b/drivers/media/dvb/siano/smscoreapi.h
index 8ecadec..c592ae0 100644
--- a/drivers/media/dvb/siano/smscoreapi.h
+++ b/drivers/media/dvb/siano/smscoreapi.h
@@ -22,7 +22,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#ifndef __SMS_CORE_API_H__
#define __SMS_CORE_API_H__
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 3d8cc42..25e58cb 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -102,10 +102,7 @@
/*
* Version Information
*/
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
-
-#define DRIVER_VERSION "v0.46"
-#define RADIO_VERSION KERNEL_VERSION(0, 4, 6)
+#define DRIVER_VERSION "0.4.7"
#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>"
#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver"
@@ -335,7 +332,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "dsbr100", sizeof(v->driver));
strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER;
return 0;
}
@@ -647,3 +643,4 @@ module_exit (dsbr100_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 4ce10db..1c3f844 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -33,7 +33,6 @@
#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* msleep */
#include <linux/videodev2.h> /* kernel radio structs */
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -41,6 +40,7 @@
MODULE_AUTHOR("M.Kirkwood");
MODULE_DESCRIPTION("A driver for the RadioTrack/RadioReveal radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
#ifndef CONFIG_RADIO_RTRACK_PORT
#define CONFIG_RADIO_RTRACK_PORT -1
@@ -53,8 +53,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the RadioTrack card (0x20f or 0x30f)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct rtrack
{
struct v4l2_device v4l2_dev;
@@ -223,7 +221,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-aimslab", sizeof(v->driver));
strlcpy(v->card, "RadioTrack", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index dd8a6ab..eed7b08 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -30,7 +30,6 @@
#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <linux/videodev2.h> /* kernel radio structs */
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -38,6 +37,7 @@
MODULE_AUTHOR("Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
MODULE_DESCRIPTION("A driver for the Aztech radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
/* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */
@@ -53,8 +53,6 @@ module_param(io, int, 0);
module_param(radio_nr, int, 0);
MODULE_PARM_DESC(io, "I/O address of the Aztech card (0x350 or 0x358)");
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct aztech
{
struct v4l2_device v4l2_dev;
@@ -188,7 +186,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-aztech", sizeof(v->driver));
strlcpy(v->card, "Aztech Radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index bc9ad08..16a089f 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -30,7 +30,6 @@
* Changed API to V4L2
*/
-#include <linux/version.h>
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
@@ -46,6 +45,7 @@
MODULE_AUTHOR("Fred Gleason, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
MODULE_DESCRIPTION("A driver for the ADS Cadet AM/FM/RDS radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.3.4");
static int io = -1; /* default to isapnp activation */
static int radio_nr = -1;
@@ -54,8 +54,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of Cadet card (0x330,0x332,0x334,0x336,0x338,0x33a,0x33c,0x33e)");
module_param(radio_nr, int, 0);
-#define CADET_VERSION KERNEL_VERSION(0, 3, 3)
-
#define RDS_BUFFER 256
#define RDS_RX_FLAG 1
#define MBS_RX_FLAG 2
@@ -361,7 +359,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "ADS Cadet", sizeof(v->driver));
strlcpy(v->card, "ADS Cadet", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = CADET_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE;
return 0;
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 2599364..edadc84 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -21,21 +21,19 @@
#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <linux/videodev2.h> /* kernel radio structs */
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/mutex.h>
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 3)
-
/*
* Module info.
*/
-MODULE_AUTHOR("Jonas Munsin, Pekka Seppänen <pexu@kapsi.fi>");
+MODULE_AUTHOR("Jonas Munsin, Pekka Seppänen <pexu@kapsi.fi>");
MODULE_DESCRIPTION("A driver for the GemTek Radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.4");
/*
* Module params.
@@ -387,7 +385,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-gemtek", sizeof(v->driver));
strlcpy(v->card, "GemTek", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index e83e840..f872a54 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -40,15 +40,18 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/videodev2.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#define DRIVER_VERSION "0.7.8"
+
+
MODULE_AUTHOR("Dimitromanolakis Apostolos, apdim@grecian.net");
MODULE_DESCRIPTION("Radio driver for the Guillemot Maxi Radio FM2000 radio.");
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
static int radio_nr = -1;
module_param(radio_nr, int, 0);
@@ -58,10 +61,6 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "activates debug info");
-#define DRIVER_VERSION "0.77"
-
-#define RADIO_VERSION KERNEL_VERSION(0, 7, 7)
-
#define dprintk(dev, num, fmt, arg...) \
v4l2_dbg(num, debug, &dev->v4l2_dev, fmt, ## arg)
@@ -195,7 +194,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-maxiradio", sizeof(v->driver));
strlcpy(v->card, "Maxi Radio FM2000 radio", sizeof(v->card));
snprintf(v->bus_info, sizeof(v->bus_info), "PCI:%s", pci_name(dev->pdev));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index b3a635b..1742bd8 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -63,18 +63,17 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/usb.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/mutex.h>
/* driver and module definitions */
#define DRIVER_AUTHOR "Alexey Klimov <klimov.linux@gmail.com>"
#define DRIVER_DESC "AverMedia MR 800 USB FM radio driver"
-#define DRIVER_VERSION "0.11"
-#define RADIO_VERSION KERNEL_VERSION(0, 1, 1)
+#define DRIVER_VERSION "0.1.2"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
#define USB_AMRADIO_VENDOR 0x07ca
#define USB_AMRADIO_PRODUCT 0xb800
@@ -301,7 +300,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-mr800", sizeof(v->driver));
strlcpy(v->card, "AverMedia MR 800 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER;
return 0;
}
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index 8d6ea59..3628be6 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -15,7 +15,6 @@
#include <linux/delay.h> /* udelay */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -23,6 +22,7 @@
MODULE_AUTHOR("Ben Pfaff");
MODULE_DESCRIPTION("A driver for the RadioTrack II radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
#ifndef CONFIG_RADIO_RTRACK2_PORT
#define CONFIG_RADIO_RTRACK2_PORT -1
@@ -35,8 +35,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the RadioTrack card (0x20c or 0x30c)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct rtrack2
{
struct v4l2_device v4l2_dev;
@@ -121,7 +119,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-rtrack2", sizeof(v->driver));
strlcpy(v->card, "RadioTrack II", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index b5a5f89..22c5743 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -16,7 +16,6 @@
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
*/
-#include <linux/version.h>
#include <linux/kernel.h> /* __setup */
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
@@ -32,6 +31,7 @@
MODULE_AUTHOR("Petr Vandrovec, vandrove@vc.cvut.cz and M. Kirkwood");
MODULE_DESCRIPTION("A driver for the SF16-FMI and SF16-FMP radio.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
static int io = -1;
static int radio_nr = -1;
@@ -40,8 +40,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the SF16-FMI or SF16-FMP card (0x284 or 0x384)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct fmi
{
struct v4l2_device v4l2_dev;
@@ -134,7 +132,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-sf16fmi", sizeof(v->driver));
strlcpy(v->card, "SF16-FMx radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 87bad76..2dd4859 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -1,441 +1,209 @@
-/* SF16FMR2 radio driver for Linux radio support
- * heavily based on fmi driver...
- * (c) 2000-2002 Ziglio Frediano, freddy77@angelfire.com
+/* SF16-FMR2 radio driver for Linux
+ * Copyright (c) 2011 Ondrej Zary
*
- * Notes on the hardware
- *
- * Frequency control is done digitally -- ie out(port,encodefreq(95.8));
- * No volume control - only mute/unmute - you have to use line volume
- *
- * For read stereo/mono you must wait 0.1 sec after set frequency and
- * card unmuted so I set frequency on unmute
- * Signal handling seem to work only on autoscanning (not implemented)
- *
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Original driver was (c) 2000-2002 Ziglio Frediano, freddy77@angelfire.com
+ * but almost nothing remained here after conversion to generic TEA575x
+ * implementation
*/
+#include <linux/delay.h>
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
-#include <linux/delay.h> /* udelay */
-#include <linux/videodev2.h> /* kernel radio structs */
-#include <linux/mutex.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
+#include <sound/tea575x-tuner.h>
-MODULE_AUTHOR("Ziglio Frediano, freddy77@angelfire.com");
-MODULE_DESCRIPTION("A driver for the SF16FMR2 radio.");
+MODULE_AUTHOR("Ondrej Zary");
+MODULE_DESCRIPTION("MediaForte SF16-FMR2 FM radio card driver");
MODULE_LICENSE("GPL");
-static int io = 0x384;
-static int radio_nr = -1;
-
-module_param(io, int, 0);
-MODULE_PARM_DESC(io, "I/O address of the SF16FMR2 card (should be 0x384, if do not work try 0x284)");
-module_param(radio_nr, int, 0);
-
-#define RADIO_VERSION KERNEL_VERSION(0,0,2)
-
-#define AUD_VOL_INDEX 1
-
-#undef DEBUG
-//#define DEBUG 1
-
-#ifdef DEBUG
-# define debug_print(s) printk s
-#else
-# define debug_print(s)
-#endif
-
-/* this should be static vars for module size */
-struct fmr2
-{
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct mutex lock;
+struct fmr2 {
int io;
- int curvol; /* 0-15 */
- int mute;
- int stereo; /* card is producing stereo audio */
- unsigned long curfreq; /* freq in kHz */
- int card_type;
+ struct snd_tea575x tea;
+ struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *balance;
};
+/* the port is hardwired so no need to support multiple cards */
+#define FMR2_PORT 0x384
static struct fmr2 fmr2_card;
-/* hw precision is 12.5 kHz
- * It is only useful to give freq in interval of 200 (=0.0125Mhz),
- * other bits will be truncated
- */
-#define RSF16_ENCODE(x) ((x) / 200 + 856)
-#define RSF16_MINFREQ (87 * 16000)
-#define RSF16_MAXFREQ (108 * 16000)
-
-static inline void wait(int n, int io)
-{
- for (; n; --n)
- inb(io);
-}
-
-static void outbits(int bits, unsigned int data, int nWait, int io)
-{
- int bit;
-
- for (; --bits >= 0;) {
- bit = (data >> bits) & 1;
- outb(bit, io);
- wait(nWait, io);
- outb(bit | 2, io);
- wait(nWait, io);
- outb(bit, io);
- wait(nWait, io);
- }
-}
-
-static inline void fmr2_mute(int io)
-{
- outb(0x00, io);
- wait(4, io);
-}
-
-static inline void fmr2_unmute(int io)
-{
- outb(0x04, io);
- wait(4, io);
-}
-
-static inline int fmr2_stereo_mode(int io)
-{
- int n = inb(io);
-
- outb(6, io);
- inb(io);
- n = ((n >> 3) & 1) ^ 1;
- debug_print((KERN_DEBUG "stereo: %d\n", n));
- return n;
-}
-
-static int fmr2_product_info(struct fmr2 *dev)
-{
- int n = inb(dev->io);
-
- n &= 0xC1;
- if (n == 0) {
- /* this should support volume set */
- dev->card_type = 12;
- return 0;
- }
- /* not volume (mine is 11) */
- dev->card_type = (n == 128) ? 11 : 0;
- return n;
-}
+/* TEA575x tuner pins */
+#define STR_DATA (1 << 0)
+#define STR_CLK (1 << 1)
+#define STR_WREN (1 << 2)
+#define STR_MOST (1 << 3)
+/* PT2254A/TC9154A volume control pins */
+#define PT_ST (1 << 4)
+#define PT_CK (1 << 5)
+#define PT_DATA (1 << 6)
+/* volume control presence pin */
+#define FMR2_HASVOL (1 << 7)
-static inline int fmr2_getsigstr(struct fmr2 *dev)
+static void fmr2_tea575x_set_pins(struct snd_tea575x *tea, u8 pins)
{
- /* !!! works only if scanning freq */
- int res = 0xffff;
-
- outb(5, dev->io);
- wait(4, dev->io);
- if (!(inb(dev->io) & 1))
- res = 0;
- debug_print((KERN_DEBUG "signal: %d\n", res));
- return res;
-}
-
-/* set frequency and unmute card */
-static int fmr2_setfreq(struct fmr2 *dev)
-{
- unsigned long freq = dev->curfreq;
-
- fmr2_mute(dev->io);
-
- /* 0x42 for mono output
- * 0x102 forward scanning
-	 * 0x182 forward scan
- */
- outbits(9, 0x2, 3, dev->io);
- outbits(16, RSF16_ENCODE(freq), 2, dev->io);
-
- fmr2_unmute(dev->io);
+ struct fmr2 *fmr2 = tea->private_data;
+ u8 bits = 0;
- /* wait 0.11 sec */
- msleep(110);
+ bits |= (pins & TEA575X_DATA) ? STR_DATA : 0;
+ bits |= (pins & TEA575X_CLK) ? STR_CLK : 0;
+ /* WRITE_ENABLE is inverted, DATA must be high during read */
+ bits |= (pins & TEA575X_WREN) ? 0 : STR_WREN | STR_DATA;
- /* NOTE if mute this stop radio
- you must set freq on unmute */
- dev->stereo = fmr2_stereo_mode(dev->io);
- return 0;
-}
-
-/* !!! not tested, in my card this doesn't work !!! */
-static int fmr2_setvolume(struct fmr2 *dev)
-{
- int vol[16] = { 0x021, 0x084, 0x090, 0x104,
- 0x110, 0x204, 0x210, 0x402,
- 0x404, 0x408, 0x410, 0x801,
- 0x802, 0x804, 0x808, 0x810 };
- int i, a;
- int n = vol[dev->curvol & 0x0f];
-
- if (dev->card_type != 11)
- return 1;
-
- for (i = 12; --i >= 0; ) {
- a = ((n >> i) & 1) << 6; /* if (a==0) a = 0; else a = 0x40; */
- outb(a | 4, dev->io);
- wait(4, dev->io);
- outb(a | 0x24, dev->io);
- wait(4, dev->io);
- outb(a | 4, dev->io);
- wait(4, dev->io);
- }
- for (i = 6; --i >= 0; ) {
- a = ((0x18 >> i) & 1) << 6;
- outb(a | 4, dev->io);
- wait(4, dev->io);
- outb(a | 0x24, dev->io);
- wait(4, dev->io);
- outb(a | 4, dev->io);
- wait(4, dev->io);
- }
- wait(4, dev->io);
- outb(0x14, dev->io);
- return 0;
+ outb(bits, fmr2->io);
}
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *v)
+static u8 fmr2_tea575x_get_pins(struct snd_tea575x *tea)
{
- strlcpy(v->driver, "radio-sf16fmr2", sizeof(v->driver));
- strlcpy(v->card, "SF16-FMR2 radio", sizeof(v->card));
- strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
- v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- return 0;
-}
-
-static int vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *v)
-{
- struct fmr2 *fmr2 = video_drvdata(file);
-
- if (v->index > 0)
- return -EINVAL;
+ struct fmr2 *fmr2 = tea->private_data;
+ u8 bits = inb(fmr2->io);
- strlcpy(v->name, "FM", sizeof(v->name));
- v->type = V4L2_TUNER_RADIO;
-
- v->rangelow = RSF16_MINFREQ;
- v->rangehigh = RSF16_MAXFREQ;
- v->rxsubchans = fmr2->stereo ? V4L2_TUNER_SUB_STEREO :
- V4L2_TUNER_SUB_MONO;
- v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
- v->audmode = V4L2_TUNER_MODE_STEREO;
- mutex_lock(&fmr2->lock);
- v->signal = fmr2_getsigstr(fmr2);
- mutex_unlock(&fmr2->lock);
- return 0;
+	return ((bits & STR_DATA) ? TEA575X_DATA : 0) |
+	       ((bits & STR_MOST) ? TEA575X_MOST : 0);
}
-static int vidioc_s_tuner(struct file *file, void *priv,
- struct v4l2_tuner *v)
+static void fmr2_tea575x_set_direction(struct snd_tea575x *tea, bool output)
{
- return v->index ? -EINVAL : 0;
}
-static int vidioc_s_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
-{
- struct fmr2 *fmr2 = video_drvdata(file);
+static struct snd_tea575x_ops fmr2_tea_ops = {
+ .set_pins = fmr2_tea575x_set_pins,
+ .get_pins = fmr2_tea575x_get_pins,
+ .set_direction = fmr2_tea575x_set_direction,
+};
- if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
- return -EINVAL;
- if (f->frequency < RSF16_MINFREQ ||
- f->frequency > RSF16_MAXFREQ)
- return -EINVAL;
- /* rounding in steps of 200 to match the freq
- that will be used */
- fmr2->curfreq = (f->frequency / 200) * 200;
-
- /* set card freq (if not muted) */
- if (fmr2->curvol && !fmr2->mute) {
- mutex_lock(&fmr2->lock);
- fmr2_setfreq(fmr2);
- mutex_unlock(&fmr2->lock);
+/* TC9154A/PT2254A volume control */
+
+/* 18-bit shift register bit definitions */
+#define TC9154A_ATT_MAJ_0DB (1 << 0)
+#define TC9154A_ATT_MAJ_10DB (1 << 1)
+#define TC9154A_ATT_MAJ_20DB (1 << 2)
+#define TC9154A_ATT_MAJ_30DB (1 << 3)
+#define TC9154A_ATT_MAJ_40DB (1 << 4)
+#define TC9154A_ATT_MAJ_50DB (1 << 5)
+#define TC9154A_ATT_MAJ_60DB (1 << 6)
+
+#define TC9154A_ATT_MIN_0DB (1 << 7)
+#define TC9154A_ATT_MIN_2DB (1 << 8)
+#define TC9154A_ATT_MIN_4DB (1 << 9)
+#define TC9154A_ATT_MIN_6DB (1 << 10)
+#define TC9154A_ATT_MIN_8DB (1 << 11)
+/* bit 12 is ignored */
+#define TC9154A_CHANNEL_LEFT (1 << 13)
+#define TC9154A_CHANNEL_RIGHT (1 << 14)
+/* bits 15, 16, 17 must be 0 */
+
+#define TC9154A_ATT_MAJ(x) (1 << x)
+#define TC9154A_ATT_MIN(x) (1 << (7 + x))
+
+static void tc9154a_set_pins(struct fmr2 *fmr2, u8 pins)
+{
+ if (!fmr2->tea.mute)
+ pins |= STR_WREN;
+
+ outb(pins, fmr2->io);
+}
+
+static void tc9154a_set_attenuation(struct fmr2 *fmr2, int att, u32 channel)
+{
+ int i;
+ u32 reg;
+ u8 bit;
+
+ reg = TC9154A_ATT_MAJ(att / 10) | TC9154A_ATT_MIN((att % 10) / 2);
+ reg |= channel;
+ /* write 18-bit shift register, LSB first */
+ for (i = 0; i < 18; i++) {
+ bit = reg & (1 << i) ? PT_DATA : 0;
+ tc9154a_set_pins(fmr2, bit);
+ udelay(5);
+ tc9154a_set_pins(fmr2, bit | PT_CK);
+ udelay(5);
+ tc9154a_set_pins(fmr2, bit);
}
- return 0;
-}
-
-static int vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
-{
- struct fmr2 *fmr2 = video_drvdata(file);
-
- if (f->tuner != 0)
- return -EINVAL;
- f->type = V4L2_TUNER_RADIO;
- f->frequency = fmr2->curfreq;
- return 0;
-}
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct fmr2 *fmr2 = video_drvdata(file);
-
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- case V4L2_CID_AUDIO_VOLUME:
- /* Only card_type == 11 implements volume */
- if (fmr2->card_type == 11)
- return v4l2_ctrl_query_fill(qc, 0, 15, 1, 0);
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- }
- return -EINVAL;
+ /* latch register data */
+ udelay(5);
+ tc9154a_set_pins(fmr2, PT_ST);
+ udelay(5);
+ tc9154a_set_pins(fmr2, 0);
}
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int fmr2_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct fmr2 *fmr2 = video_drvdata(file);
+ struct snd_tea575x *tea = container_of(ctrl->handler, struct snd_tea575x, ctrl_handler);
+ struct fmr2 *fmr2 = tea->private_data;
+ int volume, balance, left, right;
switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = fmr2->mute;
- return 0;
case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = fmr2->curvol;
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct fmr2 *fmr2 = video_drvdata(file);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- fmr2->mute = ctrl->value;
+ volume = ctrl->val;
+ balance = fmr2->balance->cur.val;
break;
- case V4L2_CID_AUDIO_VOLUME:
- fmr2->curvol = ctrl->value;
+ case V4L2_CID_AUDIO_BALANCE:
+ balance = ctrl->val;
+ volume = fmr2->volume->cur.val;
break;
default:
return -EINVAL;
}
-#ifdef DEBUG
- if (fmr2->curvol && !fmr2->mute)
- printk(KERN_DEBUG "unmute\n");
- else
- printk(KERN_DEBUG "mute\n");
-#endif
-
- mutex_lock(&fmr2->lock);
- if (fmr2->curvol && !fmr2->mute) {
- fmr2_setvolume(fmr2);
- /* Set frequency and unmute card */
- fmr2_setfreq(fmr2);
- } else
- fmr2_mute(fmr2->io);
- mutex_unlock(&fmr2->lock);
- return 0;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
+ left = right = volume;
+ if (balance < 0)
+ right = max(0, right + balance);
+ if (balance > 0)
+ left = max(0, left - balance);
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return i ? -EINVAL : 0;
-}
+ tc9154a_set_attenuation(fmr2, abs(left - 68), TC9154A_CHANNEL_LEFT);
+ tc9154a_set_attenuation(fmr2, abs(right - 68), TC9154A_CHANNEL_RIGHT);
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- a->index = 0;
- strlcpy(a->name, "Radio", sizeof(a->name));
- a->capability = V4L2_AUDCAP_STEREO;
return 0;
}
-static int vidioc_s_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
+static const struct v4l2_ctrl_ops fmr2_ctrl_ops = {
+ .s_ctrl = fmr2_s_ctrl,
+};
+
+static int fmr2_tea_ext_init(struct snd_tea575x *tea)
{
- return a->index ? -EINVAL : 0;
-}
+ struct fmr2 *fmr2 = tea->private_data;
-static const struct v4l2_file_operations fmr2_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = video_ioctl2,
-};
+ if (inb(fmr2->io) & FMR2_HASVOL) {
+ fmr2->volume = v4l2_ctrl_new_std(&tea->ctrl_handler, &fmr2_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 68, 2, 56);
+ fmr2->balance = v4l2_ctrl_new_std(&tea->ctrl_handler, &fmr2_ctrl_ops, V4L2_CID_AUDIO_BALANCE, -68, 68, 2, 0);
+ if (tea->ctrl_handler.error) {
+			printk(KERN_ERR "radio-sf16fmr2: can't initialize controls\n");
+ return tea->ctrl_handler.error;
+ }
+ }
-static const struct v4l2_ioctl_ops fmr2_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
-};
+ return 0;
+}
static int __init fmr2_init(void)
{
struct fmr2 *fmr2 = &fmr2_card;
- struct v4l2_device *v4l2_dev = &fmr2->v4l2_dev;
- int res;
- strlcpy(v4l2_dev->name, "sf16fmr2", sizeof(v4l2_dev->name));
- fmr2->io = io;
- fmr2->stereo = 1;
- mutex_init(&fmr2->lock);
+ fmr2->io = FMR2_PORT;
- if (!request_region(fmr2->io, 2, "sf16fmr2")) {
- v4l2_err(v4l2_dev, "request_region failed!\n");
+ if (!request_region(fmr2->io, 2, "SF16-FMR2")) {
+ printk(KERN_ERR "radio-sf16fmr2: I/O port 0x%x already in use\n", fmr2->io);
return -EBUSY;
}
- res = v4l2_device_register(NULL, v4l2_dev);
- if (res < 0) {
- release_region(fmr2->io, 2);
- v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
- return res;
- }
+ fmr2->tea.private_data = fmr2;
+ fmr2->tea.ops = &fmr2_tea_ops;
+ fmr2->tea.ext_init = fmr2_tea_ext_init;
+ strlcpy(fmr2->tea.card, "SF16-FMR2", sizeof(fmr2->tea.card));
+ strcpy(fmr2->tea.bus_info, "ISA");
- strlcpy(fmr2->vdev.name, v4l2_dev->name, sizeof(fmr2->vdev.name));
- fmr2->vdev.v4l2_dev = v4l2_dev;
- fmr2->vdev.fops = &fmr2_fops;
- fmr2->vdev.ioctl_ops = &fmr2_ioctl_ops;
- fmr2->vdev.release = video_device_release_empty;
- video_set_drvdata(&fmr2->vdev, fmr2);
-
- /* mute card - prevents noisy bootups */
- fmr2_mute(fmr2->io);
- fmr2_product_info(fmr2);
-
- if (video_register_device(&fmr2->vdev, VFL_TYPE_RADIO, radio_nr) < 0) {
- v4l2_device_unregister(v4l2_dev);
+ if (snd_tea575x_init(&fmr2->tea)) {
+ printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
release_region(fmr2->io, 2);
- return -EINVAL;
+ return -ENODEV;
}
- v4l2_info(v4l2_dev, "SF16FMR2 radio card driver at 0x%x.\n", fmr2->io);
- debug_print((KERN_DEBUG "card_type %d\n", fmr2->card_type));
+ printk(KERN_INFO "radio-sf16fmr2: SF16-FMR2 radio card at 0x%x.\n", fmr2->io);
return 0;
}
@@ -443,22 +211,9 @@ static void __exit fmr2_exit(void)
{
struct fmr2 *fmr2 = &fmr2_card;
- video_unregister_device(&fmr2->vdev);
- v4l2_device_unregister(&fmr2->v4l2_dev);
+ snd_tea575x_exit(&fmr2->tea);
release_region(fmr2->io, 2);
}
module_init(fmr2_init);
module_exit(fmr2_exit);
-
-#ifndef MODULE
-
-static int __init fmr2_setup_io(char *str)
-{
- get_option(&str, &io);
- return 1;
-}
-
-__setup("sf16fmr2=", fmr2_setup_io);
-
-#endif
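
A short standalone sketch (not taken from the patch itself) of the volume/balance arithmetic that fmr2_s_ctrl() above feeds into tc9154a_set_attenuation(). It assumes the 0..68 volume and -68..68 balance ranges registered in fmr2_tea_ext_init(); channel_att(), MAX_VOL and the test values are illustrative names only, not driver symbols.

#include <stdio.h>
#include <stdlib.h>

#define MAX_VOL 68	/* matches the 0..68 volume range registered above */

/* Map a volume/balance pair to the attenuation (in dB) sent to one channel. */
static int channel_att(int volume, int balance, int is_left)
{
	int left = volume, right = volume;

	if (balance < 0)
		right = right + balance > 0 ? right + balance : 0;
	if (balance > 0)
		left = left - balance > 0 ? left - balance : 0;

	/* attenuation grows as the channel level drops below full volume */
	return abs((is_left ? left : right) - MAX_VOL);
}

int main(void)
{
	/* hypothetical control values: volume 56, balance -20 (toward the left) */
	printf("left %d dB, right %d dB\n",
	       channel_att(56, -20, 1), channel_att(56, -20, 0));
	return 0;
}

With volume 56 and balance -20 the sketch reports 12 dB of attenuation on the left channel and 32 dB on the right; the driver then clocks each value into the TC9154A as part of an 18-bit word, LSB first, as the loop in tc9154a_set_attenuation() does.
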
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 0e71d81..95ddcc4 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -39,10 +39,8 @@
#include <linux/i2c.h> /* I2C */
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
-#define DRIVER_VERSION "v0.01"
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 1)
+#define DRIVER_VERSION "0.0.2"
#define DRIVER_AUTHOR "Fabio Belavenuto <belavenuto@gmail.com>"
#define DRIVER_DESC "A driver for the TEA5764 radio chip for EZX Phones."
@@ -300,7 +298,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->card, dev->name, sizeof(v->card));
snprintf(v->bus_info, sizeof(v->bus_info),
"I2C:%s", dev_name(&dev->dev));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
@@ -595,8 +592,9 @@ static void __exit tea5764_exit(void)
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
-module_param(use_xtal, int, 1);
+module_param(use_xtal, int, 0);
MODULE_PARM_DESC(use_xtal, "Chip has a xtal connected on the board");
module_param(radio_nr, int, 0);
MODULE_PARM_DESC(radio_nr, "video4linux device number to use");
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index a326639..f2ed9cc 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -29,7 +29,6 @@
#include <linux/ioport.h> /* request_region */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -37,6 +36,7 @@
MODULE_AUTHOR("R.OFFERMANNS & others");
MODULE_DESCRIPTION("A driver for the TerraTec ActiveRadio Standalone radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
#ifndef CONFIG_RADIO_TERRATEC_PORT
#define CONFIG_RADIO_TERRATEC_PORT 0x590
@@ -49,8 +49,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the TerraTec ActiveRadio card (0x590 or 0x591)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
static struct v4l2_queryctrl radio_qctrl[] = {
{
.id = V4L2_CID_AUDIO_MUTE,
@@ -205,7 +203,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-terratec", sizeof(v->driver));
strlcpy(v->card, "ActiveRadio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index a185610..f17b540 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -16,7 +16,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/version.h>
#include <linux/io.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
@@ -44,7 +43,6 @@ static int timbradio_vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, DRIVER_NAME, sizeof(v->driver));
strlcpy(v->card, "Timberdale Radio", sizeof(v->card));
snprintf(v->bus_info, sizeof(v->bus_info), "platform:"DRIVER_NAME);
- v->version = KERNEL_VERSION(0, 0, 1);
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
@@ -245,4 +243,5 @@ module_exit(timbradio_exit);
MODULE_DESCRIPTION("Timberdale Radio driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.0.2");
MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index 22fa9cc..b3f45a0 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/videodev2.h>
#include <linux/io.h>
#include <media/v4l2-device.h>
@@ -28,6 +27,7 @@
MODULE_AUTHOR("Eric Lammerts, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
MODULE_DESCRIPTION("A driver for the Trust FM Radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
/* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */
@@ -42,8 +42,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the Trust FM Radio card (0x350 or 0x358)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct trust {
struct v4l2_device v4l2_dev;
struct video_device vdev;
@@ -196,7 +194,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-trust", sizeof(v->driver));
strlcpy(v->card, "Trust FM Radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index 8dbbf08..398726a 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -31,15 +31,17 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#define DRIVER_VERSION "0.1.2"
+
MODULE_AUTHOR("Dr. Henrik Seidel");
MODULE_DESCRIPTION("A driver for the Typhoon radio card (a.k.a. EcoRadio).");
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
#ifndef CONFIG_RADIO_TYPHOON_PORT
#define CONFIG_RADIO_TYPHOON_PORT -1
@@ -61,9 +63,7 @@ static unsigned long mutefreq = CONFIG_RADIO_TYPHOON_MUTEFREQ;
module_param(mutefreq, ulong, 0);
MODULE_PARM_DESC(mutefreq, "Frequency used when muting the card (in kHz)");
-#define RADIO_VERSION KERNEL_VERSION(0, 1, 1)
-
-#define BANNER "Typhoon Radio Card driver v0.1.1\n"
+#define BANNER "Typhoon Radio Card driver v" DRIVER_VERSION "\n"
struct typhoon {
struct v4l2_device v4l2_dev;
@@ -171,7 +171,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-typhoon", sizeof(v->driver));
strlcpy(v->card, "Typhoon Radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 459f727..46cacf8 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1382,7 +1382,7 @@ static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->cur.val = wl1273_fm_get_tx_ctune(radio);
+ ctrl->val = wl1273_fm_get_tx_ctune(radio);
break;
default:
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index af99c5b..f5613b9 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -35,7 +35,6 @@
#include <linux/delay.h> /* udelay, msleep */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -43,6 +42,7 @@
MODULE_AUTHOR("C.van Schaik");
MODULE_DESCRIPTION("A driver for the Zoltrix Radio Plus.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
#ifndef CONFIG_RADIO_ZOLTRIX_PORT
#define CONFIG_RADIO_ZOLTRIX_PORT -1
@@ -55,8 +55,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the Zoltrix Radio Plus (0x20c or 0x30c)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct zoltrix {
struct v4l2_device v4l2_dev;
struct video_device vdev;
@@ -228,7 +226,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-zoltrix", sizeof(v->driver));
strlcpy(v->card, "Zoltrix Radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index a2a6777..fd3541b 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -24,10 +24,9 @@
/* driver definitions */
#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
-#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 1)
#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
-#define DRIVER_VERSION "1.0.1"
+#define DRIVER_VERSION "1.0.2"
/* kernel includes */
#include <linux/i2c.h>
@@ -248,7 +247,6 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
{
strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
- capability->version = DRIVER_KERNEL_VERSION;
capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
V4L2_CAP_TUNER | V4L2_CAP_RADIO;
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 392e84f..4cf5370 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -29,7 +29,6 @@
/* driver definitions */
#define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>"
-#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 10)
#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
#define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers"
#define DRIVER_VERSION "1.0.10"
@@ -626,7 +625,6 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
usb_make_path(radio->usbdev, capability->bus_info,
sizeof(capability->bus_info));
- capability->version = DRIVER_KERNEL_VERSION;
capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
@@ -699,7 +697,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->videodev = video_device_alloc();
if (!radio->videodev) {
retval = -ENOMEM;
- goto err_intbuffer;
+ goto err_urb;
}
memcpy(radio->videodev, &si470x_viddev_template,
sizeof(si470x_viddev_template));
@@ -790,6 +788,8 @@ err_all:
kfree(radio->buffer);
err_video:
video_device_release(radio->videodev);
+err_urb:
+ usb_free_urb(radio->int_in_urb);
err_intbuffer:
kfree(radio->int_in_buffer);
err_radio:
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 68da001..f300a55 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -32,7 +32,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/input.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/mutex.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h
index 1a45a5d..d84ad9d 100644
--- a/drivers/media/radio/wl128x/fmdrv.h
+++ b/drivers/media/radio/wl128x/fmdrv.h
@@ -28,14 +28,11 @@
#include <sound/core.h>
#include <sound/initval.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
-#define FM_DRV_VERSION "0.10"
-/* Should match with FM_DRV_VERSION */
-#define FM_DRV_RADIO_VERSION KERNEL_VERSION(0, 0, 1)
+#define FM_DRV_VERSION "0.1.1"
#define FM_DRV_NAME "ti_fmdrv"
#define FM_DRV_CARD_SHORT_NAME "TI FM Radio"
#define FM_DRV_CARD_LONG_NAME "Texas Instruments FM Radio"
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index a4f07f8..ec1d52f 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -175,7 +175,6 @@ static int fm_v4l2_vidioc_querycap(struct file *file, void *priv,
strlcpy(capability->card, FM_DRV_CARD_SHORT_NAME,
sizeof(capability->card));
sprintf(capability->bus_info, "UART");
- capability->version = FM_DRV_RADIO_VERSION;
capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
V4L2_CAP_RADIO | V4L2_CAP_MODULATOR |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE |
@@ -191,7 +190,7 @@ static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->cur.val = fm_tx_get_tune_cap_val(fmdev);
+ ctrl->val = fm_tx_get_tune_cap_val(fmdev);
break;
default:
fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 7d4bbc2..899f783 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -87,6 +87,17 @@ config IR_RC5_SZ_DECODER
uses an IR protocol that is almost standard RC-5, but not quite,
as it uses an additional bit).
+config IR_MCE_KBD_DECODER
+ tristate "Enable IR raw decoder for the MCE keyboard/mouse protocol"
+ depends on RC_CORE
+ select BITREVERSE
+ default y
+
+ ---help---
+ Enable this option if you have a Microsoft Remote Keyboard for
+ Windows Media Center Edition, which you would like to use with
+ a raw IR receiver in your system.
+
config IR_LIRC_CODEC
tristate "Enable IR to LIRC bridge"
depends on RC_CORE
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 52830e5..f224db0 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
obj-$(CONFIG_IR_RC5_SZ_DECODER) += ir-rc5-sz-decoder.o
+obj-$(CONFIG_IR_MCE_KBD_DECODER) += ir-mce_kbd-decoder.o
obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
# stand-alone IR receivers/transmitters
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index a43ed6c..2b9c2569 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -953,13 +953,13 @@ static void ene_set_idle(struct rc_dev *rdev, bool idle)
}
/* outside interface: transmit */
-static int ene_transmit(struct rc_dev *rdev, int *buf, u32 n)
+static int ene_transmit(struct rc_dev *rdev, unsigned *buf, unsigned n)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
dev->tx_buffer = buf;
- dev->tx_len = n / sizeof(int);
+ dev->tx_len = n;
dev->tx_pos = 0;
dev->tx_reg = 0;
dev->tx_done = 0;
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index 337a41d..017c209 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -235,7 +235,7 @@ struct ene_device {
bool tx_sample_pulse; /* current sample is pulse */
/* TX buffer */
- int *tx_buffer; /* input samples buffer*/
+ unsigned *tx_buffer; /* input samples buffer*/
int tx_pos; /* position in that buffer */
int tx_len; /* current len of tx buffer */
int tx_done; /* done transmitting */
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 1c5cc65..e5eeec4 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -103,19 +103,19 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char *buf,
{
struct lirc_codec *lirc;
struct rc_dev *dev;
- int *txbuf; /* buffer with values to transmit */
- int ret = 0;
+ unsigned int *txbuf; /* buffer with values to transmit */
+ ssize_t ret = 0;
size_t count;
lirc = lirc_get_pdata(file);
if (!lirc)
return -EFAULT;
- if (n % sizeof(int))
+ if (n < sizeof(unsigned) || n % sizeof(unsigned))
return -EINVAL;
- count = n / sizeof(int);
- if (count > LIRCBUF_SIZE || count % 2 == 0 || n % sizeof(int) != 0)
+ count = n / sizeof(unsigned);
+ if (count > LIRCBUF_SIZE || count % 2 == 0)
return -EINVAL;
txbuf = memdup_user(buf, n);
@@ -129,7 +129,10 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char *buf,
}
if (dev->tx_ir)
- ret = dev->tx_ir(dev, txbuf, (u32)n);
+ ret = dev->tx_ir(dev, txbuf, count);
+
+ if (ret > 0)
+ ret *= sizeof(unsigned);
out:
kfree(txbuf);
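
To illustrate the unit change above: ir_lirc_transmit_ir() still takes n in bytes from write(), converts it to a count of unsigned int samples, hands that count to the driver's tx_ir(), and scales the result back to bytes before returning. The user-space sketch below is only an illustration, assuming a LIRC transmitter at the hypothetical path /dev/lirc0; it shows a well-formed buffer, an odd number of microsecond durations alternating pulse/space.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* three samples: pulse, space, pulse (durations in microseconds) */
	unsigned int buf[] = { 9000, 4500, 560 };
	int fd = open("/dev/lirc0", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return 1;
	/* n = 3 * sizeof(unsigned int) bytes; an even sample count is rejected */
	ret = write(fd, buf, sizeof(buf));
	printf("wrote %zd of %zu bytes\n", ret, sizeof(buf));
	close(fd);
	return 0;
}
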
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
new file mode 100644
index 0000000..3784ebf
--- /dev/null
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -0,0 +1,449 @@
+/* ir-mce_kbd-decoder.c - A decoder for the RC6-ish keyboard/mouse IR protocol
+ * used by the Microsoft Remote Keyboard for Windows Media Center Edition,
+ * referred to by Microsoft's Windows Media Center remote specification docs
+ * as "an internal protocol called MCIR-2".
+ *
+ * Copyright (C) 2011 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+
+#include "rc-core-priv.h"
+
+/*
+ * This decoder currently supports:
+ * - MCIR-2 29-bit IR signals used for mouse movement and buttons
+ * - MCIR-2 32-bit IR signals used for standard keyboard keys
+ *
+ * The media keys on the keyboard send RC-6 signals that are indistinguishable
+ * from the keys of the same name on the stock MCE remote, so they are handled
+ * by the standard RC-6 decoder and made available to the system via the input
+ * device for the remote, rather than the keyboard/mouse one.
+ */
+
+#define MCIR2_UNIT 333333 /* ns */
+#define MCIR2_HEADER_NBITS 5
+#define MCIR2_MOUSE_NBITS 29
+#define MCIR2_KEYBOARD_NBITS 32
+#define MCIR2_PREFIX_PULSE (8 * MCIR2_UNIT)
+#define MCIR2_PREFIX_SPACE (1 * MCIR2_UNIT)
+#define MCIR2_MAX_LEN (3 * MCIR2_UNIT)
+#define MCIR2_BIT_START (1 * MCIR2_UNIT)
+#define MCIR2_BIT_END (1 * MCIR2_UNIT)
+#define MCIR2_BIT_0 (1 * MCIR2_UNIT)
+#define MCIR2_BIT_SET (2 * MCIR2_UNIT)
+#define MCIR2_MODE_MASK 0xf /* for the header bits */
+#define MCIR2_KEYBOARD_HEADER 0x4
+#define MCIR2_MOUSE_HEADER 0x1
+#define MCIR2_MASK_KEYS_START 0xe0
+
+enum mce_kbd_mode {
+ MCIR2_MODE_KEYBOARD,
+ MCIR2_MODE_MOUSE,
+ MCIR2_MODE_UNKNOWN,
+};
+
+enum mce_kbd_state {
+ STATE_INACTIVE,
+ STATE_HEADER_BIT_START,
+ STATE_HEADER_BIT_END,
+ STATE_BODY_BIT_START,
+ STATE_BODY_BIT_END,
+ STATE_FINISHED,
+};
+
+static unsigned char kbd_keycodes[256] = {
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_A,
+ KEY_B, KEY_C, KEY_D, KEY_E, KEY_F,
+ KEY_G, KEY_H, KEY_I, KEY_J, KEY_K,
+ KEY_L, KEY_M, KEY_N, KEY_O, KEY_P,
+ KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U,
+ KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z,
+ KEY_1, KEY_2, KEY_3, KEY_4, KEY_5,
+ KEY_6, KEY_7, KEY_8, KEY_9, KEY_0,
+ KEY_ENTER, KEY_ESC, KEY_BACKSPACE, KEY_TAB, KEY_SPACE,
+ KEY_MINUS, KEY_EQUAL, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH,
+ KEY_RESERVED, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_GRAVE, KEY_COMMA,
+ KEY_DOT, KEY_SLASH, KEY_CAPSLOCK, KEY_F1, KEY_F2,
+ KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7,
+ KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_F12,
+ KEY_SYSRQ, KEY_SCROLLLOCK, KEY_PAUSE, KEY_INSERT, KEY_HOME,
+ KEY_PAGEUP, KEY_DELETE, KEY_END, KEY_PAGEDOWN, KEY_RIGHT,
+ KEY_LEFT, KEY_DOWN, KEY_UP, KEY_NUMLOCK, KEY_KPSLASH,
+ KEY_KPASTERISK, KEY_KPMINUS, KEY_KPPLUS, KEY_KPENTER, KEY_KP1,
+ KEY_KP2, KEY_KP3, KEY_KP4, KEY_KP5, KEY_KP6,
+ KEY_KP7, KEY_KP8, KEY_KP9, KEY_KP0, KEY_KPDOT,
+ KEY_102ND, KEY_COMPOSE, KEY_POWER, KEY_KPEQUAL, KEY_F13,
+ KEY_F14, KEY_F15, KEY_F16, KEY_F17, KEY_F18,
+ KEY_F19, KEY_F20, KEY_F21, KEY_F22, KEY_F23,
+ KEY_F24, KEY_OPEN, KEY_HELP, KEY_PROPS, KEY_FRONT,
+ KEY_STOP, KEY_AGAIN, KEY_UNDO, KEY_CUT, KEY_COPY,
+ KEY_PASTE, KEY_FIND, KEY_MUTE, KEY_VOLUMEUP, KEY_VOLUMEDOWN,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_KPCOMMA, KEY_RESERVED,
+ KEY_RO, KEY_KATAKANAHIRAGANA, KEY_YEN, KEY_HENKAN, KEY_MUHENKAN,
+ KEY_KPJPCOMMA, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_HANGUEL,
+ KEY_HANJA, KEY_KATAKANA, KEY_HIRAGANA, KEY_ZENKAKUHANKAKU, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_LEFTCTRL,
+ KEY_LEFTSHIFT, KEY_LEFTALT, KEY_LEFTMETA, KEY_RIGHTCTRL, KEY_RIGHTSHIFT,
+ KEY_RIGHTALT, KEY_RIGHTMETA, KEY_PLAYPAUSE, KEY_STOPCD, KEY_PREVIOUSSONG,
+ KEY_NEXTSONG, KEY_EJECTCD, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_MUTE,
+ KEY_WWW, KEY_BACK, KEY_FORWARD, KEY_STOP, KEY_FIND,
+ KEY_SCROLLUP, KEY_SCROLLDOWN, KEY_EDIT, KEY_SLEEP, KEY_COFFEE,
+ KEY_REFRESH, KEY_CALC, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED
+};
+
+static void mce_kbd_rx_timeout(unsigned long data)
+{
+ struct mce_kbd_dec *mce_kbd = (struct mce_kbd_dec *)data;
+ int i;
+ unsigned char maskcode;
+
+ IR_dprintk(2, "timer callback clearing all keys\n");
+
+ for (i = 0; i < 7; i++) {
+ maskcode = kbd_keycodes[MCIR2_MASK_KEYS_START + i];
+ input_report_key(mce_kbd->idev, maskcode, 0);
+ }
+
+ for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
+ input_report_key(mce_kbd->idev, kbd_keycodes[i], 0);
+}
+
+static enum mce_kbd_mode mce_kbd_mode(struct mce_kbd_dec *data)
+{
+ switch (data->header & MCIR2_MODE_MASK) {
+ case MCIR2_KEYBOARD_HEADER:
+ return MCIR2_MODE_KEYBOARD;
+ case MCIR2_MOUSE_HEADER:
+ return MCIR2_MODE_MOUSE;
+ default:
+ return MCIR2_MODE_UNKNOWN;
+ }
+}
+
+static void ir_mce_kbd_process_keyboard_data(struct input_dev *idev,
+ u32 scancode)
+{
+ u8 keydata = (scancode >> 8) & 0xff;
+ u8 shiftmask = scancode & 0xff;
+ unsigned char keycode, maskcode;
+ int i, keystate;
+
+ IR_dprintk(1, "keyboard: keydata = 0x%02x, shiftmask = 0x%02x\n",
+ keydata, shiftmask);
+
+ for (i = 0; i < 7; i++) {
+ maskcode = kbd_keycodes[MCIR2_MASK_KEYS_START + i];
+ if (shiftmask & (1 << i))
+ keystate = 1;
+ else
+ keystate = 0;
+ input_report_key(idev, maskcode, keystate);
+ }
+
+ if (keydata) {
+ keycode = kbd_keycodes[keydata];
+ input_report_key(idev, keycode, 1);
+ } else {
+ for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
+ input_report_key(idev, kbd_keycodes[i], 0);
+ }
+}
+
+static void ir_mce_kbd_process_mouse_data(struct input_dev *idev, u32 scancode)
+{
+ /* raw mouse coordinates */
+ u8 xdata = (scancode >> 7) & 0x7f;
+ u8 ydata = (scancode >> 14) & 0x7f;
+ int x, y;
+ /* mouse buttons */
+ bool right = scancode & 0x40;
+ bool left = scancode & 0x20;
+
+ if (xdata & 0x40)
+ x = -((~xdata & 0x7f) + 1);
+ else
+ x = xdata;
+
+ if (ydata & 0x40)
+ y = -((~ydata & 0x7f) + 1);
+ else
+ y = ydata;
+
+ IR_dprintk(1, "mouse: x = %d, y = %d, btns = %s%s\n",
+ x, y, left ? "L" : "", right ? "R" : "");
+
+ input_report_rel(idev, REL_X, x);
+ input_report_rel(idev, REL_Y, y);
+
+ input_report_key(idev, BTN_LEFT, left);
+ input_report_key(idev, BTN_RIGHT, right);
+}
+
+/**
+ * ir_mce_kbd_decode() - Decode one mce_kbd pulse or space
+ * @dev: the struct rc_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
+{
+ struct mce_kbd_dec *data = &dev->raw->mce_kbd;
+ u32 scancode;
+ unsigned long delay;
+
+ if (!(dev->raw->enabled_protocols & RC_TYPE_MCE_KBD))
+ return 0;
+
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, MCIR2_UNIT, MCIR2_UNIT / 2))
+ goto out;
+
+again:
+ IR_dprintk(2, "started at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ if (!geq_margin(ev.duration, MCIR2_UNIT, MCIR2_UNIT / 2))
+ return 0;
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ /* Note: larger margin on first pulse since each MCIR2_UNIT
+ is quite short and some hardware takes some time to
+ adjust to the signal */
+ if (!eq_margin(ev.duration, MCIR2_PREFIX_PULSE, MCIR2_UNIT))
+ break;
+
+ data->state = STATE_HEADER_BIT_START;
+ data->count = 0;
+ data->header = 0;
+ return 0;
+
+ case STATE_HEADER_BIT_START:
+ if (geq_margin(ev.duration, MCIR2_MAX_LEN, MCIR2_UNIT / 2))
+ break;
+
+ data->header <<= 1;
+ if (ev.pulse)
+ data->header |= 1;
+ data->count++;
+ data->state = STATE_HEADER_BIT_END;
+ return 0;
+
+ case STATE_HEADER_BIT_END:
+ if (!is_transition(&ev, &dev->raw->prev_ev))
+ break;
+
+ decrease_duration(&ev, MCIR2_BIT_END);
+
+ if (data->count != MCIR2_HEADER_NBITS) {
+ data->state = STATE_HEADER_BIT_START;
+ goto again;
+ }
+
+ switch (mce_kbd_mode(data)) {
+ case MCIR2_MODE_KEYBOARD:
+ data->wanted_bits = MCIR2_KEYBOARD_NBITS;
+ break;
+ case MCIR2_MODE_MOUSE:
+ data->wanted_bits = MCIR2_MOUSE_NBITS;
+ break;
+ default:
+ IR_dprintk(1, "not keyboard or mouse data\n");
+ goto out;
+ }
+
+ data->count = 0;
+ data->body = 0;
+ data->state = STATE_BODY_BIT_START;
+ goto again;
+
+ case STATE_BODY_BIT_START:
+ if (geq_margin(ev.duration, MCIR2_MAX_LEN, MCIR2_UNIT / 2))
+ break;
+
+ data->body <<= 1;
+ if (ev.pulse)
+ data->body |= 1;
+ data->count++;
+ data->state = STATE_BODY_BIT_END;
+ return 0;
+
+ case STATE_BODY_BIT_END:
+ if (!is_transition(&ev, &dev->raw->prev_ev))
+ break;
+
+ if (data->count == data->wanted_bits)
+ data->state = STATE_FINISHED;
+ else
+ data->state = STATE_BODY_BIT_START;
+
+ decrease_duration(&ev, MCIR2_BIT_END);
+ goto again;
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ switch (data->wanted_bits) {
+ case MCIR2_KEYBOARD_NBITS:
+ scancode = data->body & 0xffff;
+ IR_dprintk(1, "keyboard data 0x%08x\n", data->body);
+ if (dev->timeout)
+ delay = usecs_to_jiffies(dev->timeout / 1000);
+ else
+ delay = msecs_to_jiffies(100);
+ mod_timer(&data->rx_timeout, jiffies + delay);
+ /* Pass data to keyboard buffer parser */
+ ir_mce_kbd_process_keyboard_data(data->idev, scancode);
+ break;
+ case MCIR2_MOUSE_NBITS:
+ scancode = data->body & 0x1fffff;
+ IR_dprintk(1, "mouse data 0x%06x\n", scancode);
+ /* Pass data to mouse buffer parser */
+ ir_mce_kbd_process_mouse_data(data->idev, scancode);
+ break;
+ default:
+ IR_dprintk(1, "not keyboard or mouse data\n");
+ goto out;
+ }
+
+ data->state = STATE_INACTIVE;
+ input_sync(data->idev);
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "failed at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ input_sync(data->idev);
+ return -EINVAL;
+}
+
+static int ir_mce_kbd_register(struct rc_dev *dev)
+{
+ struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd;
+ struct input_dev *idev;
+ int i, ret;
+
+ idev = input_allocate_device();
+ if (!idev)
+ return -ENOMEM;
+
+ snprintf(mce_kbd->name, sizeof(mce_kbd->name),
+ "MCE IR Keyboard/Mouse (%s)", dev->driver_name);
+ strlcat(mce_kbd->phys, "/input0", sizeof(mce_kbd->phys));
+
+ idev->name = mce_kbd->name;
+ idev->phys = mce_kbd->phys;
+
+ /* Keyboard bits */
+ set_bit(EV_KEY, idev->evbit);
+ set_bit(EV_REP, idev->evbit);
+ for (i = 0; i < sizeof(kbd_keycodes); i++)
+ set_bit(kbd_keycodes[i], idev->keybit);
+
+ /* Mouse bits */
+ set_bit(EV_REL, idev->evbit);
+ set_bit(REL_X, idev->relbit);
+ set_bit(REL_Y, idev->relbit);
+ set_bit(BTN_LEFT, idev->keybit);
+ set_bit(BTN_RIGHT, idev->keybit);
+
+ /* Report scancodes too */
+ set_bit(EV_MSC, idev->evbit);
+ set_bit(MSC_SCAN, idev->mscbit);
+
+ setup_timer(&mce_kbd->rx_timeout, mce_kbd_rx_timeout,
+ (unsigned long)mce_kbd);
+
+ input_set_drvdata(idev, mce_kbd);
+
+#if 0
+ /* Adding this reference means two input devices are associated with
+ * this rc-core device, which ir-keytable doesn't cope with yet */
+ idev->dev.parent = &dev->dev;
+#endif
+
+ ret = input_register_device(idev);
+ if (ret < 0) {
+ input_free_device(idev);
+ return -EIO;
+ }
+
+ mce_kbd->idev = idev;
+
+ return 0;
+}
+
+static int ir_mce_kbd_unregister(struct rc_dev *dev)
+{
+ struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd;
+ struct input_dev *idev = mce_kbd->idev;
+
+ del_timer_sync(&mce_kbd->rx_timeout);
+ input_unregister_device(idev);
+
+ return 0;
+}
+
+static struct ir_raw_handler mce_kbd_handler = {
+ .protocols = RC_TYPE_MCE_KBD,
+ .decode = ir_mce_kbd_decode,
+ .raw_register = ir_mce_kbd_register,
+ .raw_unregister = ir_mce_kbd_unregister,
+};
+
+static int __init ir_mce_kbd_decode_init(void)
+{
+ ir_raw_handler_register(&mce_kbd_handler);
+
+ printk(KERN_INFO "IR MCE Keyboard/mouse protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_mce_kbd_decode_exit(void)
+{
+ ir_raw_handler_unregister(&mce_kbd_handler);
+}
+
+module_init(ir_mce_kbd_decode_init);
+module_exit(ir_mce_kbd_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("MCE Keyboard/mouse IR protocol decoder");
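
The mouse path above packs signed 7-bit X/Y deltas and two button bits into the 21-bit body. Below is a minimal sketch of that decode, compilable outside the kernel; the scancode value and the sign_extend_7bit() helper are made-up illustrations rather than driver symbols.

#include <stdio.h>
#include <stdint.h>

/* Decode a 7-bit two's-complement field, as the mouse path above does. */
static int sign_extend_7bit(uint32_t v)
{
	v &= 0x7f;
	return (v & 0x40) ? -(int)((~v & 0x7f) + 1) : (int)v;
}

int main(void)
{
	uint32_t scancode = 0x1fc0a0;	/* hypothetical 21-bit mouse body */
	int x = sign_extend_7bit(scancode >> 7);
	int y = sign_extend_7bit(scancode >> 14);
	int left = !!(scancode & 0x20);		/* bit 5: left button, as above */
	int right = !!(scancode & 0x40);	/* bit 6: right button */

	printf("x=%d y=%d left=%d right=%d\n", x, y, left, right);
	return 0;
}
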
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 423ed45..27808bb 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -355,6 +355,7 @@ static void init_decoders(struct work_struct *work)
load_rc6_decode();
load_jvc_decode();
load_sony_decode();
+ load_mce_kbd_decode();
load_lirc_codec();
/* If needed, we may later add some init code. In this case,
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index d20168f..682009d 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -382,7 +382,7 @@ static int ite_set_tx_duty_cycle(struct rc_dev *rcdev, u32 duty_cycle)
/* transmit out IR pulses; what you get here is a batch of alternating
* pulse/space/pulse/space lengths that we should write out completely through
* the FIFO, blocking on a full FIFO */
-static int ite_tx_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
+static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n)
{
unsigned long flags;
struct ite_dev *dev = rcdev->priv;
@@ -398,9 +398,6 @@ static int ite_tx_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
/* clear the array just in case */
memset(last_sent, 0, ARRAY_SIZE(last_sent));
- /* n comes in bytes; convert to ints */
- n /= sizeof(int);
-
spin_lock_irqsave(&dev->lock, flags);
/* let everybody know we're now transmitting */
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 01b69bc..c3907e2 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -29,7 +29,7 @@ static struct rc_map_table rc6_mce[] = {
{ 0x800f040a, KEY_DELETE },
{ 0x800f040b, KEY_ENTER },
- { 0x800f040c, KEY_POWER }, /* PC Power */
+ { 0x800f040c, KEY_SLEEP }, /* Formerly PC Power */
{ 0x800f040d, KEY_MEDIA }, /* Windows MCE button */
{ 0x800f040e, KEY_MUTE },
{ 0x800f040f, KEY_INFO },
@@ -44,7 +44,6 @@ static struct rc_map_table rc6_mce[] = {
{ 0x800f0416, KEY_PLAY },
{ 0x800f0417, KEY_RECORD },
{ 0x800f0418, KEY_PAUSE },
- { 0x800f046e, KEY_PLAYPAUSE },
{ 0x800f0419, KEY_STOP },
{ 0x800f041a, KEY_NEXT },
{ 0x800f041b, KEY_PREVIOUS },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index ec972dc..85ff9a1 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -692,20 +692,18 @@ static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
}
/* Send data out the IR blaster port(s) */
-static int mceusb_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
+static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
struct mceusb_dev *ir = dev->priv;
int i, ret = 0;
- int count, cmdcount = 0;
+ int cmdcount = 0;
unsigned char *cmdbuf; /* MCE command buffer */
long signal_duration = 0; /* Signal length in us */
struct timeval start_time, end_time;
do_gettimeofday(&start_time);
- count = n / sizeof(int);
-
- cmdbuf = kzalloc(sizeof(int) * MCE_CMDBUF_SIZE, GFP_KERNEL);
+ cmdbuf = kzalloc(sizeof(unsigned) * MCE_CMDBUF_SIZE, GFP_KERNEL);
if (!cmdbuf)
return -ENOMEM;
@@ -774,7 +772,7 @@ static int mceusb_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
out:
kfree(cmdbuf);
- return ret ? ret : n;
+ return ret ? ret : count;
}
/* Sets active IR outputs -- mce devices typically have two */
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index ce595f9..144f3f5 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -546,24 +546,18 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
* number may be larger than TXFCONT (0xff). So in interrupt_handler, it has to
* set TXFCONT to 0xff until buf_count is less than 0xff.
*/
-static int nvt_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
+static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
{
struct nvt_dev *nvt = dev->priv;
unsigned long flags;
- size_t cur_count;
unsigned int i;
u8 iren;
int ret;
spin_lock_irqsave(&nvt->tx.lock, flags);
- if (n >= TX_BUF_LEN) {
- nvt->tx.buf_count = cur_count = TX_BUF_LEN;
- ret = TX_BUF_LEN;
- } else {
- nvt->tx.buf_count = cur_count = n;
- ret = n;
- }
+ ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
+ nvt->tx.buf_count = (ret * sizeof(unsigned));
memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);
@@ -624,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
DEFINE_IR_RAW_EVENT(rawir);
- unsigned int count;
u32 carrier;
u8 sample;
int i;
@@ -637,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
if (nvt->carrier_detect_enabled)
carrier = nvt_rx_carrier_detect(nvt);
- count = nvt->pkts;
- nvt_dbg_verbose("Processing buffer of len %d", count);
+ nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
init_ir_raw_event(&rawir);
- for (i = 0; i < count; i++) {
- nvt->pkts--;
+ for (i = 0; i < nvt->pkts; i++) {
sample = nvt->buf[i];
rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
* SAMPLE_PERIOD);
- if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
- if (nvt->rawir.pulse == rawir.pulse)
- nvt->rawir.duration += rawir.duration;
- else {
- nvt->rawir.duration = rawir.duration;
- nvt->rawir.pulse = rawir.pulse;
- }
- continue;
- }
-
- rawir.duration += nvt->rawir.duration;
-
- init_ir_raw_event(&nvt->rawir);
- nvt->rawir.duration = 0;
- nvt->rawir.pulse = rawir.pulse;
-
- if (sample == BUF_PULSE_BIT)
- rawir.pulse = false;
+ nvt_dbg("Storing %s with duration %d",
+ rawir.pulse ? "pulse" : "space", rawir.duration);
- if (rawir.duration) {
- nvt_dbg("Storing %s with duration %d",
- rawir.pulse ? "pulse" : "space",
- rawir.duration);
-
- ir_raw_event_store_with_filter(nvt->rdev, &rawir);
- }
+ ir_raw_event_store_with_filter(nvt->rdev, &rawir);
/*
* BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
* indicates end of IR signal, but new data incoming. In both
* cases, it means we're ready to call ir_raw_event_handle
*/
- if ((sample == BUF_PULSE_BIT) && nvt->pkts) {
+ if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
ir_raw_event_handle(nvt->rdev);
}
}
+ nvt->pkts = 0;
+
nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
ir_raw_event_handle(nvt->rdev);
- if (nvt->pkts) {
- nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
- nvt->pkts = 0;
- }
-
nvt_dbg_verbose("%s done", __func__);
}
@@ -1054,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
spin_lock_init(&nvt->nvt_lock);
spin_lock_init(&nvt->tx.lock);
- init_ir_raw_event(&nvt->rawir);
ret = -EBUSY;
/* now claim resources */
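
A last hedged sketch for the reworked nvt_tx_ir() above: n now arrives as a count of unsigned samples while the hardware buffer limit stays in bytes, so the driver clamps the sample count and converts back to bytes for buf_count. TX_BUF_LEN and the figures below are hypothetical stand-ins used only to make that conversion visible, and they assume a 4-byte unsigned int.

#include <stdio.h>

#define TX_BUF_LEN 256	/* bytes; hypothetical stand-in for the driver's limit */

/* Clamp a requested sample count to what fits in the TX buffer. */
static unsigned int clamp_tx_samples(unsigned int n_samples)
{
	unsigned int max_samples = TX_BUF_LEN / sizeof(unsigned int);

	return n_samples < max_samples ? n_samples : max_samples;
}

int main(void)
{
	unsigned int queued = clamp_tx_samples(100);

	/* 100 requested samples -> 64 queued, i.e. 256 bytes copied */
	printf("queued %u samples (%zu bytes)\n",
	       queued, queued * sizeof(unsigned int));
	return 0;
}
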
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 1241fc8..0d5e087 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -67,7 +67,6 @@ static int debug;
struct nvt_dev {
struct pnp_dev *pdev;
struct rc_dev *rdev;
- struct ir_raw_event rawir;
spinlock_t nvt_lock;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 873b387..04c2c72 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -84,6 +84,17 @@ struct ir_raw_event_ctrl {
unsigned count;
unsigned wanted_bits;
} rc5_sz;
+ struct mce_kbd_dec {
+ struct input_dev *idev;
+ struct timer_list rx_timeout;
+ char name[64];
+ char phys[64];
+ int state;
+ u8 header;
+ u32 body;
+ unsigned count;
+ unsigned wanted_bits;
+ } mce_kbd;
struct lirc_codec {
struct rc_dev *dev;
struct lirc_driver *drv;
@@ -182,6 +193,13 @@ void ir_raw_init(void);
#define load_sony_decode() 0
#endif
+/* from ir-mce_kbd-decoder.c */
+#ifdef CONFIG_IR_MCE_KBD_DECODER_MODULE
+#define load_mce_kbd_decode() request_module("ir-mce_kbd-decoder")
+#else
+#define load_mce_kbd_decode() 0
+#endif
+
/* from ir-lirc-codec.c */
#ifdef CONFIG_IR_LIRC_CODEC_MODULE
#define load_lirc_codec() request_module("ir-lirc-codec")
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index cc846b2..efc6a51 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -101,21 +101,14 @@ static int loop_set_rx_carrier_range(struct rc_dev *dev, u32 min, u32 max)
return 0;
}
-static int loop_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
+static int loop_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
struct loopback_dev *lodev = dev->priv;
u32 rxmask;
- unsigned count;
unsigned total_duration = 0;
unsigned i;
DEFINE_IR_RAW_EVENT(rawir);
- if (n == 0 || n % sizeof(int)) {
- dprintk("invalid tx buffer size\n");
- return -EINVAL;
- }
-
- count = n / sizeof(int);
for (i = 0; i < count; i++)
total_duration += abs(txbuf[i]);
@@ -142,7 +135,7 @@ static int loop_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
for (i = 0; i < count; i++) {
rawir.pulse = i % 2 ? false : true;
- rawir.duration = abs(txbuf[i]) * 1000;
+ rawir.duration = txbuf[i] * 1000;
if (rawir.duration)
ir_raw_event_store_with_filter(dev, &rawir);
}
@@ -158,7 +151,7 @@ out:
/* Lirc expects this function to take as long as the total duration */
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(total_duration));
- return n;
+ return count;
}
static void loop_set_idle(struct rc_dev *dev, bool enable)
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 3186ac7..51a23f4 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -735,6 +735,7 @@ static struct {
{ RC_TYPE_JVC, "jvc" },
{ RC_TYPE_SONY, "sony" },
{ RC_TYPE_RC5_SZ, "rc-5-sz" },
+ { RC_TYPE_MCE_KBD, "mce_kbd" },
{ RC_TYPE_LIRC, "lirc" },
{ RC_TYPE_OTHER, "other" },
};
@@ -1099,7 +1100,6 @@ int rc_register_device(struct rc_dev *dev)
if (rc < 0)
goto out_input;
}
- mutex_unlock(&dev->lock);
if (dev->change_protocol) {
rc = dev->change_protocol(dev, rc_map->rc_type);
@@ -1107,6 +1107,8 @@ int rc_register_device(struct rc_dev *dev)
goto out_raw;
}
+ mutex_unlock(&dev->lock);
+
IR_dprintk(1, "Registered rc%ld (driver: %s, remote: %s, mode %s)\n",
dev->devno,
dev->driver_name ? dev->driver_name : "unknown",
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 5147767..a166044 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -205,6 +205,7 @@ struct redrat3_dev {
/* rx signal timeout timer */
struct timer_list rx_timeout;
+ u32 hw_timeout;
/* Is the device currently receiving? */
bool recv_in_progress;
@@ -414,20 +415,10 @@ static u32 redrat3_us_to_len(u32 microsec)
}
-/* timer callback to send long trailing space on receive timeout */
+/* timer callback to send reset event */
static void redrat3_rx_timeout(unsigned long data)
{
struct redrat3_dev *rr3 = (struct redrat3_dev *)data;
- DEFINE_IR_RAW_EVENT(rawir);
-
- rawir.pulse = false;
- rawir.duration = rr3->rc->timeout;
- rr3_dbg(rr3->dev, "storing trailing space with duration %d\n",
- rawir.duration);
- ir_raw_event_store_with_filter(rr3->rc, &rawir);
-
- rr3_dbg(rr3->dev, "calling ir_raw_event_handle\n");
- ir_raw_event_handle(rr3->rc);
rr3_dbg(rr3->dev, "calling ir_raw_event_reset\n");
ir_raw_event_reset(rr3->rc);
@@ -438,7 +429,7 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
DEFINE_IR_RAW_EVENT(rawir);
struct redrat3_signal_header header;
struct device *dev;
- int i;
+ int i, trailer = 0;
unsigned long delay;
u32 mod_freq, single_len;
u16 *len_vals;
@@ -464,7 +455,8 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
if (!(header.length >= RR3_HEADER_LENGTH))
dev_warn(dev, "read returned less than rr3 header len\n");
- delay = usecs_to_jiffies(rr3->rc->timeout / 1000);
+ /* Make sure we reset the IR kfifo after a bit of inactivity */
+ delay = usecs_to_jiffies(rr3->hw_timeout);
mod_timer(&rr3->rx_timeout, jiffies + delay);
memcpy(&tmp32, sig_data + RR3_PAUSE_OFFSET, sizeof(tmp32));
@@ -506,9 +498,6 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
u16 val = len_vals[data_vals[i]];
single_len = redrat3_len_to_us((u32)be16_to_cpu(val));
- /* cap the value to IR_MAX_DURATION */
- single_len &= IR_MAX_DURATION;
-
/* we should always get pulse/space/pulse/space samples */
if (i % 2)
rawir.pulse = false;
@@ -516,6 +505,12 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
rawir.pulse = true;
rawir.duration = US_TO_NS(single_len);
+ /* Save initial pulse length to fudge trailer */
+ if (i == 0)
+ trailer = rawir.duration;
+ /* cap the value to IR_MAX_DURATION */
+ rawir.duration &= IR_MAX_DURATION;
+
rr3_dbg(dev, "storing %s with duration %d (i: %d)\n",
rawir.pulse ? "pulse" : "space", rawir.duration, i);
ir_raw_event_store_with_filter(rr3->rc, &rawir);
@@ -525,7 +520,10 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
if (i % 2) {
rawir.pulse = false;
/* this duration is made up, and may not be ideal... */
- rawir.duration = rr3->rc->timeout / 2;
+ if (trailer < US_TO_NS(1000))
+ rawir.duration = US_TO_NS(2800);
+ else
+ rawir.duration = trailer;
rr3_dbg(dev, "storing trailing space with duration %d\n",
rawir.duration);
ir_raw_event_store_with_filter(rr3->rc, &rawir);
@@ -629,36 +627,31 @@ static inline void redrat3_delete(struct redrat3_dev *rr3,
kfree(rr3);
}
-static u32 redrat3_get_timeout(struct device *dev,
- struct rc_dev *rc, struct usb_device *udev)
+static u32 redrat3_get_timeout(struct redrat3_dev *rr3)
{
u32 *tmp;
- u32 timeout = MS_TO_NS(150); /* a sane default, if things go haywire */
+ u32 timeout = MS_TO_US(150); /* a sane default, if things go haywire */
int len, ret, pipe;
len = sizeof(*tmp);
tmp = kzalloc(len, GFP_KERNEL);
if (!tmp) {
- dev_warn(dev, "Memory allocation faillure\n");
+ dev_warn(rr3->dev, "Memory allocation faillure\n");
return timeout;
}
- pipe = usb_rcvctrlpipe(udev, 0);
- ret = usb_control_msg(udev, pipe, RR3_GET_IR_PARAM,
+ pipe = usb_rcvctrlpipe(rr3->udev, 0);
+ ret = usb_control_msg(rr3->udev, pipe, RR3_GET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, HZ * 5);
if (ret != len) {
- dev_warn(dev, "Failed to read timeout from hardware\n");
+ dev_warn(rr3->dev, "Failed to read timeout from hardware\n");
return timeout;
}
- timeout = US_TO_NS(redrat3_len_to_us(be32_to_cpu(*tmp)));
- if (timeout < rc->min_timeout)
- timeout = rc->min_timeout;
- else if (timeout > rc->max_timeout)
- timeout = rc->max_timeout;
+ timeout = redrat3_len_to_us(be32_to_cpu(*tmp));
- rr3_dbg(dev, "Got timeout of %d ms\n", timeout / (1000 * 1000));
+ rr3_dbg(rr3->dev, "Got timeout of %d ms\n", timeout / 1000);
return timeout;
}
@@ -1110,9 +1103,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->priv = rr3;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protos = RC_TYPE_ALL;
- rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT);
- rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT);
- rc->timeout = redrat3_get_timeout(dev, rc, rr3->udev);
+ rc->timeout = US_TO_NS(2750);
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
rc->driver_name = DRIVER_NAME;
@@ -1186,7 +1177,7 @@ static int __devinit redrat3_dev_probe(struct usb_interface *intf,
rr3 = kzalloc(sizeof(*rr3), GFP_KERNEL);
if (rr3 == NULL) {
dev_err(dev, "Memory allocation failure\n");
- goto error;
+ goto no_endpoints;
}
rr3->dev = &intf->dev;
@@ -1242,6 +1233,9 @@ static int __devinit redrat3_dev_probe(struct usb_interface *intf,
if (retval < 0)
goto error;
+ /* store current hardware timeout, in us, will use for kfifo resets */
+ rr3->hw_timeout = redrat3_get_timeout(rr3);
+
/* default.. will get overridden by any sends with a freq defined */
rr3->carrier = 38000;
@@ -1280,6 +1274,7 @@ static void __devexit redrat3_dev_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
rc_unregister_device(rr3->rc);
+ del_timer_sync(&rr3->rx_timeout);
redrat3_delete(rr3, udev);
rr3_ftr(&intf->dev, "RedRat3 IR Transceiver now disconnected\n");
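redrat3 now reads the receive timeout from the hardware once at probe time, keeps it in rr3->hw_timeout in microseconds, and re-arms rx_timeout after every incoming packet; when the timer finally fires, the partially assembled signal is simply dropped with ir_raw_event_reset() instead of being flushed with a fabricated trailing space. A rough sketch of that inactivity-timer pattern, assuming the timer_list helpers of this kernel generation (the demo_* names are hypothetical, this is not the driver code):

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <media/rc-core.h>

struct demo_rx {
	struct rc_dev *rc;
	struct timer_list rx_timeout;
	u32 hw_timeout;		/* microseconds, read from the hardware once */
};

/* Timer callback: nothing arrived for hw_timeout us, drop the partial frame */
static void demo_rx_timeout(unsigned long data)
{
	struct demo_rx *rx = (struct demo_rx *)data;

	ir_raw_event_reset(rx->rc);
}

/* Called for every received packet: push the deadline out again */
static void demo_rx_packet(struct demo_rx *rx)
{
	mod_timer(&rx->rx_timeout, jiffies + usecs_to_jiffies(rx->hw_timeout));
	/* ... decode and store the samples ... */
}

static void demo_rx_init(struct demo_rx *rx)
{
	setup_timer(&rx->rx_timeout, demo_rx_timeout, (unsigned long)rx);
}

/* On disconnect, del_timer_sync(&rx->rx_timeout) after unregistering rc,
 * as the hunk above does for redrat3. */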
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 5d06b89..bec8abc 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -6,8 +6,8 @@
* could probably support others (Winbond WEC102X, NatSemi, etc)
* with minor modifications.
*
- * Original Author: David Härdeman <david@hardeman.nu>
- * Copyright (C) 2009 - 2010 David Härdeman <david@hardeman.nu>
+ * Original Author: David Härdeman <david@hardeman.nu>
+ * Copyright (C) 2009 - 2011 David Härdeman <david@hardeman.nu>
*
* Dedicated to my daughter Matilda, without whose loving attention this
* driver would have been finished in half the time and with a fraction
@@ -577,16 +577,12 @@ wbcir_txmask(struct rc_dev *dev, u32 mask)
}
static int
-wbcir_tx(struct rc_dev *dev, int *buf, u32 bufsize)
+wbcir_tx(struct rc_dev *dev, unsigned *buf, unsigned count)
{
struct wbcir_data *data = dev->priv;
- u32 count;
unsigned i;
unsigned long flags;
- /* bufsize has been sanity checked by the caller */
- count = bufsize / sizeof(int);
-
/* Not sure if this is possible, but better safe than sorry */
spin_lock_irqsave(&data->spinlock, flags);
if (data->txstate != WBCIR_TXSTATE_INACTIVE) {
@@ -876,18 +872,8 @@ wbcir_init_hw(struct wbcir_data *data)
/* prescaler 1.0, tx/rx fifo lvl 16 */
outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2);
- /* Set baud divisor to generate one byte per bit/cell */
- switch (protocol) {
- case IR_PROTOCOL_RC5:
- outb(0xA7, data->sbase + WBCIR_REG_SP3_BGDL);
- break;
- case IR_PROTOCOL_RC6:
- outb(0x53, data->sbase + WBCIR_REG_SP3_BGDL);
- break;
- case IR_PROTOCOL_NEC:
- outb(0x69, data->sbase + WBCIR_REG_SP3_BGDL);
- break;
- }
+ /* Set baud divisor to sample every 10 us */
+ outb(0x0F, data->sbase + WBCIR_REG_SP3_BGDL);
outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
/* Set CEIR mode */
@@ -896,9 +882,9 @@ wbcir_init_hw(struct wbcir_data *data)
inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */
inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */
- /* Disable RX demod, run-length encoding/decoding, set freq span */
+ /* Disable RX demod, enable run-length enc/dec, set freq span */
wbcir_select_bank(data, WBCIR_BANK_7);
- outb(0x10, data->sbase + WBCIR_REG_SP3_RCCFG);
+ outb(0x90, data->sbase + WBCIR_REG_SP3_RCCFG);
/* Disable timer */
wbcir_select_bank(data, WBCIR_BANK_4);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index bb53de7..f574dc0 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -489,6 +489,15 @@ config VIDEO_TCM825X
This is a driver for the Toshiba TCM825x VGA camera sensor.
It is used for example in Nokia N800.
+comment "Flash devices"
+
+config VIDEO_ADP1653
+ tristate "ADP1653 flash support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ ---help---
+ This is a driver for the ADP1653 flash controller. It is used for
+ example in Nokia N900.
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -707,6 +716,8 @@ source "drivers/media/video/cx18/Kconfig"
source "drivers/media/video/saa7164/Kconfig"
+source "drivers/media/video/marvell-ccic/Kconfig"
+
config VIDEO_M32R_AR
tristate "AR devices"
depends on M32R && VIDEO_V4L2
@@ -726,15 +737,6 @@ config VIDEO_M32R_AR_M64278
To compile this driver as a module, choose M here: the
module will be called arv.
-config VIDEO_CAFE_CCIC
- tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
- depends on PCI && I2C && VIDEO_V4L2
- select VIDEO_OV7670
- ---help---
- This is a video4linux2 driver for the Marvell 88ALP01 integrated
- CMOS camera controller. This is the controller found on first-
- generation OLPC systems.
-
config VIDEO_SR030PC30
tristate "SR030PC30 VGA camera sensor support"
depends on I2C && VIDEO_V4L2
@@ -846,6 +848,12 @@ config SOC_CAMERA_OV2640
help
This is a ov2640 camera driver
+config SOC_CAMERA_OV5642
+ tristate "ov5642 camera support"
+ depends on SOC_CAMERA && I2C
+ help
+ This is a V4L2 camera driver for the OmniVision OV5642 sensor
+
config SOC_CAMERA_OV6650
tristate "ov6650 sensor support"
depends on SOC_CAMERA && I2C
@@ -952,6 +960,14 @@ config VIDEO_SAMSUNG_S5P_FIMC
To compile this driver as a module, choose M here: the
module will be called s5p-fimc.
+config VIDEO_ATMEL_ISI
+ tristate "ATMEL Image Sensor Interface (ISI) support"
+ depends on VIDEO_DEV && SOC_CAMERA && ARCH_AT91
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This module makes the ATMEL Image Sensor Interface available
+ as a v4l2 device.
+
config VIDEO_S5P_MIPI_CSIS
tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver"
depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P && VIDEO_V4L2_SUBDEV_API
@@ -961,6 +977,8 @@ config VIDEO_S5P_MIPI_CSIS
To compile this driver as a module, choose M here: the
module will be called s5p-csis.
+source "drivers/media/video/s5p-tv/Kconfig"
+
#
# USB Multimedia device configuration
#
@@ -1056,4 +1074,12 @@ config VIDEO_MEM2MEM_TESTDEV
framework.
+config VIDEO_SAMSUNG_S5P_MFC
+ tristate "Samsung S5P MFC 5.1 Video Codec"
+ depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
+ select VIDEOBUF2_DMA_CONTIG
+ default n
+ help
+ MFC 5.1 driver for V4L2.
+
endif # V4L_MEM2MEM_DRIVERS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index f0fecd6..2723900 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
+obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o
obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
obj-$(CONFIG_SOC_CAMERA_OV2640) += ov2640.o
+obj-$(CONFIG_SOC_CAMERA_OV5642) += ov5642.o
obj-$(CONFIG_SOC_CAMERA_OV6650) += ov6650.o
obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o
@@ -127,7 +129,8 @@ obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
-obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
+obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
+obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
@@ -166,8 +169,11 @@ obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
+obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_TV) += s5p-tv/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
diff --git a/drivers/media/video/adp1653.c b/drivers/media/video/adp1653.c
new file mode 100644
index 0000000..be7befd
--- /dev/null
+++ b/drivers/media/video/adp1653.c
@@ -0,0 +1,491 @@
+/*
+ * drivers/media/video/adp1653.c
+ *
+ * Copyright (C) 2008--2011 Nokia Corporation
+ *
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * Contributors:
+ * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Tuukka Toivonen <tuukkat76@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * TODO:
+ * - fault interrupt handling
+ * - hardware strobe
+ * - power doesn't need to be ON if all lights are off
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <media/adp1653.h>
+#include <media/v4l2-device.h>
+
+#define TIMEOUT_MAX 820000
+#define TIMEOUT_STEP 54600
+#define TIMEOUT_MIN (TIMEOUT_MAX - ADP1653_REG_CONFIG_TMR_SET_MAX \
+ * TIMEOUT_STEP)
+#define TIMEOUT_US_TO_CODE(t) ((TIMEOUT_MAX + (TIMEOUT_STEP / 2) - (t)) \
+ / TIMEOUT_STEP)
+#define TIMEOUT_CODE_TO_US(c) (TIMEOUT_MAX - (c) * TIMEOUT_STEP)
+
+/* Write values into ADP1653 registers. */
+static int adp1653_update_hw(struct adp1653_flash *flash)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ u8 out_sel;
+ u8 config = 0;
+ int rval;
+
+ out_sel = ADP1653_INDICATOR_INTENSITY_uA_TO_REG(
+ flash->indicator_intensity->val)
+ << ADP1653_REG_OUT_SEL_ILED_SHIFT;
+
+ switch (flash->led_mode->val) {
+ case V4L2_FLASH_LED_MODE_NONE:
+ break;
+ case V4L2_FLASH_LED_MODE_FLASH:
+ /* Flash mode, light on with strobe, duration from timer */
+ config = ADP1653_REG_CONFIG_TMR_CFG;
+ config |= TIMEOUT_US_TO_CODE(flash->flash_timeout->val)
+ << ADP1653_REG_CONFIG_TMR_SET_SHIFT;
+ break;
+ case V4L2_FLASH_LED_MODE_TORCH:
+ /* Torch mode, light immediately on, duration indefinite */
+ out_sel |= ADP1653_FLASH_INTENSITY_mA_TO_REG(
+ flash->torch_intensity->val)
+ << ADP1653_REG_OUT_SEL_HPLED_SHIFT;
+ break;
+ }
+
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel);
+ if (rval < 0)
+ return rval;
+
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_CONFIG, config);
+ if (rval < 0)
+ return rval;
+
+ return 0;
+}
+
+static int adp1653_get_fault(struct adp1653_flash *flash)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ int fault;
+ int rval;
+
+ fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
+ if (IS_ERR_VALUE(fault))
+ return fault;
+
+ flash->fault |= fault;
+
+ if (!flash->fault)
+ return 0;
+
+ /* Clear faults. */
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
+ if (IS_ERR_VALUE(rval))
+ return rval;
+
+ flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE;
+
+ rval = adp1653_update_hw(flash);
+ if (IS_ERR_VALUE(rval))
+ return rval;
+
+ return flash->fault;
+}
+
+static int adp1653_strobe(struct adp1653_flash *flash, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ u8 out_sel = ADP1653_INDICATOR_INTENSITY_uA_TO_REG(
+ flash->indicator_intensity->val)
+ << ADP1653_REG_OUT_SEL_ILED_SHIFT;
+ int rval;
+
+ if (flash->led_mode->val != V4L2_FLASH_LED_MODE_FLASH)
+ return -EBUSY;
+
+ if (!enable)
+ return i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL,
+ out_sel);
+
+ out_sel |= ADP1653_FLASH_INTENSITY_mA_TO_REG(
+ flash->flash_intensity->val)
+ << ADP1653_REG_OUT_SEL_HPLED_SHIFT;
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel);
+ if (rval)
+ return rval;
+
+ /* Software strobe using i2c */
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE,
+ ADP1653_REG_SW_STROBE_SW_STROBE);
+ if (rval)
+ return rval;
+ return i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE, 0);
+}
+
+/* --------------------------------------------------------------------------
+ * V4L2 controls
+ */
+
+static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct adp1653_flash *flash =
+ container_of(ctrl->handler, struct adp1653_flash, ctrls);
+ int rval;
+
+ rval = adp1653_get_fault(flash);
+ if (IS_ERR_VALUE(rval))
+ return rval;
+
+ ctrl->cur.val = 0;
+
+ if (flash->fault & ADP1653_REG_FAULT_FLT_SCP)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
+ if (flash->fault & ADP1653_REG_FAULT_FLT_OT)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
+ if (flash->fault & ADP1653_REG_FAULT_FLT_TMR)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT;
+ if (flash->fault & ADP1653_REG_FAULT_FLT_OV)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE;
+
+ flash->fault = 0;
+
+ return 0;
+}
+
+static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct adp1653_flash *flash =
+ container_of(ctrl->handler, struct adp1653_flash, ctrls);
+ int rval;
+
+ rval = adp1653_get_fault(flash);
+ if (IS_ERR_VALUE(rval))
+ return rval;
+ if ((rval & (ADP1653_REG_FAULT_FLT_SCP |
+ ADP1653_REG_FAULT_FLT_OT |
+ ADP1653_REG_FAULT_FLT_OV)) &&
+ (ctrl->id == V4L2_CID_FLASH_STROBE ||
+ ctrl->id == V4L2_CID_FLASH_TORCH_INTENSITY ||
+ ctrl->id == V4L2_CID_FLASH_LED_MODE))
+ return -EBUSY;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_STROBE:
+ return adp1653_strobe(flash, 1);
+ case V4L2_CID_FLASH_STROBE_STOP:
+ return adp1653_strobe(flash, 0);
+ }
+
+ return adp1653_update_hw(flash);
+}
+
+static const struct v4l2_ctrl_ops adp1653_ctrl_ops = {
+ .g_volatile_ctrl = adp1653_get_ctrl,
+ .s_ctrl = adp1653_set_ctrl,
+};
+
+static int adp1653_init_controls(struct adp1653_flash *flash)
+{
+ struct v4l2_ctrl *fault;
+
+ v4l2_ctrl_handler_init(&flash->ctrls, 9);
+
+ flash->led_mode =
+ v4l2_ctrl_new_std_menu(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_LED_MODE,
+ V4L2_FLASH_LED_MODE_TORCH, ~0x7, 0);
+ v4l2_ctrl_new_std_menu(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_STROBE_SOURCE,
+ V4L2_FLASH_STROBE_SOURCE_SOFTWARE, ~0x1, 0);
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
+ flash->flash_timeout =
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_TIMEOUT, TIMEOUT_MIN,
+ flash->platform_data->max_flash_timeout,
+ TIMEOUT_STEP,
+ flash->platform_data->max_flash_timeout);
+ flash->flash_intensity =
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_INTENSITY,
+ ADP1653_FLASH_INTENSITY_MIN,
+ flash->platform_data->max_flash_intensity,
+ 1, flash->platform_data->max_flash_intensity);
+ flash->torch_intensity =
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_TORCH_INTENSITY,
+ ADP1653_TORCH_INTENSITY_MIN,
+ flash->platform_data->max_torch_intensity,
+ ADP1653_FLASH_INTENSITY_STEP,
+ flash->platform_data->max_torch_intensity);
+ flash->indicator_intensity =
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_INDICATOR_INTENSITY,
+ ADP1653_INDICATOR_INTENSITY_MIN,
+ flash->platform_data->max_indicator_intensity,
+ ADP1653_INDICATOR_INTENSITY_STEP,
+ ADP1653_INDICATOR_INTENSITY_MIN);
+ fault = v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_FAULT, 0,
+ V4L2_FLASH_FAULT_OVER_VOLTAGE
+ | V4L2_FLASH_FAULT_OVER_TEMPERATURE
+ | V4L2_FLASH_FAULT_SHORT_CIRCUIT, 0, 0);
+
+ if (flash->ctrls.error)
+ return flash->ctrls.error;
+
+ fault->is_volatile = 1;
+
+ flash->subdev.ctrl_handler = &flash->ctrls;
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static int
+adp1653_init_device(struct adp1653_flash *flash)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ int rval;
+
+ /* Clear FAULT register by writing zero to OUT_SEL */
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
+ if (rval < 0) {
+ dev_err(&client->dev, "failed writing fault register\n");
+ return -EIO;
+ }
+
+ mutex_lock(&flash->ctrls.lock);
+ /* Reset faults before reading new ones. */
+ flash->fault = 0;
+ rval = adp1653_get_fault(flash);
+ mutex_unlock(&flash->ctrls.lock);
+ if (rval > 0) {
+ dev_err(&client->dev, "faults detected: 0x%1.1x\n", rval);
+ return -EIO;
+ }
+
+ mutex_lock(&flash->ctrls.lock);
+ rval = adp1653_update_hw(flash);
+ mutex_unlock(&flash->ctrls.lock);
+ if (rval) {
+ dev_err(&client->dev,
+ "adp1653_update_hw failed at %s\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+__adp1653_set_power(struct adp1653_flash *flash, int on)
+{
+ int ret;
+
+ ret = flash->platform_data->power(&flash->subdev, on);
+ if (ret < 0)
+ return ret;
+
+ if (!on)
+ return 0;
+
+ ret = adp1653_init_device(flash);
+ if (ret < 0)
+ flash->platform_data->power(&flash->subdev, 0);
+
+ return ret;
+}
+
+static int
+adp1653_set_power(struct v4l2_subdev *subdev, int on)
+{
+ struct adp1653_flash *flash = to_adp1653_flash(subdev);
+ int ret = 0;
+
+ mutex_lock(&flash->power_lock);
+
+ /* If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (flash->power_count == !on) {
+ ret = __adp1653_set_power(flash, !!on);
+ if (ret < 0)
+ goto done;
+ }
+
+ /* Update the power count. */
+ flash->power_count += on ? 1 : -1;
+ WARN_ON(flash->power_count < 0);
+
+done:
+ mutex_unlock(&flash->power_lock);
+ return ret;
+}
+
+static int adp1653_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return adp1653_set_power(sd, 1);
+}
+
+static int adp1653_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return adp1653_set_power(sd, 0);
+}
+
+static const struct v4l2_subdev_core_ops adp1653_core_ops = {
+ .s_power = adp1653_set_power,
+};
+
+static const struct v4l2_subdev_ops adp1653_ops = {
+ .core = &adp1653_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops adp1653_internal_ops = {
+ .open = adp1653_open,
+ .close = adp1653_close,
+};
+
+/* --------------------------------------------------------------------------
+ * I2C driver
+ */
+#ifdef CONFIG_PM
+
+static int adp1653_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct adp1653_flash *flash = to_adp1653_flash(subdev);
+
+ if (!flash->power_count)
+ return 0;
+
+ return __adp1653_set_power(flash, 0);
+}
+
+static int adp1653_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct adp1653_flash *flash = to_adp1653_flash(subdev);
+
+ if (!flash->power_count)
+ return 0;
+
+ return __adp1653_set_power(flash, 1);
+}
+
+#else
+
+#define adp1653_suspend NULL
+#define adp1653_resume NULL
+
+#endif /* CONFIG_PM */
+
+static int adp1653_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct adp1653_flash *flash;
+ int ret;
+
+ flash = kzalloc(sizeof(*flash), GFP_KERNEL);
+ if (flash == NULL)
+ return -ENOMEM;
+
+ flash->platform_data = client->dev.platform_data;
+
+ mutex_init(&flash->power_lock);
+
+ v4l2_i2c_subdev_init(&flash->subdev, client, &adp1653_ops);
+ flash->subdev.internal_ops = &adp1653_internal_ops;
+ flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ adp1653_init_controls(flash);
+
+ ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
+ if (ret < 0)
+ kfree(flash);
+
+ return ret;
+}
+
+static int __exit adp1653_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct adp1653_flash *flash = to_adp1653_flash(subdev);
+
+ v4l2_device_unregister_subdev(&flash->subdev);
+ v4l2_ctrl_handler_free(&flash->ctrls);
+ media_entity_cleanup(&flash->subdev.entity);
+ kfree(flash);
+ return 0;
+}
+
+static const struct i2c_device_id adp1653_id_table[] = {
+ { ADP1653_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adp1653_id_table);
+
+static struct dev_pm_ops adp1653_pm_ops = {
+ .suspend = adp1653_suspend,
+ .resume = adp1653_resume,
+};
+
+static struct i2c_driver adp1653_i2c_driver = {
+ .driver = {
+ .name = ADP1653_NAME,
+ .pm = &adp1653_pm_ops,
+ },
+ .probe = adp1653_probe,
+ .remove = __exit_p(adp1653_remove),
+ .id_table = adp1653_id_table,
+};
+
+static int __init adp1653_init(void)
+{
+ int rval;
+
+ rval = i2c_add_driver(&adp1653_i2c_driver);
+ if (rval)
+ printk(KERN_ALERT "%s: failed at i2c_add_driver\n", __func__);
+
+ return rval;
+}
+
+static void __exit adp1653_exit(void)
+{
+ i2c_del_driver(&adp1653_i2c_driver);
+}
+
+module_init(adp1653_init);
+module_exit(adp1653_exit);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
+MODULE_DESCRIPTION("Analog Devices ADP1653 LED flash driver");
+MODULE_LICENSE("GPL");
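The TIMEOUT_* macros near the top of adp1653.c map the flash timeout control onto the chip's TMR_SET register field: code 0 selects the longest timeout (820 ms), every step subtracts 54.6 ms, and TIMEOUT_US_TO_CODE rounds a requested value to the nearest representable step. A standalone sketch of that conversion; the 4-bit field width (ADP1653_REG_CONFIG_TMR_SET_MAX = 0xf) is an assumption taken from the matching header, not shown in this patch:

#include <assert.h>
#include <stdio.h>

/* Mirrors the TIMEOUT_* macros in adp1653.c; TMR_SET_MAX = 0xf is assumed. */
#define TMR_SET_MAX	0xf
#define TIMEOUT_MAX	820000
#define TIMEOUT_STEP	54600
#define TIMEOUT_MIN	(TIMEOUT_MAX - TMR_SET_MAX * TIMEOUT_STEP)
#define US_TO_CODE(t)	((TIMEOUT_MAX + (TIMEOUT_STEP / 2) - (t)) / TIMEOUT_STEP)
#define CODE_TO_US(c)	(TIMEOUT_MAX - (c) * TIMEOUT_STEP)

int main(void)
{
	int t;

	/* Every representable timeout survives a round trip unchanged. */
	for (t = TIMEOUT_MIN; t <= TIMEOUT_MAX; t += TIMEOUT_STEP)
		assert(CODE_TO_US(US_TO_CODE(t)) == t);

	/* Arbitrary values are rounded to the nearest step: 400 ms maps to
	 * code 8, which reads back as 383.2 ms. */
	printf("400000 us -> code %d -> %d us\n",
	       US_TO_CODE(400000), CODE_TO_US(US_TO_CODE(400000)));
	return 0;
}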
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index f989f28..b6ed44a 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
@@ -54,7 +53,7 @@
*/
#define USE_INT 0 /* Don't modify */
-#define VERSION "0.04"
+#define VERSION "0.0.5"
#define ar_inl(addr) inl((unsigned long)(addr))
#define ar_outl(val, addr) outl((unsigned long)(val), (unsigned long)(addr))
@@ -404,7 +403,6 @@ static int ar_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card));
strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info));
- vcap->version = KERNEL_VERSION(0, 0, 4);
vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
@@ -879,3 +877,4 @@ module_exit(ar_cleanup_module);
MODULE_AUTHOR("Takeo Takahashi <takahashi.takeo@renesas.com>");
MODULE_DESCRIPTION("Colour AR M64278(VGA) for Video4Linux");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
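This hunk is part of the tree-wide V4L2 cleanup in this merge: drivers drop their private KERNEL_VERSION codes and the cap->version assignment in querycap, and advertise the driver version through MODULE_VERSION() instead (the V4L2 core is expected to fill in cap->version on their behalf; that core behaviour is an assumption here, not visible in the hunk). The same pattern repeats below for au0828, bttv, bw-qcam and c-qcam. A minimal sketch of the resulting querycap shape, with hypothetical demo names:

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/videodev2.h>

/* Sketch of the post-cleanup pattern; the "demo" identifiers are made up. */
static int demo_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	strlcpy(cap->driver, "demo", sizeof(cap->driver));
	strlcpy(cap->card, "Demo capture card", sizeof(cap->card));
	strlcpy(cap->bus_info, "Platform", sizeof(cap->bus_info));
	/* no cap->version assignment: left to the V4L2 core */
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
	return 0;
}

MODULE_VERSION("1.0.0");	/* replaces the per-driver KERNEL_VERSION code */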
diff --git a/drivers/media/video/atmel-isi.c b/drivers/media/video/atmel-isi.c
new file mode 100644
index 0000000..7b89f00
--- /dev/null
+++ b/drivers/media/video/atmel-isi.c
@@ -0,0 +1,1048 @@
+/*
+ * Copyright (c) 2011 Atmel Corporation
+ * Josh Wu, <josh.wu@atmel.com>
+ *
+ * Based on previous work by Lars Haring, <lars.haring@atmel.com>
+ * and Sedji Gaouaou
+ * Based on the bttv driver for Bt848 with respective copyright holders
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/atmel-isi.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define MAX_BUFFER_NUM 32
+#define MAX_SUPPORT_WIDTH 2048
+#define MAX_SUPPORT_HEIGHT 2048
+#define VID_LIMIT_BYTES (16 * 1024 * 1024)
+#define MIN_FRAME_RATE 15
+#define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE)
+
+/* ISI states */
+enum {
+ ISI_STATE_IDLE = 0,
+ ISI_STATE_READY,
+ ISI_STATE_WAIT_SOF,
+};
+
+/* Frame buffer descriptor */
+struct fbd {
+ /* Physical address of the frame buffer */
+ u32 fb_address;
+ /* DMA Control Register(only in HISI2) */
+ u32 dma_ctrl;
+ /* Physical address of the next fbd */
+ u32 next_fbd_address;
+};
+
+static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl)
+{
+ fb_desc->dma_ctrl = ctrl;
+}
+
+struct isi_dma_desc {
+ struct list_head list;
+ struct fbd *p_fbd;
+ u32 fbd_phys;
+};
+
+/* Frame buffer data */
+struct frame_buffer {
+ struct vb2_buffer vb;
+ struct isi_dma_desc *p_dma_desc;
+ struct list_head list;
+};
+
+struct atmel_isi {
+ /* Protects the access of variables shared with the ISR */
+ spinlock_t lock;
+ void __iomem *regs;
+
+ int sequence;
+ /* State of the ISI module in capturing mode */
+ int state;
+
+ /* Wait queue for waiting for SOF */
+ wait_queue_head_t vsync_wq;
+
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ /* Allocate descriptors for dma buffer use */
+ struct fbd *p_fb_descriptors;
+ u32 fb_descriptors_phys;
+ struct list_head dma_desc_head;
+ struct isi_dma_desc dma_desc[MAX_BUFFER_NUM];
+
+ struct completion complete;
+ struct clk *pclk;
+ unsigned int irq;
+
+ struct isi_platform_data *pdata;
+
+ struct list_head video_buffer_list;
+ struct frame_buffer *active;
+
+ struct soc_camera_device *icd;
+ struct soc_camera_host soc_host;
+};
+
+static void isi_writel(struct atmel_isi *isi, u32 reg, u32 val)
+{
+ writel(val, isi->regs + reg);
+}
+static u32 isi_readl(struct atmel_isi *isi, u32 reg)
+{
+ return readl(isi->regs + reg);
+}
+
+static int configure_geometry(struct atmel_isi *isi, u32 width,
+ u32 height, enum v4l2_mbus_pixelcode code)
+{
+ u32 cfg2, cr;
+
+ switch (code) {
+ /* YUV, including grey */
+ case V4L2_MBUS_FMT_Y8_1X8:
+ cr = ISI_CFG2_GRAYSCALE;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_MODE_3;
+ break;
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_MODE_2;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_MODE_1;
+ break;
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_DEFAULT;
+ break;
+ /* RGB, TODO */
+ default:
+ return -EINVAL;
+ }
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+
+ cfg2 = isi_readl(isi, ISI_CFG2);
+ cfg2 |= cr;
+ /* Set width */
+ cfg2 &= ~(ISI_CFG2_IM_HSIZE_MASK);
+ cfg2 |= ((width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) &
+ ISI_CFG2_IM_HSIZE_MASK;
+ /* Set height */
+ cfg2 &= ~(ISI_CFG2_IM_VSIZE_MASK);
+ cfg2 |= ((height - 1) << ISI_CFG2_IM_VSIZE_OFFSET)
+ & ISI_CFG2_IM_VSIZE_MASK;
+ isi_writel(isi, ISI_CFG2, cfg2);
+
+ return 0;
+}
+
+static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
+{
+ if (isi->active) {
+ struct vb2_buffer *vb = &isi->active->vb;
+ struct frame_buffer *buf = isi->active;
+
+ list_del_init(&buf->list);
+ do_gettimeofday(&vb->v4l2_buf.timestamp);
+ vb->v4l2_buf.sequence = isi->sequence++;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+
+ if (list_empty(&isi->video_buffer_list)) {
+ isi->active = NULL;
+ } else {
+ /* start next dma frame. */
+ isi->active = list_entry(isi->video_buffer_list.next,
+ struct frame_buffer, list);
+ isi_writel(isi, ISI_DMA_C_DSCR,
+ isi->active->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+ }
+ return IRQ_HANDLED;
+}
+
+/* ISI interrupt service routine */
+static irqreturn_t isi_interrupt(int irq, void *dev_id)
+{
+ struct atmel_isi *isi = dev_id;
+ u32 status, mask, pending;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&isi->lock);
+
+ status = isi_readl(isi, ISI_STATUS);
+ mask = isi_readl(isi, ISI_INTMASK);
+ pending = status & mask;
+
+ if (pending & ISI_CTRL_SRST) {
+ complete(&isi->complete);
+ isi_writel(isi, ISI_INTDIS, ISI_CTRL_SRST);
+ ret = IRQ_HANDLED;
+ } else if (pending & ISI_CTRL_DIS) {
+ complete(&isi->complete);
+ isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
+ ret = IRQ_HANDLED;
+ } else {
+ if ((pending & ISI_SR_VSYNC) &&
+ (isi->state == ISI_STATE_IDLE)) {
+ isi->state = ISI_STATE_READY;
+ wake_up_interruptible(&isi->vsync_wq);
+ ret = IRQ_HANDLED;
+ }
+ if (likely(pending & ISI_SR_CXFR_DONE))
+ ret = atmel_isi_handle_streaming(isi);
+ }
+
+ spin_unlock(&isi->lock);
+ return ret;
+}
+
+#define WAIT_ISI_RESET 1
+#define WAIT_ISI_DISABLE 0
+static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
+{
+ unsigned long timeout;
+ /*
+ * The reset or disable will only succeed if we have a
+ * pixel clock from the camera.
+ */
+ init_completion(&isi->complete);
+
+ if (wait_reset) {
+ isi_writel(isi, ISI_INTEN, ISI_CTRL_SRST);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_SRST);
+ } else {
+ isi_writel(isi, ISI_INTEN, ISI_CTRL_DIS);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ }
+
+ timeout = wait_for_completion_timeout(&isi->complete,
+ msecs_to_jiffies(100));
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ unsigned long size;
+ int ret, bytes_per_line;
+
+ /* Reset ISI */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
+ if (ret < 0) {
+ dev_err(icd->parent, "Reset ISI timed out\n");
+ return ret;
+ }
+ /* Disable all interrupts */
+ isi_writel(isi, ISI_INTDIS, ~0UL);
+
+ bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ size = bytes_per_line * icd->user_height;
+
+ if (!*nbuffers || *nbuffers > MAX_BUFFER_NUM)
+ *nbuffers = MAX_BUFFER_NUM;
+
+ if (size * *nbuffers > VID_LIMIT_BYTES)
+ *nbuffers = VID_LIMIT_BYTES / size;
+
+ *nplanes = 1;
+ sizes[0] = size;
+ alloc_ctxs[0] = isi->alloc_ctx;
+
+ isi->sequence = 0;
+ isi->active = NULL;
+
+ dev_dbg(icd->parent, "%s, count=%d, size=%ld\n", __func__,
+ *nbuffers, size);
+
+ return 0;
+}
+
+static int buffer_init(struct vb2_buffer *vb)
+{
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+
+ buf->p_dma_desc = NULL;
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ unsigned long size;
+ struct isi_dma_desc *desc;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ size = bytes_per_line * icd->user_height;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(icd->parent, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(&buf->vb, 0, size);
+
+ if (!buf->p_dma_desc) {
+ if (list_empty(&isi->dma_desc_head)) {
+ dev_err(icd->parent, "Not enough dma descriptors.\n");
+ return -EINVAL;
+ } else {
+ /* Get an available descriptor */
+ desc = list_entry(isi->dma_desc_head.next,
+ struct isi_dma_desc, list);
+ /* Delete the descriptor since now it is used */
+ list_del_init(&desc->list);
+
+ /* Initialize the dma descriptor */
+ desc->p_fbd->fb_address =
+ vb2_dma_contig_plane_paddr(vb, 0);
+ desc->p_fbd->next_fbd_address = 0;
+ set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB);
+
+ buf->p_dma_desc = desc;
+ }
+ }
+ return 0;
+}
+
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+
+ /* This descriptor is available now and we add to head list */
+ if (buf->p_dma_desc)
+ list_add(&buf->p_dma_desc->list, &isi->dma_desc_head);
+}
+
+static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
+{
+ u32 ctrl, cfg1;
+
+ cfg1 = isi_readl(isi, ISI_CFG1);
+ /* Enable irq: cxfr for the codec path, pxfr for the preview path */
+ isi_writel(isi, ISI_INTEN,
+ ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+ /* Check if already in a frame */
+ if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) {
+ dev_err(isi->icd->parent, "Already in frame handling.\n");
+ return;
+ }
+
+ isi_writel(isi, ISI_DMA_C_DSCR, buffer->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+
+ /* Enable linked list */
+ cfg1 |= isi->pdata->frate | ISI_CFG1_DISCR;
+
+ /* Enable codec path and ISI */
+ ctrl = ISI_CTRL_CDC | ISI_CTRL_EN;
+ isi_writel(isi, ISI_CTRL, ctrl);
+ isi_writel(isi, ISI_CFG1, cfg1);
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&isi->lock, flags);
+ list_add_tail(&buf->list, &isi->video_buffer_list);
+
+ if (isi->active == NULL) {
+ isi->active = buf;
+ start_dma(isi, buf);
+ }
+ spin_unlock_irqrestore(&isi->lock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+
+ u32 sr = 0;
+ int ret;
+
+ spin_lock_irq(&isi->lock);
+ isi->state = ISI_STATE_IDLE;
+ /* Clear any pending SOF interrupt */
+ sr = isi_readl(isi, ISI_STATUS);
+ /* Enable VSYNC interrupt for SOF */
+ isi_writel(isi, ISI_INTEN, ISI_SR_VSYNC);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_EN);
+ spin_unlock_irq(&isi->lock);
+
+ dev_dbg(icd->parent, "Waiting for SOF\n");
+ ret = wait_event_interruptible(isi->vsync_wq,
+ isi->state != ISI_STATE_IDLE);
+ if (ret)
+ return ret;
+
+ if (isi->state != ISI_STATE_READY)
+ return -EIO;
+
+ spin_lock_irq(&isi->lock);
+ isi->state = ISI_STATE_WAIT_SOF;
+ isi_writel(isi, ISI_INTDIS, ISI_SR_VSYNC);
+ spin_unlock_irq(&isi->lock);
+
+ return 0;
+}
+
+/* abort streaming and wait for last buffer */
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct frame_buffer *buf, *node;
+ int ret = 0;
+ unsigned long timeout;
+
+ spin_lock_irq(&isi->lock);
+ isi->active = NULL;
+ /* Release all active buffers */
+ list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irq(&isi->lock);
+
+ timeout = jiffies + FRAME_INTERVAL_MILLI_SEC * HZ;
+ /* Wait until the end of the current frame. */
+ while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) &&
+ time_before(jiffies, timeout))
+ msleep(1);
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(icd->parent,
+ "Timeout waiting for finishing codec request\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Disable interrupts */
+ isi_writel(isi, ISI_INTDIS,
+ ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+ /* Disable ISI and wait for it is done */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE);
+ if (ret < 0)
+ dev_err(icd->parent, "Disable ISI timed out\n");
+
+ return ret;
+}
+
+static struct vb2_ops isi_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_cleanup = buffer_cleanup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = soc_camera_unlock,
+ .wait_finish = soc_camera_lock,
+};
+
+/* ------------------------------------------------------------------
+ SOC camera operations for the device
+ ------------------------------------------------------------------*/
+static int isi_camera_init_videobuf(struct vb2_queue *q,
+ struct soc_camera_device *icd)
+{
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP;
+ q->drv_priv = icd;
+ q->buf_struct_size = sizeof(struct frame_buffer);
+ q->ops = &isi_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+
+ return vb2_queue_init(q);
+}
+
+static int isi_camera_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->parent, "Format %x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ dev_dbg(icd->parent, "Plan to set format %dx%d\n",
+ pix->width, pix->height);
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ ret = configure_geometry(isi, pix->width, pix->height, xlate->code);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
+ dev_dbg(icd->parent, "Finally set format %dx%d\n",
+ pix->width, pix->height);
+
+ return ret;
+}
+
+static int isi_camera_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ u32 pixfmt = pix->pixelformat;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (pixfmt && !xlate) {
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ /* limit to Atmel ISI hardware capabilities */
+ if (pix->height > MAX_SUPPORT_HEIGHT)
+ pix->height = MAX_SUPPORT_HEIGHT;
+ if (pix->width > MAX_SUPPORT_WIDTH)
+ pix->width = MAX_SUPPORT_WIDTH;
+
+ /* limit to sensor capabilities */
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->colorspace = mf.colorspace;
+
+ switch (mf.field) {
+ case V4L2_FIELD_ANY:
+ pix->field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_NONE:
+ break;
+ default:
+ dev_err(icd->parent, "Field type %d unsupported.\n",
+ mf.field);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct soc_mbus_pixelfmt isi_camera_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "Packed YUV422 16 bit",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ },
+};
+
+/* This will be corrected as we get more formats */
+static bool isi_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+static unsigned long make_bus_param(struct atmel_isi *isi)
+{
+ unsigned long flags;
+ /*
+ * Platform specified synchronization and pixel clock polarities are
+ * only a recommendation and are only used during probing. Atmel ISI
+ * camera interface only works in master mode, i.e., uses HSYNC and
+ * VSYNC signals from the sensor
+ */
+ flags = SOCAM_MASTER |
+ SOCAM_HSYNC_ACTIVE_HIGH |
+ SOCAM_HSYNC_ACTIVE_LOW |
+ SOCAM_VSYNC_ACTIVE_HIGH |
+ SOCAM_VSYNC_ACTIVE_LOW |
+ SOCAM_PCLK_SAMPLE_RISING |
+ SOCAM_PCLK_SAMPLE_FALLING |
+ SOCAM_DATA_ACTIVE_HIGH;
+
+ if (isi->pdata->data_width_flags & ISI_DATAWIDTH_10)
+ flags |= SOCAM_DATAWIDTH_10;
+
+ if (isi->pdata->data_width_flags & ISI_DATAWIDTH_8)
+ flags |= SOCAM_DATAWIDTH_8;
+
+ if (flags & SOCAM_DATAWIDTH_MASK)
+ return flags;
+
+ return 0;
+}
+
+static int isi_camera_try_bus_param(struct soc_camera_device *icd,
+ unsigned char buswidth)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ unsigned long camera_flags;
+ int ret;
+
+ camera_flags = icd->ops->query_bus_param(icd);
+ ret = soc_camera_bus_param_compatible(camera_flags,
+ make_bus_param(isi));
+ if (!ret)
+ return -EINVAL;
+ return 0;
+}
+
+
+static int isi_camera_get_formats(struct soc_camera_device *icd,
+ unsigned int idx,
+ struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int formats = 0, ret;
+ /* sensor format */
+ enum v4l2_mbus_pixelcode code;
+ /* soc camera host format */
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(icd->parent,
+ "Invalid format code #%u: %d\n", idx, code);
+ return 0;
+ }
+
+ /* This also checks support for the requested bits-per-sample */
+ ret = isi_camera_try_bus_param(icd, fmt->bits_per_sample);
+ if (ret < 0) {
+ dev_err(icd->parent,
+ "Fail to try the bus parameters.\n");
+ return 0;
+ }
+
+ switch (code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = &isi_camera_formats[0];
+ xlate->code = code;
+ xlate++;
+ dev_dbg(icd->parent, "Providing format %s using code %d\n",
+ isi_camera_formats[0].name, code);
+ }
+ break;
+ default:
+ if (!isi_camera_packing_supported(fmt))
+ return 0;
+ if (xlate)
+ dev_dbg(icd->parent,
+ "Providing format %s in pass-through mode\n",
+ fmt->name);
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ }
+
+ return formats;
+}
+
+/* Called with .video_lock held */
+static int isi_camera_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ int ret;
+
+ if (isi->icd)
+ return -EBUSY;
+
+ ret = clk_enable(isi->pclk);
+ if (ret)
+ return ret;
+
+ isi->icd = icd;
+ dev_dbg(icd->parent, "Atmel ISI Camera driver attached to camera %d\n",
+ icd->devnum);
+ return 0;
+}
+/* Called with .video_lock held */
+static void isi_camera_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+
+ BUG_ON(icd != isi->icd);
+
+ clk_disable(isi->pclk);
+ isi->icd = NULL;
+
+ dev_dbg(icd->parent, "Atmel ISI Camera driver detached from camera %d\n",
+ icd->devnum);
+}
+
+static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static int isi_camera_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "atmel-isi");
+ strcpy(cap->card, "Atmel Image Sensor Interface");
+ cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING);
+ return 0;
+}
+
+static int isi_camera_set_bus_param(struct soc_camera_device *icd, u32 pixfmt)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ unsigned long bus_flags, camera_flags, common_flags;
+ int ret;
+ u32 cfg1 = 0;
+
+ camera_flags = icd->ops->query_bus_param(icd);
+
+ bus_flags = make_bus_param(isi);
+ common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags);
+ dev_dbg(icd->parent, "Flags cam: 0x%lx host: 0x%lx common: 0x%lx\n",
+ camera_flags, bus_flags, common_flags);
+ if (!common_flags)
+ return -EINVAL;
+
+ /* Make choises, based on platform preferences */
+ if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
+ if (isi->pdata->hsync_act_low)
+ common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
+ if (isi->pdata->vsync_act_low)
+ common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
+ (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
+ if (isi->pdata->pclk_act_falling)
+ common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
+ else
+ common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
+ }
+
+ ret = icd->ops->set_bus_param(icd, common_flags);
+ if (ret < 0) {
+ dev_dbg(icd->parent, "Camera set_bus_param(%lx) returned %d\n",
+ common_flags, ret);
+ return ret;
+ }
+
+ /* set bus param for ISI */
+ if (common_flags & SOCAM_HSYNC_ACTIVE_LOW)
+ cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW;
+ if (common_flags & SOCAM_VSYNC_ACTIVE_LOW)
+ cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW;
+ if (common_flags & SOCAM_PCLK_SAMPLE_FALLING)
+ cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING;
+
+ if (isi->pdata->has_emb_sync)
+ cfg1 |= ISI_CFG1_EMB_SYNC;
+ if (isi->pdata->isi_full_mode)
+ cfg1 |= ISI_CFG1_FULL_MODE;
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ isi_writel(isi, ISI_CFG1, cfg1);
+
+ return 0;
+}
+
+static struct soc_camera_host_ops isi_soc_camera_host_ops = {
+ .owner = THIS_MODULE,
+ .add = isi_camera_add_device,
+ .remove = isi_camera_remove_device,
+ .set_fmt = isi_camera_set_fmt,
+ .try_fmt = isi_camera_try_fmt,
+ .get_formats = isi_camera_get_formats,
+ .init_videobuf2 = isi_camera_init_videobuf,
+ .poll = isi_camera_poll,
+ .querycap = isi_camera_querycap,
+ .set_bus_param = isi_camera_set_bus_param,
+};
+
+/* -----------------------------------------------------------------------*/
+static int __devexit atmel_isi_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct atmel_isi *isi = container_of(soc_host,
+ struct atmel_isi, soc_host);
+
+ free_irq(isi->irq, isi);
+ soc_camera_host_unregister(soc_host);
+ vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct fbd) * MAX_BUFFER_NUM,
+ isi->p_fb_descriptors,
+ isi->fb_descriptors_phys);
+
+ iounmap(isi->regs);
+ clk_put(isi->pclk);
+ kfree(isi);
+
+ return 0;
+}
+
+static int __devinit atmel_isi_probe(struct platform_device *pdev)
+{
+ unsigned int irq;
+ struct atmel_isi *isi;
+ struct clk *pclk;
+ struct resource *regs;
+ int ret, i;
+ struct device *dev = &pdev->dev;
+ struct soc_camera_host *soc_host;
+ struct isi_platform_data *pdata;
+
+ pdata = dev->platform_data;
+ if (!pdata || !pdata->data_width_flags) {
+ dev_err(&pdev->dev,
+ "No config available for Atmel ISI\n");
+ return -EINVAL;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ pclk = clk_get(&pdev->dev, "isi_clk");
+ if (IS_ERR(pclk))
+ return PTR_ERR(pclk);
+
+ isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL);
+ if (!isi) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Can't allocate interface!\n");
+ goto err_alloc_isi;
+ }
+
+ isi->pclk = pclk;
+ isi->pdata = pdata;
+ isi->active = NULL;
+ spin_lock_init(&isi->lock);
+ init_waitqueue_head(&isi->vsync_wq);
+ INIT_LIST_HEAD(&isi->video_buffer_list);
+ INIT_LIST_HEAD(&isi->dma_desc_head);
+
+ isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct fbd) * MAX_BUFFER_NUM,
+ &isi->fb_descriptors_phys,
+ GFP_KERNEL);
+ if (!isi->p_fb_descriptors) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Can't allocate descriptors!\n");
+ goto err_alloc_descriptors;
+ }
+
+ for (i = 0; i < MAX_BUFFER_NUM; i++) {
+ isi->dma_desc[i].p_fbd = isi->p_fb_descriptors + i;
+ isi->dma_desc[i].fbd_phys = isi->fb_descriptors_phys +
+ i * sizeof(struct fbd);
+ list_add(&isi->dma_desc[i].list, &isi->dma_desc_head);
+ }
+
+ isi->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(isi->alloc_ctx)) {
+ ret = PTR_ERR(isi->alloc_ctx);
+ goto err_alloc_ctx;
+ }
+
+ isi->regs = ioremap(regs->start, resource_size(regs));
+ if (!isi->regs) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_req_irq;
+ }
+
+ ret = request_irq(irq, isi_interrupt, 0, "isi", isi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ goto err_req_irq;
+ }
+ isi->irq = irq;
+
+ soc_host = &isi->soc_host;
+ soc_host->drv_name = "isi-camera";
+ soc_host->ops = &isi_soc_camera_host_ops;
+ soc_host->priv = isi;
+ soc_host->v4l2_dev.dev = &pdev->dev;
+ soc_host->nr = pdev->id;
+
+ ret = soc_camera_host_register(soc_host);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register soc camera host\n");
+ goto err_register_soc_camera_host;
+ }
+ return 0;
+
+err_register_soc_camera_host:
+ free_irq(isi->irq, isi);
+err_req_irq:
+ iounmap(isi->regs);
+err_ioremap:
+ vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
+err_alloc_ctx:
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct fbd) * MAX_BUFFER_NUM,
+ isi->p_fb_descriptors,
+ isi->fb_descriptors_phys);
+err_alloc_descriptors:
+ kfree(isi);
+err_alloc_isi:
+ clk_put(isi->pclk);
+
+ return ret;
+}
+
+static struct platform_driver atmel_isi_driver = {
+ .probe = atmel_isi_probe,
+ .remove = __devexit_p(atmel_isi_remove),
+ .driver = {
+ .name = "atmel_isi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init atmel_isi_init_module(void)
+{
+ return platform_driver_probe(&atmel_isi_driver, &atmel_isi_probe);
+}
+
+static void __exit atmel_isi_exit(void)
+{
+ platform_driver_unregister(&atmel_isi_driver);
+}
+module_init(atmel_isi_init_module);
+module_exit(atmel_isi_exit);
+
+MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");
+MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("video");
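queue_setup() in this driver sizes every vb2 buffer as bytes_per_line * user_height and then clamps the buffer count so the whole queue stays below VID_LIMIT_BYTES (16 MiB), starting from at most MAX_BUFFER_NUM (32) descriptors. A small standalone sketch of that arithmetic; the 640x480 YUYV example (2 bytes per pixel) is an assumption, not taken from the patch:

#include <stdio.h>

#define MAX_BUFFER_NUM	32
#define VID_LIMIT_BYTES	(16 * 1024 * 1024)

/* Mirrors the clamping done in the ISI queue_setup(). */
static unsigned int clamp_buffers(unsigned int requested,
				  unsigned long bytes_per_line,
				  unsigned long height)
{
	unsigned long size = bytes_per_line * height;
	unsigned int nbuffers = requested;

	if (!nbuffers || nbuffers > MAX_BUFFER_NUM)
		nbuffers = MAX_BUFFER_NUM;
	if (size * nbuffers > VID_LIMIT_BYTES)
		nbuffers = VID_LIMIT_BYTES / size;

	return nbuffers;
}

int main(void)
{
	/* 640x480 YUYV: 1280 bytes per line, 614400 bytes per frame, so a
	 * request for 32 buffers is clamped to 27 (27 frames < 16 MiB). */
	printf("buffers: %u\n", clamp_buffers(32, 640 * 2, 480));
	return 0;
}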
diff --git a/drivers/media/video/au0828/au0828-core.c b/drivers/media/video/au0828/au0828-core.c
index ca342e4..1e4ce50 100644
--- a/drivers/media/video/au0828/au0828-core.c
+++ b/drivers/media/video/au0828/au0828-core.c
@@ -292,3 +292,4 @@ module_exit(au0828_exit);
MODULE_DESCRIPTION("Driver for Auvitek AU0828 based products");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.2");
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index c03eb29..0b3e481 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -33,7 +33,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/suspend.h>
-#include <linux/version.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
@@ -43,8 +42,6 @@
static DEFINE_MUTEX(au0828_sysfs_lock);
-#define AU0828_VERSION_CODE KERNEL_VERSION(0, 0, 1)
-
/* ------------------------------------------------------------------
Videobuf operations
------------------------------------------------------------------*/
@@ -1254,8 +1251,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, dev->board.name, sizeof(cap->card));
strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
- cap->version = AU0828_VERSION_CODE;
-
/*set the device capabilities */
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VBI_CAPTURE |
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 3c9e6c7..5b15f63 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -2892,13 +2892,10 @@ void __devinit bttv_idcard(struct bttv *btv)
{
unsigned int gpiobits;
int i,type;
- unsigned short tmp;
/* read PCI subsystem ID */
- pci_read_config_word(btv->c.pci, PCI_SUBSYSTEM_ID, &tmp);
- btv->cardid = tmp << 16;
- pci_read_config_word(btv->c.pci, PCI_SUBSYSTEM_VENDOR_ID, &tmp);
- btv->cardid |= tmp;
+ btv->cardid = btv->c.pci->subsystem_device << 16;
+ btv->cardid |= btv->c.pci->subsystem_vendor;
if (0 != btv->cardid && 0xffffffff != btv->cardid) {
/* look for the card */
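Rather than re-reading the PCI subsystem IDs through config-space accesses, bttv now uses the values the PCI core already caches in struct pci_dev and packs them into the 32-bit cardid exactly as before: subsystem device ID in the upper half, subsystem vendor ID in the lower half. A trivial standalone illustration of the packing (the example IDs are invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical subsystem IDs, as the PCI core would cache them */
	uint16_t subsystem_device = 0x1370;
	uint16_t subsystem_vendor = 0x107d;
	uint32_t cardid = ((uint32_t)subsystem_device << 16) | subsystem_vendor;

	printf("cardid = 0x%08x\n", cardid);	/* prints 0x1370107d */
	return 0;
}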
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 834a483..14444de 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -57,6 +57,7 @@
#include <media/saa6588.h>
+#define BTTV_VERSION "0.9.19"
unsigned int bttv_num; /* number of Bt848s in use */
struct bttv *bttvs[BTTV_MAX];
@@ -163,6 +164,7 @@ MODULE_PARM_DESC(radio_nr, "radio device numbers");
MODULE_DESCRIPTION("bttv - v4l/v4l2 driver module for bt848/878 based cards");
MODULE_AUTHOR("Ralph Metzler & Marcus Metzler & Gerd Knorr");
MODULE_LICENSE("GPL");
+MODULE_VERSION(BTTV_VERSION);
/* ----------------------------------------------------------------------- */
/* sysfs */
@@ -2616,7 +2618,6 @@ static int bttv_querycap(struct file *file, void *priv,
strlcpy(cap->card, btv->video_dev->name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"PCI:%s", pci_name(btv->c.pci));
- cap->version = BTTV_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VBI_CAPTURE |
@@ -3416,7 +3417,6 @@ static int radio_querycap(struct file *file, void *priv,
strcpy(cap->driver, "bttv");
strlcpy(cap->card, btv->radio_dev->name, sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(btv->c.pci));
- cap->version = BTTV_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
@@ -4585,14 +4585,8 @@ static int __init bttv_init_module(void)
bttv_num = 0;
- printk(KERN_INFO "bttv: driver version %d.%d.%d loaded\n",
- (BTTV_VERSION_CODE >> 16) & 0xff,
- (BTTV_VERSION_CODE >> 8) & 0xff,
- BTTV_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "bttv: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "bttv: driver version %s loaded\n",
+ BTTV_VERSION);
if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
gbuffers = 2;
if (gbufsize > BTTV_MAX_FBUF)
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index 9b776fa..318edf2 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -25,9 +25,6 @@
#ifndef _BTTVP_H_
#define _BTTVP_H_
-#include <linux/version.h>
-#define BTTV_VERSION_CODE KERNEL_VERSION(0,9,18)
-
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/i2c.h>
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index c119350..f09df9d 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -71,7 +71,6 @@ OTHER DEALINGS IN THE SOFTWARE.
#include <linux/mm.h>
#include <linux/parport.h>
#include <linux/sched.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
@@ -647,7 +646,6 @@ static int qcam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->version = KERNEL_VERSION(0, 0, 2);
vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
@@ -895,6 +893,7 @@ static struct qcam *qcam_init(struct parport *port)
if (v4l2_device_register(NULL, v4l2_dev) < 0) {
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ kfree(qcam);
return NULL;
}
@@ -1092,3 +1091,4 @@ module_init(init_bw_qcams);
module_exit(exit_bw_qcams);
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 24fc009..cd8ff04 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -35,7 +35,6 @@
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <asm/uaccess.h>
#include <media/v4l2-device.h>
@@ -517,7 +516,6 @@ static int qcam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Color Quickcam", sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->version = KERNEL_VERSION(0, 0, 3);
vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
@@ -752,6 +750,7 @@ static struct qcam *qcam_init(struct parport *port)
if (v4l2_device_register(NULL, v4l2_dev) < 0) {
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ kfree(qcam);
return NULL;
}
@@ -886,6 +885,7 @@ static void __exit cqcam_cleanup(void)
MODULE_AUTHOR("Philip Blundell <philb@gnu.org>");
MODULE_DESCRIPTION(BANNER);
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.4");
module_init(cqcam_init);
module_exit(cqcam_cleanup);
diff --git a/drivers/media/video/cafe_ccic-regs.h b/drivers/media/video/cafe_ccic-regs.h
deleted file mode 100644
index 8e2a87c..0000000
--- a/drivers/media/video/cafe_ccic-regs.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Register definitions for the m88alp01 camera interface. Offsets in bytes
- * as given in the spec.
- *
- * Copyright 2006 One Laptop Per Child Association, Inc.
- *
- * Written by Jonathan Corbet, corbet@lwn.net.
- *
- * This file may be distributed under the terms of the GNU General
- * Public License, version 2.
- */
-#define REG_Y0BAR 0x00
-#define REG_Y1BAR 0x04
-#define REG_Y2BAR 0x08
-/* ... */
-
-#define REG_IMGPITCH 0x24 /* Image pitch register */
-#define IMGP_YP_SHFT 2 /* Y pitch params */
-#define IMGP_YP_MASK 0x00003ffc /* Y pitch field */
-#define IMGP_UVP_SHFT 18 /* UV pitch (planar) */
-#define IMGP_UVP_MASK 0x3ffc0000
-#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
-#define IRQ_EOF0 0x00000001 /* End of frame 0 */
-#define IRQ_EOF1 0x00000002 /* End of frame 1 */
-#define IRQ_EOF2 0x00000004 /* End of frame 2 */
-#define IRQ_SOF0 0x00000008 /* Start of frame 0 */
-#define IRQ_SOF1 0x00000010 /* Start of frame 1 */
-#define IRQ_SOF2 0x00000020 /* Start of frame 2 */
-#define IRQ_OVERFLOW 0x00000040 /* FIFO overflow */
-#define IRQ_TWSIW 0x00010000 /* TWSI (smbus) write */
-#define IRQ_TWSIR 0x00020000 /* TWSI read */
-#define IRQ_TWSIE 0x00040000 /* TWSI error */
-#define TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
-#define FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
-#define ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
-#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
-#define REG_IRQSTAT 0x30 /* IRQ status / clear */
-
-#define REG_IMGSIZE 0x34 /* Image size */
-#define IMGSZ_V_MASK 0x1fff0000
-#define IMGSZ_V_SHIFT 16
-#define IMGSZ_H_MASK 0x00003fff
-#define REG_IMGOFFSET	0x38	/* Image offset */
-
-#define REG_CTRL0 0x3c /* Control 0 */
-#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
-
-/* Mask for all the format bits */
-#define C0_DF_MASK 0x00fffffc /* Bits 2-23 */
-
-/* RGB ordering */
-#define C0_RGB4_RGBX 0x00000000
-#define C0_RGB4_XRGB 0x00000004
-#define C0_RGB4_BGRX 0x00000008
-#define C0_RGB4_XBGR 0x0000000c
-#define C0_RGB5_RGGB 0x00000000
-#define C0_RGB5_GRBG 0x00000004
-#define C0_RGB5_GBRG 0x00000008
-#define C0_RGB5_BGGR 0x0000000c
-
-/* Spec has two fields for DIN and DOUT, but they must match, so
- combine them here. */
-#define C0_DF_YUV 0x00000000 /* Data is YUV */
-#define C0_DF_RGB 0x000000a0 /* ... RGB */
-#define C0_DF_BAYER 0x00000140 /* ... Bayer */
-/* 8-8-8 must be missing from the below - ask */
-#define C0_RGBF_565 0x00000000
-#define C0_RGBF_444 0x00000800
-#define C0_RGB_BGR 0x00001000 /* Blue comes first */
-#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
-#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed */
-#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar */
-/* Think that 420 packed must be 111 - ask */
-#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
-#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
-#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
-#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
-#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
-#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
-#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
-#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
-/* Bayer bits 18,19 if needed */
-#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
-#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
-#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
-#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
-#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
-#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
-#define CO_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
-
-
-#define REG_CTRL1 0x40 /* Control 1 */
-#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
-#define C1_ALPHA_SHFT 20
-#define C1_DMAB32 0x00000000 /* 32-byte DMA burst */
-#define C1_DMAB16 0x02000000 /* 16-byte DMA burst */
-#define C1_DMAB64 0x04000000 /* 64-byte DMA burst */
-#define C1_DMAB_MASK 0x06000000
-#define C1_TWOBUFS 0x08000000 /* Use only two DMA buffers */
-#define C1_PWRDWN 0x10000000 /* Power down */
-
-#define REG_CLKCTRL 0x88 /* Clock control */
-#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
-
-#define REG_GPR 0xb4 /* General purpose register. This
- controls inputs to the power and reset
- pins on the OV7670 used with OLPC;
- other deployments could differ. */
-#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
-#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
-#define GPR_C1 0x00000002 /* Control 1 value */
-/*
- * Control 0 is wired to reset on OLPC machines. For ov7x sensors,
- * it is active low; for ov6x, instead, it's active high. What
- * fun.
- */
-#define GPR_C0 0x00000001 /* Control 0 value */
-
-#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
-#define TWSIC0_EN 0x00000001 /* TWSI enable */
-#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
-#define TWSIC0_SID 0x000003fc /* Slave ID */
-#define TWSIC0_SID_SHIFT 2
-#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
-#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
-#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
-
-#define REG_TWSIC1 0xbc /* TWSI control 1 */
-#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
-#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
-#define TWSIC1_ADDR_SHIFT 16
-#define TWSIC1_READ 0x01000000 /* Set for read op */
-#define TWSIC1_WSTAT 0x02000000 /* Write status */
-#define TWSIC1_RVALID 0x04000000 /* Read data valid */
-#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
-
-
-#define REG_UBAR 0xc4 /* Upper base address register */
-
-/*
- * Here are the weird global control registers, which are said to live
- * way up here.
- */
-#define REG_GL_CSR 0x3004 /* Control/status register */
-#define GCSR_SRS 0x00000001 /* SW Reset set */
-#define GCSR_SRC 0x00000002 /* SW Reset clear */
-#define GCSR_MRS 0x00000004 /* Master reset set */
-#define GCSR_MRC	0x00000008	/* Master reset clear */
-#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
-#define REG_GL_IMASK 0x300c /* Interrupt mask register */
-#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
-
-#define REG_GL_FCR 0x3038 /* GPIO functional control register */
-#define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
-#define REG_GL_GPIOR 0x315c /* GPIO register */
-#define GGPIO_OUT 0x80000 /* GPIO output */
-#define GGPIO_VAL 0x00008 /* Output pin value */
-
-#define REG_LEN REG_GL_IMASK + 4
-
-
-/*
- * Useful stuff that probably belongs somewhere global.
- */
-#define VGA_WIDTH 640
-#define VGA_HEIGHT 480
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
deleted file mode 100644
index 6647033..0000000
--- a/drivers/media/video/cafe_ccic.c
+++ /dev/null
@@ -1,2267 +0,0 @@
-/*
- * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
- * multifunction chip. Currently works with the Omnivision OV7670
- * sensor.
- *
- * The data sheet for this device can be found at:
- * http://www.marvell.com/products/pc_connectivity/88alp01/
- *
- * Copyright 2006 One Laptop Per Child Association, Inc.
- * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
- *
- * Written by Jonathan Corbet, corbet@lwn.net.
- *
- * v4l2_device/v4l2_subdev conversion by:
- * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
- *
- * Note: this conversion is untested! Please contact the linux-media
- * mailing list if you can test this, together with the test results.
- *
- * This file may be distributed under the terms of the GNU General
- * Public License, version 2.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/dmi.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/videodev2.h>
-#include <linux/slab.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-chip-ident.h>
-#include <linux/device.h>
-#include <linux/wait.h>
-#include <linux/list.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/vmalloc.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-#include "ov7670.h"
-#include "cafe_ccic-regs.h"
-
-#define CAFE_VERSION 0x000002
-
-
-/*
- * Parameters.
- */
-MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
-MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("Video");
-
-/*
- * Internal DMA buffer management. Since the controller cannot do S/G I/O,
- * we must have physically contiguous buffers to bring frames into.
- * These parameters control how many buffers we use, whether we
- * allocate them at load time (better chance of success, but nails down
- * memory) or when somebody tries to use the camera (riskier), and,
- * for load-time allocation, how big they should be.
- *
- * The controller can cycle through three buffers. We could use
- * more by flipping pointers around, but it probably makes little
- * sense.
- */
-
-#define MAX_DMA_BUFS 3
-static int alloc_bufs_at_read;
-module_param(alloc_bufs_at_read, bool, 0444);
-MODULE_PARM_DESC(alloc_bufs_at_read,
- "Non-zero value causes DMA buffers to be allocated when the "
- "video capture device is read, rather than at module load "
- "time. This saves memory, but decreases the chances of "
- "successfully getting those buffers.");
-
-static int n_dma_bufs = 3;
-module_param(n_dma_bufs, uint, 0644);
-MODULE_PARM_DESC(n_dma_bufs,
- "The number of DMA buffers to allocate. Can be either two "
- "(saves memory, makes timing tighter) or three.");
-
-static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
-module_param(dma_buf_size, uint, 0444);
-MODULE_PARM_DESC(dma_buf_size,
- "The size of the allocated DMA buffers. If actual operating "
- "parameters require larger buffers, an attempt to reallocate "
- "will be made.");
-
-static int min_buffers = 1;
-module_param(min_buffers, uint, 0644);
-MODULE_PARM_DESC(min_buffers,
- "The minimum number of streaming I/O buffers we are willing "
- "to work with.");
-
-static int max_buffers = 10;
-module_param(max_buffers, uint, 0644);
-MODULE_PARM_DESC(max_buffers,
- "The maximum number of streaming I/O buffers an application "
- "will be allowed to allocate. These buffers are big and live "
- "in vmalloc space.");
-
-static int flip;
-module_param(flip, bool, 0444);
-MODULE_PARM_DESC(flip,
- "If set, the sensor will be instructed to flip the image "
- "vertically.");
-
-
-enum cafe_state {
- S_NOTREADY, /* Not yet initialized */
- S_IDLE, /* Just hanging around */
- S_FLAKED, /* Some sort of problem */
- S_SINGLEREAD, /* In read() */
- S_SPECREAD, /* Speculative read (for future read()) */
- S_STREAMING /* Streaming data */
-};
-
-/*
- * Tracking of streaming I/O buffers.
- */
-struct cafe_sio_buffer {
- struct list_head list;
- struct v4l2_buffer v4lbuf;
- char *buffer; /* Where it lives in kernel space */
- int mapcount;
- struct cafe_camera *cam;
-};
-
-/*
- * A description of one of our devices.
- * Locking: controlled by s_mutex. Certain fields, however, require
- * the dev_lock spinlock; they are marked as such by comments.
- * dev_lock is also required for access to device registers.
- */
-struct cafe_camera
-{
- struct v4l2_device v4l2_dev;
- enum cafe_state state;
- unsigned long flags; /* Buffer status, mainly (dev_lock) */
- int users; /* How many open FDs */
- struct file *owner; /* Who has data access (v4l2) */
-
- /*
- * Subsystem structures.
- */
- struct pci_dev *pdev;
- struct video_device vdev;
- struct i2c_adapter i2c_adapter;
- struct v4l2_subdev *sensor;
- unsigned short sensor_addr;
-
- unsigned char __iomem *regs;
- struct list_head dev_list; /* link to other devices */
-
- /* DMA buffers */
- unsigned int nbufs; /* How many are alloc'd */
- int next_buf; /* Next to consume (dev_lock) */
- unsigned int dma_buf_size; /* allocated size */
- void *dma_bufs[MAX_DMA_BUFS]; /* Internal buffer addresses */
- dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
- unsigned int specframes; /* Unconsumed spec frames (dev_lock) */
- unsigned int sequence; /* Frame sequence number */
- unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual buffers */
-
- /* Streaming buffers */
- unsigned int n_sbufs; /* How many we have */
- struct cafe_sio_buffer *sb_bufs; /* The array of housekeeping structs */
- struct list_head sb_avail; /* Available for data (we own) (dev_lock) */
- struct list_head sb_full; /* With data (user space owns) (dev_lock) */
- struct tasklet_struct s_tasklet;
-
- /* Current operating parameters */
- u32 sensor_type; /* Currently ov7670 only */
- struct v4l2_pix_format pix_format;
- enum v4l2_mbus_pixelcode mbus_code;
-
- /* Locks */
- struct mutex s_mutex; /* Access to this structure */
- spinlock_t dev_lock; /* Access to device */
-
- /* Misc */
- wait_queue_head_t smbus_wait; /* Waiting on i2c events */
- wait_queue_head_t iowait; /* Waiting on frame data */
-};
-
-/*
- * Status flags. Always manipulated with bit operations.
- */
-#define CF_BUF0_VALID 0 /* Buffers valid - first three */
-#define CF_BUF1_VALID 1
-#define CF_BUF2_VALID 2
-#define CF_DMA_ACTIVE 3 /* A frame is incoming */
-#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
-
-#define sensor_call(cam, o, f, args...) \
- v4l2_subdev_call(cam->sensor, o, f, ##args)
-
-static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
-{
- return container_of(dev, struct cafe_camera, v4l2_dev);
-}
-
-static struct cafe_format_struct {
- __u8 *desc;
- __u32 pixelformat;
- int bpp; /* Bytes per pixel */
- enum v4l2_mbus_pixelcode mbus_code;
-} cafe_formats[] = {
- {
- .desc = "YUYV 4:2:2",
- .pixelformat = V4L2_PIX_FMT_YUYV,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
- .bpp = 2,
- },
- {
- .desc = "RGB 444",
- .pixelformat = V4L2_PIX_FMT_RGB444,
- .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
- .bpp = 2,
- },
- {
- .desc = "RGB 565",
- .pixelformat = V4L2_PIX_FMT_RGB565,
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
- .bpp = 2,
- },
- {
- .desc = "Raw RGB Bayer",
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
- .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
- .bpp = 1
- },
-};
-#define N_CAFE_FMTS ARRAY_SIZE(cafe_formats)
-
-static struct cafe_format_struct *cafe_find_format(u32 pixelformat)
-{
- unsigned i;
-
- for (i = 0; i < N_CAFE_FMTS; i++)
- if (cafe_formats[i].pixelformat == pixelformat)
- return cafe_formats + i;
- /* Not found? Then return the first format. */
- return cafe_formats;
-}
-
-/*
- * Start over with DMA buffers - dev_lock needed.
- */
-static void cafe_reset_buffers(struct cafe_camera *cam)
-{
- int i;
-
- cam->next_buf = -1;
- for (i = 0; i < cam->nbufs; i++)
- clear_bit(i, &cam->flags);
- cam->specframes = 0;
-}
-
-static inline int cafe_needs_config(struct cafe_camera *cam)
-{
- return test_bit(CF_CONFIG_NEEDED, &cam->flags);
-}
-
-static void cafe_set_config_needed(struct cafe_camera *cam, int needed)
-{
- if (needed)
- set_bit(CF_CONFIG_NEEDED, &cam->flags);
- else
- clear_bit(CF_CONFIG_NEEDED, &cam->flags);
-}
-
-
-
-
-/*
- * Debugging and related.
- */
-#define cam_err(cam, fmt, arg...) \
- dev_err(&(cam)->pdev->dev, fmt, ##arg);
-#define cam_warn(cam, fmt, arg...) \
- dev_warn(&(cam)->pdev->dev, fmt, ##arg);
-#define cam_dbg(cam, fmt, arg...) \
- dev_dbg(&(cam)->pdev->dev, fmt, ##arg);
-
-
-/* ---------------------------------------------------------------------*/
-
-/*
- * Device register I/O
- */
-static inline void cafe_reg_write(struct cafe_camera *cam, unsigned int reg,
- unsigned int val)
-{
- iowrite32(val, cam->regs + reg);
-}
-
-static inline unsigned int cafe_reg_read(struct cafe_camera *cam,
- unsigned int reg)
-{
- return ioread32(cam->regs + reg);
-}
-
-
-static inline void cafe_reg_write_mask(struct cafe_camera *cam, unsigned int reg,
- unsigned int val, unsigned int mask)
-{
- unsigned int v = cafe_reg_read(cam, reg);
-
- v = (v & ~mask) | (val & mask);
- cafe_reg_write(cam, reg, v);
-}
-
-static inline void cafe_reg_clear_bit(struct cafe_camera *cam,
- unsigned int reg, unsigned int val)
-{
- cafe_reg_write_mask(cam, reg, 0, val);
-}
-
-static inline void cafe_reg_set_bit(struct cafe_camera *cam,
- unsigned int reg, unsigned int val)
-{
- cafe_reg_write_mask(cam, reg, val, val);
-}
-
-
-
-/* -------------------------------------------------------------------- */
-/*
- * The I2C/SMBUS interface to the camera itself starts here. The
- * controller handles SMBUS itself, presenting a relatively simple register
- * interface; all we have to do is to tell it where to route the data.
- */
-#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
-
-static int cafe_smbus_write_done(struct cafe_camera *cam)
-{
- unsigned long flags;
- int c1;
-
- /*
- * We must delay after the interrupt, or the controller gets confused
- * and never does give us good status. Fortunately, we don't do this
- * often.
- */
- udelay(20);
- spin_lock_irqsave(&cam->dev_lock, flags);
- c1 = cafe_reg_read(cam, REG_TWSIC1);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
-}
-
-static int cafe_smbus_write_data(struct cafe_camera *cam,
- u16 addr, u8 command, u8 value)
-{
- unsigned int rval;
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
- rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
- /*
- * Marvell sez set clkdiv to all 1's for now.
- */
- rval |= TWSIC0_CLKDIV;
- cafe_reg_write(cam, REG_TWSIC0, rval);
- (void) cafe_reg_read(cam, REG_TWSIC1); /* force write */
- rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
- cafe_reg_write(cam, REG_TWSIC1, rval);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-
- /* Unfortunately, reading TWSIC1 too soon after sending a command
- * causes the device to die.
- * Use a busy-wait because we often send a large quantity of small
- * commands at once; using msleep() would cause a lot of context
- * switches which take longer than 2ms, resulting in noticeable
- * boot-time and capture-start delays.
- */
- mdelay(2);
-
- /*
- * Another sad fact is that sometimes, commands silently complete but
- * cafe_smbus_write_done() never becomes aware of this.
- * This happens at random and can apparently occur with any
- * command.
- * We don't understand why this is. We work around this issue
- * with the timeout in the wait below, assuming that all commands
- * complete within the timeout.
- */
- wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(cam),
- CAFE_SMBUS_TIMEOUT);
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- rval = cafe_reg_read(cam, REG_TWSIC1);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-
- if (rval & TWSIC1_WSTAT) {
- cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
- command, value);
- return -EIO;
- }
- if (rval & TWSIC1_ERROR) {
- cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
- command, value);
- return -EIO;
- }
- return 0;
-}
-
-
-
-static int cafe_smbus_read_done(struct cafe_camera *cam)
-{
- unsigned long flags;
- int c1;
-
- /*
- * We must delay after the interrupt, or the controller gets confused
- * and never does give us good status. Fortunately, we don't do this
- * often.
- */
- udelay(20);
- spin_lock_irqsave(&cam->dev_lock, flags);
- c1 = cafe_reg_read(cam, REG_TWSIC1);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
-}
-
-
-
-static int cafe_smbus_read_data(struct cafe_camera *cam,
- u16 addr, u8 command, u8 *value)
-{
- unsigned int rval;
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
- rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
- /*
- * Marvell sez set clkdiv to all 1's for now.
- */
- rval |= TWSIC0_CLKDIV;
- cafe_reg_write(cam, REG_TWSIC0, rval);
- (void) cafe_reg_read(cam, REG_TWSIC1); /* force write */
- rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
- cafe_reg_write(cam, REG_TWSIC1, rval);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-
- wait_event_timeout(cam->smbus_wait,
- cafe_smbus_read_done(cam), CAFE_SMBUS_TIMEOUT);
- spin_lock_irqsave(&cam->dev_lock, flags);
- rval = cafe_reg_read(cam, REG_TWSIC1);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-
- if (rval & TWSIC1_ERROR) {
- cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
- return -EIO;
- }
- if (! (rval & TWSIC1_RVALID)) {
- cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
- command);
- return -EIO;
- }
- *value = rval & 0xff;
- return 0;
-}
-
-/*
- * Perform a transfer over SMBUS. This thing is called under
- * the i2c bus lock, so we shouldn't race with ourselves...
- */
-static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
- unsigned short flags, char rw, u8 command,
- int size, union i2c_smbus_data *data)
-{
- struct v4l2_device *v4l2_dev = i2c_get_adapdata(adapter);
- struct cafe_camera *cam = to_cam(v4l2_dev);
- int ret = -EINVAL;
-
- /*
- * This interface would appear to only do byte data ops. OK
- * it can do word too, but the cam chip has no use for that.
- */
- if (size != I2C_SMBUS_BYTE_DATA) {
- cam_err(cam, "funky xfer size %d\n", size);
- return -EINVAL;
- }
-
- if (rw == I2C_SMBUS_WRITE)
- ret = cafe_smbus_write_data(cam, addr, command, data->byte);
- else if (rw == I2C_SMBUS_READ)
- ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
- return ret;
-}
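
For orientation, a sensor driver bound to this adapter never touches the TWSI registers itself; an ordinary SMBus byte-data call is what ends up in cafe_smbus_xfer() above. A minimal hedged sketch (the client binding and the 0x12/0x80 register/value pair are made-up placeholders, not part of this patch):

/* Sketch only: a sensor-side register write routed through cafe_smbus_xfer().
 * 'client' is assumed to be the ov7670 i2c_client sitting on cam->i2c_adapter. */
static int example_sensor_write(struct i2c_client *client)
{
	/* Byte-data is the only transfer size cafe_smbus_xfer() accepts */
	return i2c_smbus_write_byte_data(client, 0x12, 0x80);
}
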
-
-
-static void cafe_smbus_enable_irq(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_reg_set_bit(cam, REG_IRQMASK, TWSIIRQS);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-static u32 cafe_smbus_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_SMBUS_READ_BYTE_DATA |
- I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
-}
-
-static struct i2c_algorithm cafe_smbus_algo = {
- .smbus_xfer = cafe_smbus_xfer,
- .functionality = cafe_smbus_func
-};
-
-/* Somebody is on the bus */
-static void cafe_ctlr_stop_dma(struct cafe_camera *cam);
-static void cafe_ctlr_power_down(struct cafe_camera *cam);
-
-static int cafe_smbus_setup(struct cafe_camera *cam)
-{
- struct i2c_adapter *adap = &cam->i2c_adapter;
- int ret;
-
- cafe_smbus_enable_irq(cam);
- adap->owner = THIS_MODULE;
- adap->algo = &cafe_smbus_algo;
- strcpy(adap->name, "cafe_ccic");
- adap->dev.parent = &cam->pdev->dev;
- i2c_set_adapdata(adap, &cam->v4l2_dev);
- ret = i2c_add_adapter(adap);
- if (ret)
- printk(KERN_ERR "Unable to register cafe i2c adapter\n");
- return ret;
-}
-
-static void cafe_smbus_shutdown(struct cafe_camera *cam)
-{
- i2c_del_adapter(&cam->i2c_adapter);
-}
-
-
-/* ------------------------------------------------------------------- */
-/*
- * Deal with the controller.
- */
-
-/*
- * Do everything we think we need to have the interface operating
- * according to the desired format.
- */
-static void cafe_ctlr_dma(struct cafe_camera *cam)
-{
- /*
- * Store the first two Y buffers (we aren't supporting
- * planar formats for now, so no UV bufs). Then either
- * set the third if it exists, or tell the controller
- * to just use two.
- */
- cafe_reg_write(cam, REG_Y0BAR, cam->dma_handles[0]);
- cafe_reg_write(cam, REG_Y1BAR, cam->dma_handles[1]);
- if (cam->nbufs > 2) {
- cafe_reg_write(cam, REG_Y2BAR, cam->dma_handles[2]);
- cafe_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
- }
- else
- cafe_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
- cafe_reg_write(cam, REG_UBAR, 0); /* 32 bits only for now */
-}
-
-static void cafe_ctlr_image(struct cafe_camera *cam)
-{
- int imgsz;
- struct v4l2_pix_format *fmt = &cam->pix_format;
-
- imgsz = ((fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK) |
- (fmt->bytesperline & IMGSZ_H_MASK);
- cafe_reg_write(cam, REG_IMGSIZE, imgsz);
- cafe_reg_write(cam, REG_IMGOFFSET, 0);
- /* YPITCH just drops the last two bits */
- cafe_reg_write_mask(cam, REG_IMGPITCH, fmt->bytesperline,
- IMGP_YP_MASK);
- /*
- * Tell the controller about the image format we are using.
- */
- switch (cam->pix_format.pixelformat) {
- case V4L2_PIX_FMT_YUYV:
- cafe_reg_write_mask(cam, REG_CTRL0,
- C0_DF_YUV|C0_YUV_PACKED|C0_YUVE_YUYV,
- C0_DF_MASK);
- break;
-
- case V4L2_PIX_FMT_RGB444:
- cafe_reg_write_mask(cam, REG_CTRL0,
- C0_DF_RGB|C0_RGBF_444|C0_RGB4_XRGB,
- C0_DF_MASK);
- /* Alpha value? */
- break;
-
- case V4L2_PIX_FMT_RGB565:
- cafe_reg_write_mask(cam, REG_CTRL0,
- C0_DF_RGB|C0_RGBF_565|C0_RGB5_BGGR,
- C0_DF_MASK);
- break;
-
- default:
- cam_err(cam, "Unknown format %x\n", cam->pix_format.pixelformat);
- break;
- }
- /*
- * Make sure it knows we want to use hsync/vsync.
- */
- cafe_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC,
- C0_SIFM_MASK);
-}
-
-
-/*
- * Configure the controller for operation; caller holds the
- * device mutex.
- */
-static int cafe_ctlr_configure(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_ctlr_dma(cam);
- cafe_ctlr_image(cam);
- cafe_set_config_needed(cam, 0);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return 0;
-}
-
-static void cafe_ctlr_irq_enable(struct cafe_camera *cam)
-{
- /*
- * Clear any pending interrupts, since we do not
- * expect to have I/O active prior to enabling.
- */
- cafe_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
- cafe_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
-}
-
-static void cafe_ctlr_irq_disable(struct cafe_camera *cam)
-{
- cafe_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
-}
-
-/*
- * Make the controller start grabbing images. Everything must
- * be set up before doing this.
- */
-static void cafe_ctlr_start(struct cafe_camera *cam)
-{
- /* set_bit performs a read, so no other barrier should be
- needed here */
- cafe_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
-}
-
-static void cafe_ctlr_stop(struct cafe_camera *cam)
-{
- cafe_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
-}
-
-static void cafe_ctlr_init(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- /*
- * Added magic to bring up the hardware on the B-Test board
- */
- cafe_reg_write(cam, 0x3038, 0x8);
- cafe_reg_write(cam, 0x315c, 0x80008);
- /*
- * Go through the dance needed to wake the device up.
- * Note that these registers are global and shared
- * with the NAND and SD devices. Interaction between the
- * three still needs to be examined.
- */
- cafe_reg_write(cam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
- cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
- cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
- /*
- * Here we must wait a bit for the controller to come around.
- */
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- msleep(5);
- spin_lock_irqsave(&cam->dev_lock, flags);
-
- cafe_reg_write(cam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
- cafe_reg_set_bit(cam, REG_GL_IMASK, GIMSK_CCIC_EN);
- /*
- * Make sure it's not powered down.
- */
- cafe_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
- /*
- * Turn off the enable bit. It sure should be off anyway,
- * but it's good to be sure.
- */
- cafe_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
- /*
- * Mask all interrupts.
- */
- cafe_reg_write(cam, REG_IRQMASK, 0);
- /*
- * Clock the sensor appropriately. Controller clock should
- * be 48MHz, sensor "typical" value is half that.
- */
- cafe_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-
-/*
- * Stop the controller, and don't return until we're really sure that no
- * further DMA is going on.
- */
-static void cafe_ctlr_stop_dma(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- /*
- * Theory: stop the camera controller (whether it is operating
- * or not). Delay briefly just in case we race with the SOF
- * interrupt, then wait until no DMA is active.
- */
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_ctlr_stop(cam);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- mdelay(1);
- wait_event_timeout(cam->iowait,
- !test_bit(CF_DMA_ACTIVE, &cam->flags), HZ);
- if (test_bit(CF_DMA_ACTIVE, &cam->flags))
- cam_err(cam, "Timeout waiting for DMA to end\n");
- /* This would be bad news - what now? */
- spin_lock_irqsave(&cam->dev_lock, flags);
- cam->state = S_IDLE;
- cafe_ctlr_irq_disable(cam);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-/*
- * Power up and down.
- */
-static void cafe_ctlr_power_up(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
- /*
- * Part one of the sensor dance: turn the global
- * GPIO signal on.
- */
- cafe_reg_write(cam, REG_GL_FCR, GFCR_GPIO_ON);
- cafe_reg_write(cam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
- /*
- * Put the sensor into operational mode (assumes OLPC-style
- * wiring). Control 0 is reset - set to 1 to operate.
- * Control 1 is power down, set to 0 to operate.
- */
- cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
-/* mdelay(1); */ /* Marvell says 1ms will do it */
- cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
-/* mdelay(1); */ /* Enough? */
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- msleep(5); /* Just to be sure */
-}
-
-static void cafe_ctlr_power_down(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
- cafe_reg_write(cam, REG_GL_FCR, GFCR_GPIO_ON);
- cafe_reg_write(cam, REG_GL_GPIOR, GGPIO_OUT);
- cafe_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-/* -------------------------------------------------------------------- */
-/*
- * Communications with the sensor.
- */
-
-static int __cafe_cam_reset(struct cafe_camera *cam)
-{
- return sensor_call(cam, core, reset, 0);
-}
-
-/*
- * We have found the sensor on the i2c. Let's try to have a
- * conversation.
- */
-static int cafe_cam_init(struct cafe_camera *cam)
-{
- struct v4l2_dbg_chip_ident chip;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- if (cam->state != S_NOTREADY)
- cam_warn(cam, "Cam init with device in funky state %d",
- cam->state);
- ret = __cafe_cam_reset(cam);
- if (ret)
- goto out;
- chip.ident = V4L2_IDENT_NONE;
- chip.match.type = V4L2_CHIP_MATCH_I2C_ADDR;
- chip.match.addr = cam->sensor_addr;
- ret = sensor_call(cam, core, g_chip_ident, &chip);
- if (ret)
- goto out;
- cam->sensor_type = chip.ident;
- if (cam->sensor_type != V4L2_IDENT_OV7670) {
- cam_err(cam, "Unsupported sensor type 0x%x", cam->sensor_type);
- ret = -EINVAL;
- goto out;
- }
-/* Get/set parameters? */
- ret = 0;
- cam->state = S_IDLE;
- out:
- cafe_ctlr_power_down(cam);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-/*
- * Configure the sensor to match the parameters we have. Caller should
- * hold s_mutex
- */
-static int cafe_cam_set_flip(struct cafe_camera *cam)
-{
- struct v4l2_control ctrl;
-
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_VFLIP;
- ctrl.value = flip;
- return sensor_call(cam, core, s_ctrl, &ctrl);
-}
-
-
-static int cafe_cam_configure(struct cafe_camera *cam)
-{
- struct v4l2_mbus_framefmt mbus_fmt;
- int ret;
-
- v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
- ret = sensor_call(cam, core, init, 0);
- if (ret == 0)
- ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
- /*
- * OV7670 does weird things if flip is set *before* format...
- */
- ret += cafe_cam_set_flip(cam);
- return ret;
-}
-
-/* -------------------------------------------------------------------- */
-/*
- * DMA buffer management. These functions need s_mutex held.
- */
-
-/* FIXME: this is inefficient as hell, since dma_alloc_coherent just
- * does a get_free_pages() call, and we waste a good chunk of an order-N
- * allocation. Should try to allocate the whole set in one chunk.
- */
-static int cafe_alloc_dma_bufs(struct cafe_camera *cam, int loadtime)
-{
- int i;
-
- cafe_set_config_needed(cam, 1);
- if (loadtime)
- cam->dma_buf_size = dma_buf_size;
- else
- cam->dma_buf_size = cam->pix_format.sizeimage;
- if (n_dma_bufs > 3)
- n_dma_bufs = 3;
-
- cam->nbufs = 0;
- for (i = 0; i < n_dma_bufs; i++) {
- cam->dma_bufs[i] = dma_alloc_coherent(&cam->pdev->dev,
- cam->dma_buf_size, cam->dma_handles + i,
- GFP_KERNEL);
- if (cam->dma_bufs[i] == NULL) {
- cam_warn(cam, "Failed to allocate DMA buffer\n");
- break;
- }
- /* For debug, remove eventually */
- memset(cam->dma_bufs[i], 0xcc, cam->dma_buf_size);
- (cam->nbufs)++;
- }
-
- switch (cam->nbufs) {
- case 1:
- dma_free_coherent(&cam->pdev->dev, cam->dma_buf_size,
- cam->dma_bufs[0], cam->dma_handles[0]);
- cam->nbufs = 0;
- case 0:
- cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
- return -ENOMEM;
-
- case 2:
- if (n_dma_bufs > 2)
- cam_warn(cam, "Will limp along with only 2 buffers\n");
- break;
- }
- return 0;
-}
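
The FIXME above hints at allocating the whole buffer set with a single dma_alloc_coherent() call and slicing it up. A rough sketch of that idea, purely illustrative: the helper name is hypothetical, n_dma_bufs is assumed to be already clamped to MAX_DMA_BUFS, and the partial-failure/reallocation handling of the real function is omitted.

/* Hypothetical one-chunk variant of cafe_alloc_dma_bufs() (illustration only) */
static int cafe_alloc_dma_bufs_onechunk(struct cafe_camera *cam, size_t bufsize)
{
	dma_addr_t base;
	void *vaddr;
	int i;

	vaddr = dma_alloc_coherent(&cam->pdev->dev, bufsize * n_dma_bufs,
			&base, GFP_KERNEL);
	if (vaddr == NULL)
		return -ENOMEM;
	for (i = 0; i < n_dma_bufs; i++) {
		cam->dma_bufs[i] = vaddr + i * bufsize;
		cam->dma_handles[i] = base + i * bufsize;
	}
	cam->dma_buf_size = bufsize;
	cam->nbufs = n_dma_bufs;
	/* Freeing would then release the chunk once, not per buffer. */
	return 0;
}
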
-
-static void cafe_free_dma_bufs(struct cafe_camera *cam)
-{
- int i;
-
- for (i = 0; i < cam->nbufs; i++) {
- dma_free_coherent(&cam->pdev->dev, cam->dma_buf_size,
- cam->dma_bufs[i], cam->dma_handles[i]);
- cam->dma_bufs[i] = NULL;
- }
- cam->nbufs = 0;
-}
-
-
-
-
-
-/* ----------------------------------------------------------------------- */
-/*
- * Here starts the V4L2 interface code.
- */
-
-/*
- * Read an image from the device.
- */
-static ssize_t cafe_deliver_buffer(struct cafe_camera *cam,
- char __user *buffer, size_t len, loff_t *pos)
-{
- int bufno;
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- if (cam->next_buf < 0) {
- cam_err(cam, "deliver_buffer: No next buffer\n");
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return -EIO;
- }
- bufno = cam->next_buf;
- clear_bit(bufno, &cam->flags);
- if (++(cam->next_buf) >= cam->nbufs)
- cam->next_buf = 0;
- if (! test_bit(cam->next_buf, &cam->flags))
- cam->next_buf = -1;
- cam->specframes = 0;
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-
- if (len > cam->pix_format.sizeimage)
- len = cam->pix_format.sizeimage;
- if (copy_to_user(buffer, cam->dma_bufs[bufno], len))
- return -EFAULT;
- (*pos) += len;
- return len;
-}
-
-/*
- * Get everything ready, and start grabbing frames.
- */
-static int cafe_read_setup(struct cafe_camera *cam, enum cafe_state state)
-{
- int ret;
- unsigned long flags;
-
- /*
- * Configuration. If we still don't have DMA buffers,
- * make one last, desperate attempt.
- */
- if (cam->nbufs == 0)
- if (cafe_alloc_dma_bufs(cam, 0))
- return -ENOMEM;
-
- if (cafe_needs_config(cam)) {
- cafe_cam_configure(cam);
- ret = cafe_ctlr_configure(cam);
- if (ret)
- return ret;
- }
-
- /*
- * Turn it loose.
- */
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_reset_buffers(cam);
- cafe_ctlr_irq_enable(cam);
- cam->state = state;
- cafe_ctlr_start(cam);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return 0;
-}
-
-
-static ssize_t cafe_v4l_read(struct file *filp,
- char __user *buffer, size_t len, loff_t *pos)
-{
- struct cafe_camera *cam = filp->private_data;
- int ret = 0;
-
- /*
- * Perhaps we're in speculative read mode and already
- * have data?
- */
- mutex_lock(&cam->s_mutex);
- if (cam->state == S_SPECREAD) {
- if (cam->next_buf >= 0) {
- ret = cafe_deliver_buffer(cam, buffer, len, pos);
- if (ret != 0)
- goto out_unlock;
- }
- } else if (cam->state == S_FLAKED || cam->state == S_NOTREADY) {
- ret = -EIO;
- goto out_unlock;
- } else if (cam->state != S_IDLE) {
- ret = -EBUSY;
- goto out_unlock;
- }
-
- /*
- * v4l2: multiple processes can open the device, but only
- * one gets to grab data from it.
- */
- if (cam->owner && cam->owner != filp) {
- ret = -EBUSY;
- goto out_unlock;
- }
- cam->owner = filp;
-
- /*
- * Do setup if need be.
- */
- if (cam->state != S_SPECREAD) {
- ret = cafe_read_setup(cam, S_SINGLEREAD);
- if (ret)
- goto out_unlock;
- }
- /*
- * Wait for something to happen. This should probably
- * be interruptible (FIXME).
- */
- wait_event_timeout(cam->iowait, cam->next_buf >= 0, HZ);
- if (cam->next_buf < 0) {
- cam_err(cam, "read() operation timed out\n");
- cafe_ctlr_stop_dma(cam);
- ret = -EIO;
- goto out_unlock;
- }
- /*
- * Give them their data and we should be done.
- */
- ret = cafe_deliver_buffer(cam, buffer, len, pos);
-
- out_unlock:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
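
For reference, the read() path above is exercised from user space with plain file I/O; nothing V4L2-specific is needed beyond the device node. A hedged sketch (the /dev/video0 node and the 640x480 YUYV frame size are assumptions taken from cafe_def_pix_format further down):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const size_t frame = 640 * 480 * 2;	/* VGA YUYV, 2 bytes/pixel */
	unsigned char *buf = malloc(frame);
	int fd = open("/dev/video0", O_RDONLY);
	ssize_t n;

	if (fd < 0 || buf == NULL)
		return 1;
	n = read(fd, buf, frame);	/* lands in cafe_v4l_read() above */
	if (n > 0)
		fprintf(stderr, "read %zd bytes of image data\n", n);
	close(fd);
	free(buf);
	return 0;
}
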
-
-
-
-
-
-
-
-
-/*
- * Streaming I/O support.
- */
-
-
-
-static int cafe_vidioc_streamon(struct file *filp, void *priv,
- enum v4l2_buf_type type)
-{
- struct cafe_camera *cam = filp->private_data;
- int ret = -EINVAL;
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- goto out;
- mutex_lock(&cam->s_mutex);
- if (cam->state != S_IDLE || cam->n_sbufs == 0)
- goto out_unlock;
-
- cam->sequence = 0;
- ret = cafe_read_setup(cam, S_STREAMING);
-
- out_unlock:
- mutex_unlock(&cam->s_mutex);
- out:
- return ret;
-}
-
-
-static int cafe_vidioc_streamoff(struct file *filp, void *priv,
- enum v4l2_buf_type type)
-{
- struct cafe_camera *cam = filp->private_data;
- int ret = -EINVAL;
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- goto out;
- mutex_lock(&cam->s_mutex);
- if (cam->state != S_STREAMING)
- goto out_unlock;
-
- cafe_ctlr_stop_dma(cam);
- ret = 0;
-
- out_unlock:
- mutex_unlock(&cam->s_mutex);
- out:
- return ret;
-}
-
-
-
-static int cafe_setup_siobuf(struct cafe_camera *cam, int index)
-{
- struct cafe_sio_buffer *buf = cam->sb_bufs + index;
-
- INIT_LIST_HEAD(&buf->list);
- buf->v4lbuf.length = PAGE_ALIGN(cam->pix_format.sizeimage);
- buf->buffer = vmalloc_user(buf->v4lbuf.length);
- if (buf->buffer == NULL)
- return -ENOMEM;
- buf->mapcount = 0;
- buf->cam = cam;
-
- buf->v4lbuf.index = index;
- buf->v4lbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf->v4lbuf.field = V4L2_FIELD_NONE;
- buf->v4lbuf.memory = V4L2_MEMORY_MMAP;
- /*
- * Offset: must be 32-bit even on a 64-bit system. videobuf-dma-sg
- * just uses the length times the index, but the spec warns
- * against doing just that - vma merging problems. So we
- * leave a gap between each pair of buffers.
- */
- buf->v4lbuf.m.offset = 2*index*buf->v4lbuf.length;
- return 0;
-}
-
-static int cafe_free_sio_buffers(struct cafe_camera *cam)
-{
- int i;
-
- /*
- * If any buffers are mapped, we cannot free them at all.
- */
- for (i = 0; i < cam->n_sbufs; i++)
- if (cam->sb_bufs[i].mapcount > 0)
- return -EBUSY;
- /*
- * OK, let's do it.
- */
- for (i = 0; i < cam->n_sbufs; i++)
- vfree(cam->sb_bufs[i].buffer);
- cam->n_sbufs = 0;
- kfree(cam->sb_bufs);
- cam->sb_bufs = NULL;
- INIT_LIST_HEAD(&cam->sb_avail);
- INIT_LIST_HEAD(&cam->sb_full);
- return 0;
-}
-
-
-
-static int cafe_vidioc_reqbufs(struct file *filp, void *priv,
- struct v4l2_requestbuffers *req)
-{
- struct cafe_camera *cam = filp->private_data;
- int ret = 0; /* Silence warning */
-
- /*
- * Make sure it's something we can do. User pointers could be
- * implemented without great pain, but that's not been done yet.
- */
- if (req->memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
- /*
- * If they ask for zero buffers, they really want us to stop streaming
- * (if it's happening) and free everything. Should we check owner?
- */
- mutex_lock(&cam->s_mutex);
- if (req->count == 0) {
- if (cam->state == S_STREAMING)
- cafe_ctlr_stop_dma(cam);
- ret = cafe_free_sio_buffers (cam);
- goto out;
- }
- /*
- * Device needs to be idle and working. We *could* try to do the
- * right thing in S_SPECREAD by shutting things down, but it
- * probably doesn't matter.
- */
- if (cam->state != S_IDLE || (cam->owner && cam->owner != filp)) {
- ret = -EBUSY;
- goto out;
- }
- cam->owner = filp;
-
- if (req->count < min_buffers)
- req->count = min_buffers;
- else if (req->count > max_buffers)
- req->count = max_buffers;
- if (cam->n_sbufs > 0) {
- ret = cafe_free_sio_buffers(cam);
- if (ret)
- goto out;
- }
-
- cam->sb_bufs = kzalloc(req->count*sizeof(struct cafe_sio_buffer),
- GFP_KERNEL);
- if (cam->sb_bufs == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- for (cam->n_sbufs = 0; cam->n_sbufs < req->count; (cam->n_sbufs++)) {
- ret = cafe_setup_siobuf(cam, cam->n_sbufs);
- if (ret)
- break;
- }
-
- if (cam->n_sbufs == 0) /* no luck at all - ret already set */
- kfree(cam->sb_bufs);
- req->count = cam->n_sbufs; /* In case of partial success */
-
- out:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-static int cafe_vidioc_querybuf(struct file *filp, void *priv,
- struct v4l2_buffer *buf)
-{
- struct cafe_camera *cam = filp->private_data;
- int ret = -EINVAL;
-
- mutex_lock(&cam->s_mutex);
- if (buf->index >= cam->n_sbufs)
- goto out;
- *buf = cam->sb_bufs[buf->index].v4lbuf;
- ret = 0;
- out:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-static int cafe_vidioc_qbuf(struct file *filp, void *priv,
- struct v4l2_buffer *buf)
-{
- struct cafe_camera *cam = filp->private_data;
- struct cafe_sio_buffer *sbuf;
- int ret = -EINVAL;
- unsigned long flags;
-
- mutex_lock(&cam->s_mutex);
- if (buf->index >= cam->n_sbufs)
- goto out;
- sbuf = cam->sb_bufs + buf->index;
- if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_QUEUED) {
- ret = 0; /* Already queued?? */
- goto out;
- }
- if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_DONE) {
- /* Spec doesn't say anything, seems appropriate tho */
- ret = -EBUSY;
- goto out;
- }
- sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_QUEUED;
- spin_lock_irqsave(&cam->dev_lock, flags);
- list_add(&sbuf->list, &cam->sb_avail);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- ret = 0;
- out:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-static int cafe_vidioc_dqbuf(struct file *filp, void *priv,
- struct v4l2_buffer *buf)
-{
- struct cafe_camera *cam = filp->private_data;
- struct cafe_sio_buffer *sbuf;
- int ret = -EINVAL;
- unsigned long flags;
-
- mutex_lock(&cam->s_mutex);
- if (cam->state != S_STREAMING)
- goto out_unlock;
- if (list_empty(&cam->sb_full) && filp->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- while (list_empty(&cam->sb_full) && cam->state == S_STREAMING) {
- mutex_unlock(&cam->s_mutex);
- if (wait_event_interruptible(cam->iowait,
- !list_empty(&cam->sb_full))) {
- ret = -ERESTARTSYS;
- goto out;
- }
- mutex_lock(&cam->s_mutex);
- }
-
- if (cam->state != S_STREAMING)
- ret = -EINTR;
- else {
- spin_lock_irqsave(&cam->dev_lock, flags);
- /* Should probably recheck !list_empty() here */
- sbuf = list_entry(cam->sb_full.next,
- struct cafe_sio_buffer, list);
- list_del_init(&sbuf->list);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_DONE;
- *buf = sbuf->v4lbuf;
- ret = 0;
- }
-
- out_unlock:
- mutex_unlock(&cam->s_mutex);
- out:
- return ret;
-}
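
The reqbufs/querybuf/qbuf/dqbuf handlers above implement the standard V4L2 memory-mapped streaming sequence, and the gapped offset set up in cafe_setup_siobuf() is exactly what user space hands back to mmap(). A minimal user-space sketch of one capture, offered only as illustration (the single-buffer count is an assumption and error paths are collapsed):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

int grab_one_frame(int fd)
{
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	void *mem;

	memset(&req, 0, sizeof(req));
	req.count = 1;			/* the driver clamps this to min_buffers */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;

	memset(&buf, 0, sizeof(buf));
	buf.index = 0;
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
		return -1;

	/* buf.m.offset is the gapped offset from cafe_setup_siobuf() */
	mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, buf.m.offset);
	if (mem == MAP_FAILED)
		return -1;

	if (ioctl(fd, VIDIOC_QBUF, &buf) < 0 ||
	    ioctl(fd, VIDIOC_STREAMON, &type) < 0 ||
	    ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
		return -1;
	/* buf.bytesused bytes of image data are now readable at mem */
	ioctl(fd, VIDIOC_STREAMOFF, &type);
	munmap(mem, buf.length);
	return 0;
}

The shared, writable mapping is deliberate: cafe_v4l_mmap() below rejects anything without VM_WRITE and VM_SHARED.
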
-
-
-
-static void cafe_v4l_vm_open(struct vm_area_struct *vma)
-{
- struct cafe_sio_buffer *sbuf = vma->vm_private_data;
- /*
- * Locking: done under mmap_sem, so we don't need to
- * go back to the camera lock here.
- */
- sbuf->mapcount++;
-}
-
-
-static void cafe_v4l_vm_close(struct vm_area_struct *vma)
-{
- struct cafe_sio_buffer *sbuf = vma->vm_private_data;
-
- mutex_lock(&sbuf->cam->s_mutex);
- sbuf->mapcount--;
- /* Docs say we should stop I/O too... */
- if (sbuf->mapcount == 0)
- sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED;
- mutex_unlock(&sbuf->cam->s_mutex);
-}
-
-static const struct vm_operations_struct cafe_v4l_vm_ops = {
- .open = cafe_v4l_vm_open,
- .close = cafe_v4l_vm_close
-};
-
-
-static int cafe_v4l_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct cafe_camera *cam = filp->private_data;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- int ret = -EINVAL;
- int i;
- struct cafe_sio_buffer *sbuf = NULL;
-
- if (! (vma->vm_flags & VM_WRITE) || ! (vma->vm_flags & VM_SHARED))
- return -EINVAL;
- /*
- * Find the buffer they are looking for.
- */
- mutex_lock(&cam->s_mutex);
- for (i = 0; i < cam->n_sbufs; i++)
- if (cam->sb_bufs[i].v4lbuf.m.offset == offset) {
- sbuf = cam->sb_bufs + i;
- break;
- }
- if (sbuf == NULL)
- goto out;
-
- ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
- if (ret)
- goto out;
- vma->vm_flags |= VM_DONTEXPAND;
- vma->vm_private_data = sbuf;
- vma->vm_ops = &cafe_v4l_vm_ops;
- sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED;
- cafe_v4l_vm_open(vma);
- ret = 0;
- out:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-
-static int cafe_v4l_open(struct file *filp)
-{
- struct cafe_camera *cam = video_drvdata(filp);
-
- filp->private_data = cam;
-
- mutex_lock(&cam->s_mutex);
- if (cam->users == 0) {
- cafe_ctlr_power_up(cam);
- __cafe_cam_reset(cam);
- cafe_set_config_needed(cam, 1);
- /* FIXME make sure this is complete */
- }
- (cam->users)++;
- mutex_unlock(&cam->s_mutex);
- return 0;
-}
-
-
-static int cafe_v4l_release(struct file *filp)
-{
- struct cafe_camera *cam = filp->private_data;
-
- mutex_lock(&cam->s_mutex);
- (cam->users)--;
- if (filp == cam->owner) {
- cafe_ctlr_stop_dma(cam);
- cafe_free_sio_buffers(cam);
- cam->owner = NULL;
- }
- if (cam->users == 0) {
- cafe_ctlr_power_down(cam);
- if (alloc_bufs_at_read)
- cafe_free_dma_bufs(cam);
- }
- mutex_unlock(&cam->s_mutex);
- return 0;
-}
-
-
-
-static unsigned int cafe_v4l_poll(struct file *filp,
- struct poll_table_struct *pt)
-{
- struct cafe_camera *cam = filp->private_data;
-
- poll_wait(filp, &cam->iowait, pt);
- if (cam->next_buf >= 0)
- return POLLIN | POLLRDNORM;
- return 0;
-}
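
A caller on the read() path can also sleep in poll()/select() until a frame is ready, which is what the handler above serves. A tiny hedged sketch (fd as in the read() example earlier):

#include <poll.h>

/* Returns 1 when cafe_v4l_poll() reports a frame, 0 on timeout, -1 on error */
int wait_for_frame(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret > 0 && (pfd.revents & POLLIN))
		return 1;
	return ret < 0 ? -1 : 0;
}

Note that in this driver the capture engine is only started from read() or STREAMON, so polling before the first read() will simply wait.
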
-
-
-
-static int cafe_vidioc_queryctrl(struct file *filp, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, queryctrl, qc);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-static int cafe_vidioc_g_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, g_ctrl, ctrl);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-static int cafe_vidioc_s_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, s_ctrl, ctrl);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-
-
-
-static int cafe_vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- strcpy(cap->driver, "cafe_ccic");
- strcpy(cap->card, "cafe_ccic");
- cap->version = CAFE_VERSION;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- return 0;
-}
-
-
-/*
- * The default format we use until somebody says otherwise.
- */
-static const struct v4l2_pix_format cafe_def_pix_format = {
- .width = VGA_WIDTH,
- .height = VGA_HEIGHT,
- .pixelformat = V4L2_PIX_FMT_YUYV,
- .field = V4L2_FIELD_NONE,
- .bytesperline = VGA_WIDTH*2,
- .sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
-};
-
-static const enum v4l2_mbus_pixelcode cafe_def_mbus_code =
- V4L2_MBUS_FMT_YUYV8_2X8;
-
-static int cafe_vidioc_enum_fmt_vid_cap(struct file *filp,
- void *priv, struct v4l2_fmtdesc *fmt)
-{
- if (fmt->index >= N_CAFE_FMTS)
- return -EINVAL;
- strlcpy(fmt->description, cafe_formats[fmt->index].desc,
- sizeof(fmt->description));
- fmt->pixelformat = cafe_formats[fmt->index].pixelformat;
- return 0;
-}
-
-static int cafe_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
- struct v4l2_format *fmt)
-{
- struct cafe_camera *cam = priv;
- struct cafe_format_struct *f;
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
- struct v4l2_mbus_framefmt mbus_fmt;
- int ret;
-
- f = cafe_find_format(pix->pixelformat);
- pix->pixelformat = f->pixelformat;
- v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
- mutex_unlock(&cam->s_mutex);
- v4l2_fill_pix_format(pix, &mbus_fmt);
- pix->bytesperline = pix->width * f->bpp;
- pix->sizeimage = pix->height * pix->bytesperline;
- return ret;
-}
-
-static int cafe_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
- struct v4l2_format *fmt)
-{
- struct cafe_camera *cam = priv;
- struct cafe_format_struct *f;
- int ret;
-
- /*
- * Can't do anything if the device is not idle
- * Also can't if there are streaming buffers in place.
- */
- if (cam->state != S_IDLE || cam->n_sbufs > 0)
- return -EBUSY;
-
- f = cafe_find_format(fmt->fmt.pix.pixelformat);
-
- /*
- * See if the formatting works in principle.
- */
- ret = cafe_vidioc_try_fmt_vid_cap(filp, priv, fmt);
- if (ret)
- return ret;
- /*
- * Now we start to change things for real, so let's do it
- * under lock.
- */
- mutex_lock(&cam->s_mutex);
- cam->pix_format = fmt->fmt.pix;
- cam->mbus_code = f->mbus_code;
-
- /*
- * Make sure we have appropriate DMA buffers.
- */
- ret = -ENOMEM;
- if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
- cafe_free_dma_bufs(cam);
- if (cam->nbufs == 0) {
- if (cafe_alloc_dma_bufs(cam, 0))
- goto out;
- }
- /*
- * It looks like this might work, so let's program the sensor.
- */
- ret = cafe_cam_configure(cam);
- if (! ret)
- ret = cafe_ctlr_configure(cam);
- out:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-/*
- * Return our stored notion of how the camera is/should be configured.
- * The V4L2 spec wants us to be smarter, and actually get this from
- * the camera (and not mess with it at open time). Someday.
- */
-static int cafe_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
- struct v4l2_format *f)
-{
- struct cafe_camera *cam = priv;
-
- f->fmt.pix = cam->pix_format;
- return 0;
-}
-
-/*
- * We only have one input - the sensor - so minimize the nonsense here.
- */
-static int cafe_vidioc_enum_input(struct file *filp, void *priv,
- struct v4l2_input *input)
-{
- if (input->index != 0)
- return -EINVAL;
-
- input->type = V4L2_INPUT_TYPE_CAMERA;
- input->std = V4L2_STD_ALL; /* Not sure what should go here */
- strcpy(input->name, "Camera");
- return 0;
-}
-
-static int cafe_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int cafe_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- if (i != 0)
- return -EINVAL;
- return 0;
-}
-
-/* from vivi.c */
-static int cafe_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id *a)
-{
- return 0;
-}
-
-/*
- * G/S_PARM. Most of this is done by the sensor, but we are
- * the level which controls the number of read buffers.
- */
-static int cafe_vidioc_g_parm(struct file *filp, void *priv,
- struct v4l2_streamparm *parms)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, g_parm, parms);
- mutex_unlock(&cam->s_mutex);
- parms->parm.capture.readbuffers = n_dma_bufs;
- return ret;
-}
-
-static int cafe_vidioc_s_parm(struct file *filp, void *priv,
- struct v4l2_streamparm *parms)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, s_parm, parms);
- mutex_unlock(&cam->s_mutex);
- parms->parm.capture.readbuffers = n_dma_bufs;
- return ret;
-}
-
-static int cafe_vidioc_g_chip_ident(struct file *file, void *priv,
- struct v4l2_dbg_chip_ident *chip)
-{
- struct cafe_camera *cam = priv;
-
- chip->ident = V4L2_IDENT_NONE;
- chip->revision = 0;
- if (v4l2_chip_match_host(&chip->match)) {
- chip->ident = V4L2_IDENT_CAFE;
- return 0;
- }
- return sensor_call(cam, core, g_chip_ident, chip);
-}
-
-static int cafe_vidioc_enum_framesizes(struct file *filp, void *priv,
- struct v4l2_frmsizeenum *sizes)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, enum_framesizes, sizes);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-static int cafe_vidioc_enum_frameintervals(struct file *filp, void *priv,
- struct v4l2_frmivalenum *interval)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, enum_frameintervals, interval);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int cafe_vidioc_g_register(struct file *file, void *priv,
- struct v4l2_dbg_register *reg)
-{
- struct cafe_camera *cam = priv;
-
- if (v4l2_chip_match_host(&reg->match)) {
- reg->val = cafe_reg_read(cam, reg->reg);
- reg->size = 4;
- return 0;
- }
- return sensor_call(cam, core, g_register, reg);
-}
-
-static int cafe_vidioc_s_register(struct file *file, void *priv,
- struct v4l2_dbg_register *reg)
-{
- struct cafe_camera *cam = priv;
-
- if (v4l2_chip_match_host(&reg->match)) {
- cafe_reg_write(cam, reg->reg, reg->val);
- return 0;
- }
- return sensor_call(cam, core, s_register, reg);
-}
-#endif
-
-/*
- * This template device holds all of those v4l2 methods; we
- * clone it for specific real devices.
- */
-
-static const struct v4l2_file_operations cafe_v4l_fops = {
- .owner = THIS_MODULE,
- .open = cafe_v4l_open,
- .release = cafe_v4l_release,
- .read = cafe_v4l_read,
- .poll = cafe_v4l_poll,
- .mmap = cafe_v4l_mmap,
- .unlocked_ioctl = video_ioctl2,
-};
-
-static const struct v4l2_ioctl_ops cafe_v4l_ioctl_ops = {
- .vidioc_querycap = cafe_vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = cafe_vidioc_enum_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = cafe_vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = cafe_vidioc_s_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = cafe_vidioc_g_fmt_vid_cap,
- .vidioc_enum_input = cafe_vidioc_enum_input,
- .vidioc_g_input = cafe_vidioc_g_input,
- .vidioc_s_input = cafe_vidioc_s_input,
- .vidioc_s_std = cafe_vidioc_s_std,
- .vidioc_reqbufs = cafe_vidioc_reqbufs,
- .vidioc_querybuf = cafe_vidioc_querybuf,
- .vidioc_qbuf = cafe_vidioc_qbuf,
- .vidioc_dqbuf = cafe_vidioc_dqbuf,
- .vidioc_streamon = cafe_vidioc_streamon,
- .vidioc_streamoff = cafe_vidioc_streamoff,
- .vidioc_queryctrl = cafe_vidioc_queryctrl,
- .vidioc_g_ctrl = cafe_vidioc_g_ctrl,
- .vidioc_s_ctrl = cafe_vidioc_s_ctrl,
- .vidioc_g_parm = cafe_vidioc_g_parm,
- .vidioc_s_parm = cafe_vidioc_s_parm,
- .vidioc_enum_framesizes = cafe_vidioc_enum_framesizes,
- .vidioc_enum_frameintervals = cafe_vidioc_enum_frameintervals,
- .vidioc_g_chip_ident = cafe_vidioc_g_chip_ident,
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = cafe_vidioc_g_register,
- .vidioc_s_register = cafe_vidioc_s_register,
-#endif
-};
-
-static struct video_device cafe_v4l_template = {
- .name = "cafe",
- .tvnorms = V4L2_STD_NTSC_M,
- .current_norm = V4L2_STD_NTSC_M, /* make mplayer happy */
-
- .fops = &cafe_v4l_fops,
- .ioctl_ops = &cafe_v4l_ioctl_ops,
- .release = video_device_release_empty,
-};
-
-
-/* ---------------------------------------------------------------------- */
-/*
- * Interrupt handler stuff
- */
-
-
-
-static void cafe_frame_tasklet(unsigned long data)
-{
- struct cafe_camera *cam = (struct cafe_camera *) data;
- int i;
- unsigned long flags;
- struct cafe_sio_buffer *sbuf;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- for (i = 0; i < cam->nbufs; i++) {
- int bufno = cam->next_buf;
- if (bufno < 0) { /* "will never happen" */
- cam_err(cam, "No valid bufs in tasklet!\n");
- break;
- }
- if (++(cam->next_buf) >= cam->nbufs)
- cam->next_buf = 0;
- if (! test_bit(bufno, &cam->flags))
- continue;
- if (list_empty(&cam->sb_avail))
- break; /* Leave it valid, hope for better later */
- clear_bit(bufno, &cam->flags);
- sbuf = list_entry(cam->sb_avail.next,
- struct cafe_sio_buffer, list);
- /*
- * Drop the lock during the big copy. This *should* be safe...
- */
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- memcpy(sbuf->buffer, cam->dma_bufs[bufno],
- cam->pix_format.sizeimage);
- sbuf->v4lbuf.bytesused = cam->pix_format.sizeimage;
- sbuf->v4lbuf.sequence = cam->buf_seq[bufno];
- sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
- sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE;
- spin_lock_irqsave(&cam->dev_lock, flags);
- list_move_tail(&sbuf->list, &cam->sb_full);
- }
- if (! list_empty(&cam->sb_full))
- wake_up(&cam->iowait);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-
-
-static void cafe_frame_complete(struct cafe_camera *cam, int frame)
-{
- /*
- * Basic frame housekeeping.
- */
- if (test_bit(frame, &cam->flags) && printk_ratelimit())
- cam_err(cam, "Frame overrun on %d, frames lost\n", frame);
- set_bit(frame, &cam->flags);
- clear_bit(CF_DMA_ACTIVE, &cam->flags);
- if (cam->next_buf < 0)
- cam->next_buf = frame;
- cam->buf_seq[frame] = ++(cam->sequence);
-
- switch (cam->state) {
- /*
- * If in single read mode, try going speculative.
- */
- case S_SINGLEREAD:
- cam->state = S_SPECREAD;
- cam->specframes = 0;
- wake_up(&cam->iowait);
- break;
-
- /*
- * If we are already doing speculative reads, and nobody is
- * reading them, just stop.
- */
- case S_SPECREAD:
- if (++(cam->specframes) >= cam->nbufs) {
- cafe_ctlr_stop(cam);
- cafe_ctlr_irq_disable(cam);
- cam->state = S_IDLE;
- }
- wake_up(&cam->iowait);
- break;
- /*
- * For the streaming case, we defer the real work to the
- * camera tasklet.
- *
- * FIXME: if the application is not consuming the buffers,
- * we should eventually put things on hold and restart in
- * vidioc_dqbuf().
- */
- case S_STREAMING:
- tasklet_schedule(&cam->s_tasklet);
- break;
-
- default:
- cam_err(cam, "Frame interrupt in non-operational state\n");
- break;
- }
-}
-
-
-
-
-static void cafe_frame_irq(struct cafe_camera *cam, unsigned int irqs)
-{
- unsigned int frame;
-
- cafe_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
- /*
- * Handle any frame completions. There really should
- * not be more than one of these, or we have fallen
- * far behind.
- */
- for (frame = 0; frame < cam->nbufs; frame++)
- if (irqs & (IRQ_EOF0 << frame))
- cafe_frame_complete(cam, frame);
- /*
- * If a frame starts, note that we have DMA active. This
- * code assumes that we won't get multiple frame interrupts
- * at once; may want to rethink that.
- */
- if (irqs & (IRQ_SOF0 | IRQ_SOF1 | IRQ_SOF2))
- set_bit(CF_DMA_ACTIVE, &cam->flags);
-}
-
-
-
-static irqreturn_t cafe_irq(int irq, void *data)
-{
- struct cafe_camera *cam = data;
- unsigned int irqs;
-
- spin_lock(&cam->dev_lock);
- irqs = cafe_reg_read(cam, REG_IRQSTAT);
- if ((irqs & ALLIRQS) == 0) {
- spin_unlock(&cam->dev_lock);
- return IRQ_NONE;
- }
- if (irqs & FRAMEIRQS)
- cafe_frame_irq(cam, irqs);
- if (irqs & TWSIIRQS) {
- cafe_reg_write(cam, REG_IRQSTAT, TWSIIRQS);
- wake_up(&cam->smbus_wait);
- }
- spin_unlock(&cam->dev_lock);
- return IRQ_HANDLED;
-}
-
-
-/* -------------------------------------------------------------------------- */
-/*
- * PCI interface stuff.
- */
-
-static const struct dmi_system_id olpc_xo1_dmi[] = {
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "OLPC"),
- DMI_MATCH(DMI_PRODUCT_NAME, "XO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1"),
- },
- },
- { }
-};
-
-static int cafe_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- int ret;
- struct cafe_camera *cam;
- struct ov7670_config sensor_cfg = {
- /* This controller only does SMBUS */
- .use_smbus = true,
-
- /*
- * Exclude QCIF mode, because it only captures a tiny portion
- * of the sensor FOV
- */
- .min_width = 320,
- .min_height = 240,
- };
- struct i2c_board_info ov7670_info = {
- .type = "ov7670",
- .addr = 0x42,
- .platform_data = &sensor_cfg,
- };
-
- /*
- * Start putting together one of our big camera structures.
- */
- ret = -ENOMEM;
- cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
- if (cam == NULL)
- goto out;
- ret = v4l2_device_register(&pdev->dev, &cam->v4l2_dev);
- if (ret)
- goto out_free;
-
- mutex_init(&cam->s_mutex);
- spin_lock_init(&cam->dev_lock);
- cam->state = S_NOTREADY;
- cafe_set_config_needed(cam, 1);
- init_waitqueue_head(&cam->smbus_wait);
- init_waitqueue_head(&cam->iowait);
- cam->pdev = pdev;
- cam->pix_format = cafe_def_pix_format;
- cam->mbus_code = cafe_def_mbus_code;
- INIT_LIST_HEAD(&cam->dev_list);
- INIT_LIST_HEAD(&cam->sb_avail);
- INIT_LIST_HEAD(&cam->sb_full);
- tasklet_init(&cam->s_tasklet, cafe_frame_tasklet, (unsigned long) cam);
- /*
- * Get set up on the PCI bus.
- */
- ret = pci_enable_device(pdev);
- if (ret)
- goto out_unreg;
- pci_set_master(pdev);
-
- ret = -EIO;
- cam->regs = pci_iomap(pdev, 0, 0);
- if (! cam->regs) {
- printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
- goto out_unreg;
- }
- ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
- if (ret)
- goto out_iounmap;
- /*
- * Initialize the controller and leave it powered up. It will
- * stay that way until the sensor driver shows up.
- */
- cafe_ctlr_init(cam);
- cafe_ctlr_power_up(cam);
- /*
- * Set up I2C/SMBUS communications. We have to drop the mutex here
- * because the sensor could attach in this call chain, leading to
- * unsightly deadlocks.
- */
- ret = cafe_smbus_setup(cam);
- if (ret)
- goto out_freeirq;
-
- /* Apply XO-1 clock speed */
- if (dmi_check_system(olpc_xo1_dmi))
- sensor_cfg.clock_speed = 45;
-
- cam->sensor_addr = ov7670_info.addr;
- cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, &cam->i2c_adapter,
- &ov7670_info, NULL);
- if (cam->sensor == NULL) {
- ret = -ENODEV;
- goto out_smbus;
- }
-
- ret = cafe_cam_init(cam);
- if (ret)
- goto out_smbus;
-
- /*
- * Get the v4l2 setup done.
- */
- mutex_lock(&cam->s_mutex);
- cam->vdev = cafe_v4l_template;
- cam->vdev.debug = 0;
-/* cam->vdev.debug = V4L2_DEBUG_IOCTL_ARG;*/
- cam->vdev.v4l2_dev = &cam->v4l2_dev;
- ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
- if (ret)
- goto out_unlock;
- video_set_drvdata(&cam->vdev, cam);
-
- /*
- * If so requested, try to get our DMA buffers now.
- */
- if (!alloc_bufs_at_read) {
- if (cafe_alloc_dma_bufs(cam, 1))
- cam_warn(cam, "Unable to alloc DMA buffers at load"
- " will try again later.");
- }
-
- mutex_unlock(&cam->s_mutex);
- return 0;
-
-out_unlock:
- mutex_unlock(&cam->s_mutex);
-out_smbus:
- cafe_smbus_shutdown(cam);
-out_freeirq:
- cafe_ctlr_power_down(cam);
- free_irq(pdev->irq, cam);
-out_iounmap:
- pci_iounmap(pdev, cam->regs);
-out_free:
- v4l2_device_unregister(&cam->v4l2_dev);
-out_unreg:
- kfree(cam);
-out:
- return ret;
-}
-
-
-/*
- * Shut down an initialized device
- */
-static void cafe_shutdown(struct cafe_camera *cam)
-{
-/* FIXME: Make sure we take care of everything here */
- if (cam->n_sbufs > 0)
- /* What if they are still mapped? Shouldn't be, but... */
- cafe_free_sio_buffers(cam);
- cafe_ctlr_stop_dma(cam);
- cafe_ctlr_power_down(cam);
- cafe_smbus_shutdown(cam);
- cafe_free_dma_bufs(cam);
- free_irq(cam->pdev->irq, cam);
- pci_iounmap(cam->pdev, cam->regs);
- video_unregister_device(&cam->vdev);
-}
-
-
-static void cafe_pci_remove(struct pci_dev *pdev)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
- struct cafe_camera *cam = to_cam(v4l2_dev);
-
- if (cam == NULL) {
- printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
- return;
- }
- mutex_lock(&cam->s_mutex);
- if (cam->users > 0)
- cam_warn(cam, "Removing a device with users!\n");
- cafe_shutdown(cam);
- v4l2_device_unregister(&cam->v4l2_dev);
- kfree(cam);
-/* No unlock - it no longer exists */
-}
-
-
-#ifdef CONFIG_PM
-/*
- * Basic power management.
- */
-static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
- struct cafe_camera *cam = to_cam(v4l2_dev);
- int ret;
- enum cafe_state cstate;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
- cstate = cam->state; /* HACK - stop_dma sets to idle */
- cafe_ctlr_stop_dma(cam);
- cafe_ctlr_power_down(cam);
- pci_disable_device(pdev);
- cam->state = cstate;
- return 0;
-}
-
-
-static int cafe_pci_resume(struct pci_dev *pdev)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
- struct cafe_camera *cam = to_cam(v4l2_dev);
- int ret = 0;
-
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
-
- if (ret) {
- cam_warn(cam, "Unable to re-enable device on resume!\n");
- return ret;
- }
- cafe_ctlr_init(cam);
-
- mutex_lock(&cam->s_mutex);
- if (cam->users > 0) {
- cafe_ctlr_power_up(cam);
- __cafe_cam_reset(cam);
- } else {
- cafe_ctlr_power_down(cam);
- }
- mutex_unlock(&cam->s_mutex);
-
- set_bit(CF_CONFIG_NEEDED, &cam->flags);
- if (cam->state == S_SPECREAD)
- cam->state = S_IDLE; /* Don't bother restarting */
- else if (cam->state == S_SINGLEREAD || cam->state == S_STREAMING)
- ret = cafe_read_setup(cam, cam->state);
- return ret;
-}
-
-#endif /* CONFIG_PM */
-
-
-static struct pci_device_id cafe_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
- PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, cafe_ids);
-
-static struct pci_driver cafe_pci_driver = {
- .name = "cafe1000-ccic",
- .id_table = cafe_ids,
- .probe = cafe_pci_probe,
- .remove = cafe_pci_remove,
-#ifdef CONFIG_PM
- .suspend = cafe_pci_suspend,
- .resume = cafe_pci_resume,
-#endif
-};
-
-
-
-
-static int __init cafe_init(void)
-{
- int ret;
-
- printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
- CAFE_VERSION);
- ret = pci_register_driver(&cafe_pci_driver);
- if (ret) {
- printk(KERN_ERR "Unable to register cafe_ccic driver\n");
- goto out;
- }
- ret = 0;
-
- out:
- return ret;
-}
-
-
-static void __exit cafe_exit(void)
-{
- pci_unregister_driver(&cafe_pci_driver);
-}
-
-module_init(cafe_init);
-module_exit(cafe_exit);
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 6d6d184..ab25218 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -31,7 +31,6 @@
#ifndef __CPIA2_H__
#define __CPIA2_H__
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <linux/usb.h>
@@ -43,10 +42,6 @@
/* define for verbose debug output */
//#define _CPIA2_DEBUG_
-#define CPIA2_MAJ_VER 3
-#define CPIA2_MIN_VER 0
-#define CPIA2_PATCH_VER 0
-
/***
* Image defines
***/
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 40eb632..077eb1d 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -29,8 +29,7 @@
* Alan Cox <alan@lxorguk.ukuu.org.uk>
****************************************************************************/
-#include <linux/version.h>
-
+#define CPIA_VERSION "3.0.1"
#include <linux/module.h>
#include <linux/time.h>
@@ -80,6 +79,7 @@ MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");
MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");
MODULE_SUPPORTED_DEVICE("video");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CPIA_VERSION);
#define ABOUT "V4L-Driver for Vision CPiA2 based cameras"
@@ -465,9 +465,6 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0)
memset(vc->bus_info,0, sizeof(vc->bus_info));
- vc->version = KERNEL_VERSION(CPIA2_MAJ_VER, CPIA2_MIN_VER,
- CPIA2_PATCH_VER);
-
vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
@@ -1558,8 +1555,8 @@ static void __init check_parameters(void)
*****************************************************************************/
static int __init cpia2_init(void)
{
- LOG("%s v%d.%d.%d\n",
- ABOUT, CPIA2_MAJ_VER, CPIA2_MIN_VER, CPIA2_PATCH_VER);
+ LOG("%s v%s\n",
+ ABOUT, CPIA_VERSION);
check_parameters();
cpia2_usb_init();
return 0;
@@ -1579,4 +1576,3 @@ static void __exit cpia2_exit(void)
module_init(cpia2_init);
module_exit(cpia2_exit);
-
diff --git a/drivers/media/video/cx18/cx18-alsa-main.c b/drivers/media/video/cx18/cx18-alsa-main.c
index d50d69d..a1e6c2a 100644
--- a/drivers/media/video/cx18/cx18-alsa-main.c
+++ b/drivers/media/video/cx18/cx18-alsa-main.c
@@ -192,6 +192,7 @@ static int snd_cx18_init(struct v4l2_device *v4l2_dev)
err_exit_free:
if (sc != NULL)
snd_card_free(sc);
+ kfree(cxsc);
err_exit:
return ret;
}
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 0864272..1834207 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -25,7 +25,6 @@
#ifndef CX18_DRIVER_H
#define CX18_DRIVER_H
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index e80134f..afe0a29 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -469,7 +469,6 @@ static int cx18_querycap(struct file *file, void *fh,
strlcpy(vcap->card, cx->card_name, sizeof(vcap->card));
snprintf(vcap->bus_info, sizeof(vcap->bus_info),
"PCI:%s", pci_name(cx->pci_dev));
- vcap->version = CX18_DRIVER_VERSION; /* version */
vcap->capabilities = cx->v4l2_cap; /* capabilities */
return 0;
}
diff --git a/drivers/media/video/cx18/cx18-version.h b/drivers/media/video/cx18/cx18-version.h
index cd189b6..fed48b6 100644
--- a/drivers/media/video/cx18/cx18-version.h
+++ b/drivers/media/video/cx18/cx18-version.h
@@ -23,12 +23,6 @@
#define CX18_VERSION_H
#define CX18_DRIVER_NAME "cx18"
-#define CX18_DRIVER_VERSION_MAJOR 1
-#define CX18_DRIVER_VERSION_MINOR 5
-#define CX18_DRIVER_VERSION_PATCHLEVEL 0
-
-#define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." __stringify(CX18_DRIVER_VERSION_MINOR) "." __stringify(CX18_DRIVER_VERSION_PATCHLEVEL)
-#define CX18_DRIVER_VERSION KERNEL_VERSION(CX18_DRIVER_VERSION_MAJOR, \
- CX18_DRIVER_VERSION_MINOR, CX18_DRIVER_VERSION_PATCHLEVEL)
+#define CX18_VERSION "1.5.1"
#endif
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 8d78134..53ff26e 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -355,6 +355,8 @@ int cx231xx_afe_update_power_control(struct cx231xx *dev,
case CX231XX_BOARD_HAUPPAUGE_EXETER:
case CX231XX_BOARD_HAUPPAUGE_USBLIVE2:
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
if (avmode == POLARIS_AVMODE_ANALOGT_TV) {
while (afe_power_status != (FLD_PWRDN_TUNING_BIAS |
FLD_PWRDN_ENABLE_PLL)) {
@@ -1733,6 +1735,8 @@ int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard)
break;
case CX231XX_BOARD_CNXT_RDE_253S:
case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
func_mode = 0x01;
break;
default:
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 2270381..53dae2a 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -387,6 +387,7 @@ struct cx231xx_board cx231xx_boards[] = {
.norm = V4L2_STD_NTSC,
.no_alt_vanc = 1,
.external_av = 1,
+ .dont_use_port_3 = 1,
.input = {{
.type = CX231XX_VMUX_COMPOSITE1,
.vmux = CX231XX_VIN_2_1,
@@ -532,6 +533,76 @@ struct cx231xx_board cx231xx_boards[] = {
.gpio = NULL,
} },
},
+ [CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL] = {
+ .name = "Hauppauge WinTV USB2 FM (PAL)",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .norm = V4L2_STD_PAL,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ } },
+ },
+ [CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC] = {
+ .name = "Hauppauge WinTV USB2 FM (NTSC)",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .norm = V4L2_STD_NTSC,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ } },
+ },
};
const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
@@ -553,6 +624,10 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_CNXT_RDE_250},
{USB_DEVICE(0x0572, 0x58A0),
.driver_info = CX231XX_BOARD_CNXT_RDU_250},
+ {USB_DEVICE(0x2040, 0xb110),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
+ {USB_DEVICE(0x2040, 0xb111),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC},
{USB_DEVICE(0x2040, 0xb120),
.driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
{USB_DEVICE(0x2040, 0xb140),
@@ -1051,6 +1126,9 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (assoc_desc->bFirstInterface != ifnum) {
cx231xx_err(DRIVER_NAME ": Not found "
"matching IAD interface\n");
+ cx231xx_devused &= ~(1 << nr);
+ kfree(dev);
+ dev = NULL;
return -ENODEV;
}
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index abe500f..d4457f9 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -742,6 +742,8 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
case CX231XX_BOARD_CNXT_RDU_253S:
case CX231XX_BOARD_HAUPPAUGE_EXETER:
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
break;
default:
@@ -1381,6 +1383,8 @@ int cx231xx_dev_init(struct cx231xx *dev)
case CX231XX_BOARD_CNXT_RDU_253S:
case CX231XX_BOARD_HAUPPAUGE_EXETER:
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
break;
default:
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index a69c24d..6e81f97 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -29,7 +29,6 @@
#include <linux/bitmap.h>
#include <linux/usb.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -45,7 +44,7 @@
#include "cx231xx.h"
#include "cx231xx-vbi.h"
-#define CX231XX_VERSION_CODE KERNEL_VERSION(0, 0, 1)
+#define CX231XX_VERSION "0.0.2"
#define DRIVER_AUTHOR "Srinivasa Deevi <srinivasa.deevi@conexant.com>"
#define DRIVER_DESC "Conexant cx231xx based USB video device driver"
@@ -70,6 +69,7 @@ do {\
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX231XX_VERSION);
static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
@@ -1179,7 +1179,8 @@ static int vidioc_enum_input(struct file *file, void *priv,
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
- unsigned int n;
+ u32 gen_stat;
+ unsigned int ret, n;
n = i->index;
if (n >= MAX_CX231XX_INPUT)
@@ -1198,6 +1199,18 @@ static int vidioc_enum_input(struct file *file, void *priv,
i->std = dev->vdev->tvnorms;
+ /* If they are asking about the active input, read signal status */
+ if (n == dev->video_input) {
+ ret = cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS,
+ GEN_STAT, 2, &gen_stat, 4);
+ if (ret > 0) {
+ if ((gen_stat & FLD_VPRES) == 0x00)
+ i->status |= V4L2_IN_ST_NO_SIGNAL;
+ if ((gen_stat & FLD_HLOCK) == 0x00)
+ i->status |= V4L2_IN_ST_NO_H_LOCK;
+ }
+ }
+
return 0;
}
@@ -1869,8 +1882,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = CX231XX_VERSION_CODE;
-
cap->capabilities = V4L2_CAP_VBI_CAPTURE |
#if 0
V4L2_CAP_SLICED_VBI_CAPTURE |
@@ -2057,7 +2068,6 @@ static int radio_querycap(struct file *file, void *priv,
strlcpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = CX231XX_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
}
@@ -2570,11 +2580,8 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
{
int ret;
- cx231xx_info("%s: v4l2 driver version %d.%d.%d\n",
- dev->name,
- (CX231XX_VERSION_CODE >> 16) & 0xff,
- (CX231XX_VERSION_CODE >> 8) & 0xff,
- CX231XX_VERSION_CODE & 0xff);
+ cx231xx_info("%s: v4l2 driver version %s\n",
+ dev->name, CX231XX_VERSION);
/* set default norm */
/*dev->norm = cx231xx_video_template.current_norm; */
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 46dd840..2000bc6 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -43,7 +43,7 @@
#include "cx231xx-conf-reg.h"
#define DRIVER_NAME "cx231xx"
-#define PWR_SLEEP_INTERVAL 5
+#define PWR_SLEEP_INTERVAL 10
/* I2C addresses for control block in Cx231xx */
#define AFE_DEVICE_ADDRESS 0x60
@@ -67,6 +67,8 @@
#define CX231XX_BOARD_PV_XCAPTURE_USB 11
#define CX231XX_BOARD_KWORLD_UB430_USB_HYBRID 12
#define CX231XX_BOARD_ICONBIT_U100 13
+#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL 14
+#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC 15
/* Limits minimum and default number of buffers */
#define CX231XX_MIN_BUF 4
@@ -112,7 +114,6 @@
V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \
V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | \
V4L2_STD_PAL_60 | V4L2_STD_SECAM_L | V4L2_STD_SECAM_DK)
-#define CX231xx_VERSION_CODE KERNEL_VERSION(0, 0, 2)
#define SLEEP_S5H1432 30
#define CX23417_OSC_EN 8
diff --git a/drivers/media/video/cx23885/altera-ci.c b/drivers/media/video/cx23885/altera-ci.c
index 678539b..1fa8927 100644
--- a/drivers/media/video/cx23885/altera-ci.c
+++ b/drivers/media/video/cx23885/altera-ci.c
@@ -52,7 +52,6 @@
* | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
* +-------+-------+-------+-------+-------+-------+-------+-------+
*/
-#include <linux/version.h>
#include <media/videobuf-dma-sg.h>
#include <media/videobuf-dvb.h>
#include "altera-ci.h"
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 9a98dc5..67c4a59 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1359,7 +1359,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, cx23885_boards[tsport->dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->version = CX23885_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 934185c..76b7563 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -29,11 +29,17 @@
#include "../../../staging/altera-stapl/altera.h"
#include "cx23885.h"
#include "tuner-xc2028.h"
+#include "netup-eeprom.h"
#include "netup-init.h"
#include "altera-ci.h"
+#include "xc4000.h"
#include "xc5000.h"
#include "cx23888-ir.h"
+static unsigned int netup_card_rev = 1;
+module_param(netup_card_rev, int, 0644);
+MODULE_PARM_DESC(netup_card_rev,
+ "NetUP Dual DVB-T/C CI card revision");
static unsigned int enable_885_ir;
module_param(enable_885_ir, int, 0644);
MODULE_PARM_DESC(enable_885_ir,
@@ -175,6 +181,34 @@ struct cx23885_board cx23885_boards[] = {
.name = "Leadtek Winfast PxDVR3200 H",
.portc = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000] = {
+ .name = "Leadtek Winfast PxDVR3200 H XC4000",
+ .porta = CX23885_ANALOG_VIDEO,
+ .portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_XC4000,
+ .tuner_addr = 0x61,
+ .radio_type = TUNER_XC4000,
+ .radio_addr = 0x61,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN2_CH1 |
+ CX25840_VIN5_CH2 |
+ CX25840_NONE0_CH3,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_COMPOSITE1,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_SVIDEO_LUMA3 |
+ CX25840_SVIDEO_CHROMA4,
+ }, {
+ .type = CX23885_VMUX_COMPONENT,
+ .vmux = CX25840_VIN7_CH1 |
+ CX25840_VIN6_CH2 |
+ CX25840_VIN8_CH3 |
+ CX25840_COMPONENT_ON,
+ } },
+ },
[CX23885_BOARD_COMPRO_VIDEOMATE_E650F] = {
.name = "Compro VideoMate E650F",
.portc = CX23885_MPEG_DVB,
@@ -433,6 +467,10 @@ struct cx23885_subid cx23885_subids[] = {
.subdevice = 0x6681,
.card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H,
}, {
+ .subvendor = 0x107d,
+ .subdevice = 0x6f39,
+ .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000,
+ }, {
.subvendor = 0x185b,
.subdevice = 0xe800,
.card = CX23885_BOARD_COMPRO_VIDEOMATE_E650F,
@@ -749,6 +787,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
@@ -909,6 +948,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x000f000f);
break;
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
@@ -1097,12 +1137,19 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_HVR1200:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
- case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* FIXME: Implement me */
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
+ ret = cx23888_ir_probe(dev);
+ if (ret)
+ break;
+ dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_888_IR);
+ v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
+ ir_rx_pin_cfg_count, ir_rx_pin_cfg);
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
ret = cx23888_ir_probe(dev);
@@ -1156,6 +1203,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
void cx23885_ir_fini(struct cx23885_dev *dev)
{
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
cx23885_irq_remove(dev, PCI_MSK_IR);
@@ -1199,6 +1247,7 @@ int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
{
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
if (dev->sd_ir)
@@ -1325,6 +1374,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
@@ -1353,10 +1403,12 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_MYGICA_X8506:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
@@ -1383,6 +1435,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
const struct firmware *fw;
const char *filename = "dvb-netup-altera-01.fw";
char *action = "configure";
+ static struct netup_card_info cinfo;
struct altera_config netup_config = {
.dev = dev,
.action = action,
@@ -1391,6 +1444,21 @@ void cx23885_card_setup(struct cx23885_dev *dev)
netup_initialize(dev);
+ netup_get_card_info(&dev->i2c_bus[0].i2c_adap, &cinfo);
+ if (netup_card_rev)
+ cinfo.rev = netup_card_rev;
+
+ switch (cinfo.rev) {
+ case 0x4:
+ filename = "dvb-netup-altera-04.fw";
+ break;
+ default:
+ filename = "dvb-netup-altera-01.fw";
+ break;
+ }
+ printk(KERN_INFO "NetUP card rev=0x%x fw_filename=%s\n",
+ cinfo.rev, filename);
+
ret = request_firmware(&fw, filename, &dev->pci->dev);
if (ret != 0)
printk(KERN_ERR "did not find the firmware file. (%s) "
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index 419777a..ee41a88 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -42,6 +42,7 @@
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX23885_VERSION);
static unsigned int debug;
module_param(debug, int, 0644);
@@ -2147,14 +2148,8 @@ static struct pci_driver cx23885_pci_driver = {
static int __init cx23885_init(void)
{
- printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
- (CX23885_VERSION_CODE >> 16) & 0xff,
- (CX23885_VERSION_CODE >> 8) & 0xff,
- CX23885_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx23885 driver version %s loaded\n",
+ CX23885_VERSION);
return pci_register_driver(&cx23885_pci_driver);
}
@@ -2165,5 +2160,3 @@ static void __exit cx23885_fini(void)
module_init(cx23885_init);
module_exit(cx23885_fini);
-
-/* ----------------------------------------------------------- */
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 3c315f9..aa83f07 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -37,6 +37,7 @@
#include "tda8290.h"
#include "tda18271.h"
#include "lgdt330x.h"
+#include "xc4000.h"
#include "xc5000.h"
#include "max2165.h"
#include "tda10048.h"
@@ -921,6 +922,26 @@ static int dvb_register(struct cx23885_tsport *port)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
+ case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
+ i2c_bus = &dev->i2c_bus[0];
+
+ fe0->dvb.frontend = dvb_attach(zl10353_attach,
+ &dvico_fusionhdtv_xc3028,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ struct dvb_frontend *fe;
+ struct xc4000_config cfg = {
+ .i2c_address = 0x61,
+ .default_pm = 0,
+ .dvb_amplitude = 134,
+ .set_smoothedcvbs = 1,
+ .if_khz = 4560
+ };
+
+ fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
+ &dev->i2c_bus[1].i2c_adap, &cfg);
+ }
+ break;
case CX23885_BOARD_TBS_6920:
i2c_bus = &dev->i2c_bus[1];
@@ -1249,7 +1270,7 @@ int cx23885_dvb_unregister(struct cx23885_tsport *port)
* implement MFE support.
*/
fe0 = videobuf_dvb_get_frontend(&port->frontends, 1);
- if (fe0->dvb.frontend)
+ if (fe0 && fe0->dvb.frontend)
videobuf_dvb_unregister_bus(&port->frontends);
switch (port->dev->board) {
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index e97cafd..ce765e3 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -82,6 +82,7 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
return;
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_TEVII_S470:
@@ -133,6 +134,7 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
@@ -229,6 +231,9 @@ static void cx23885_input_ir_stop(struct cx23885_dev *dev)
v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
}
+ flush_work_sync(&dev->cx25840_work);
+ flush_work_sync(&dev->ir_rx_work);
+ flush_work_sync(&dev->ir_tx_work);
}
static void cx23885_input_ir_close(struct rc_dev *rc)
@@ -257,6 +262,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
return -ENODEV;
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index ee57f6b..896bb32 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1000,7 +1000,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, cx23885_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
- cap->version = CX23885_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index c186473..d86bc0b 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -36,10 +36,9 @@
#include "cx23885-reg.h"
#include "media/cx2341x.h"
-#include <linux/version.h>
#include <linux/mutex.h>
-#define CX23885_VERSION_CODE KERNEL_VERSION(0, 0, 2)
+#define CX23885_VERSION "0.0.3"
#define UNSET (-1U)
@@ -86,6 +85,7 @@
#define CX23885_BOARD_LEADTEK_WINFAST_PXTV1200 28
#define CX23885_BOARD_GOTVIEW_X5_3D_HYBRID 29
#define CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF 30
+#define CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000 31
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 423c1af..68d1240 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -113,6 +113,8 @@ MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
MODULE_AUTHOR("Ricardo Cerqueira");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX88_VERSION);
+
MODULE_SUPPORTED_DEVICE("{{Conexant,23881},"
"{{Conexant,23882},"
"{{Conexant,23883}");
@@ -973,14 +975,8 @@ static struct pci_driver cx88_audio_pci_driver = {
*/
static int __init cx88_audio_init(void)
{
- printk(KERN_INFO "cx2388x alsa driver version %d.%d.%d loaded\n",
- (CX88_VERSION_CODE >> 16) & 0xff,
- (CX88_VERSION_CODE >> 8) & 0xff,
- CX88_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx2388x alsa driver version %s loaded\n",
+ CX88_VERSION);
return pci_register_driver(&cx88_audio_pci_driver);
}
@@ -994,10 +990,3 @@ static void __exit cx88_audio_fini(void)
module_init(cx88_audio_init);
module_exit(cx88_audio_fini);
-
-/* ----------------------------------------------------------- */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 11e49bb..e46446a 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -42,6 +42,7 @@
MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX88_VERSION);
static unsigned int mpegbufs = 32;
module_param(mpegbufs,int,0644);
@@ -730,7 +731,6 @@ static int vidioc_querycap (struct file *file, void *priv,
strcpy(cap->driver, "cx88_blackbird");
strlcpy(cap->card, core->board.name, sizeof(cap->card));
sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci));
- cap->version = CX88_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
@@ -1368,14 +1368,8 @@ static struct cx8802_driver cx8802_blackbird_driver = {
static int __init blackbird_init(void)
{
- printk(KERN_INFO "cx2388x blackbird driver version %d.%d.%d loaded\n",
- (CX88_VERSION_CODE >> 16) & 0xff,
- (CX88_VERSION_CODE >> 8) & 0xff,
- CX88_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx2388x blackbird driver version %s loaded\n",
+ CX88_VERSION);
return cx8802_register_driver(&cx8802_blackbird_driver);
}
@@ -1389,11 +1383,3 @@ module_exit(blackbird_fini);
module_param_named(video_debug,cx8802_mpeg_template.debug, int, 0644);
MODULE_PARM_DESC(debug,"enable debug messages [video]");
-
-/* ----------------------------------------------------------- */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 27222c9..0d719fa 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -28,6 +28,7 @@
#include "cx88.h"
#include "tea5767.h"
+#include "xc4000.h"
static unsigned int tuner[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
static unsigned int radio[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
@@ -2119,6 +2120,99 @@ static const struct cx88_board cx88_boards[] = {
},
.mpeg = CX88_MPEG_DVB,
},
+ [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
+ .name = "Leadtek WinFast DTV1800 H (XC4000)",
+ .tuner_type = TUNER_XC4000,
+ .radio_type = TUNER_XC4000,
+ .tuner_addr = 0x61,
+ .radio_addr = 0x61,
+ /*
+ * GPIO setting
+ *
+ * 2: mute (0=off,1=on)
+ * 12: tuner reset pin
+ * 13: audio source (0=tuner audio,1=line in)
+ * 14: FM (0=on,1=off ???)
+ */
+ .input = {{
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6040, /* pin 13 = 0, pin 14 = 1 */
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
+ .gpio2 = 0x0000,
+ }},
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6000, /* pin 13 = 0, pin 14 = 0 */
+ .gpio2 = 0x0000,
+ },
+ .mpeg = CX88_MPEG_DVB,
+ },
+ [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
+ .name = "Leadtek WinFast DTV2000 H PLUS",
+ .tuner_type = TUNER_XC4000,
+ .radio_type = TUNER_XC4000,
+ .tuner_addr = 0x61,
+ .radio_addr = 0x61,
+ /*
+ * GPIO
+ * 2: 1: mute audio
+ * 12: 0: reset XC4000
+ * 13: 1: audio input is line in (0: tuner)
+ * 14: 0: FM radio
+ * 16: 0: RF input is cable
+ */
+ .input = {{
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0403,
+ .gpio1 = 0xF0D7,
+ .gpio2 = 0x0101,
+ .gpio3 = 0x0000,
+ }, {
+ .type = CX88_VMUX_CABLE,
+ .vmux = 0,
+ .gpio0 = 0x0403,
+ .gpio1 = 0xF0D7,
+ .gpio2 = 0x0100,
+ .gpio3 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0403, /* was 0x0407 */
+ .gpio1 = 0xF0F7,
+ .gpio2 = 0x0101,
+ .gpio3 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0403, /* was 0x0407 */
+ .gpio1 = 0xF0F7,
+ .gpio2 = 0x0101,
+ .gpio3 = 0x0000,
+ }},
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0403,
+ .gpio1 = 0xF097,
+ .gpio2 = 0x0100,
+ .gpio3 = 0x0000,
+ },
+ .mpeg = CX88_MPEG_DVB,
+ },
[CX88_BOARD_PROF_7301] = {
.name = "Prof 7301 DVB-S/S2",
.tuner_type = UNSET,
@@ -2581,6 +2675,15 @@ static const struct cx88_subid cx88_subids[] = {
.subdevice = 0x6654,
.card = CX88_BOARD_WINFAST_DTV1800H,
}, {
+ /* WinFast DTV1800 H with XC4000 tuner */
+ .subvendor = 0x107d,
+ .subdevice = 0x6f38,
+ .card = CX88_BOARD_WINFAST_DTV1800H_XC4000,
+ }, {
+ .subvendor = 0x107d,
+ .subdevice = 0x6f42,
+ .card = CX88_BOARD_WINFAST_DTV2000H_PLUS,
+ }, {
/* PVR2000 PAL Model [107d:6630] */
.subvendor = 0x107d,
.subdevice = 0x6630,
@@ -2846,6 +2949,23 @@ static int cx88_xc3028_winfast1800h_callback(struct cx88_core *core,
return -EINVAL;
}
+static int cx88_xc4000_winfast2000h_plus_callback(struct cx88_core *core,
+ int command, int arg)
+{
+ switch (command) {
+ case XC4000_TUNER_RESET:
+ /* GPIO 12 (xc4000 tuner reset) */
+ cx_set(MO_GP1_IO, 0x1010);
+ mdelay(50);
+ cx_clear(MO_GP1_IO, 0x10);
+ mdelay(75);
+ cx_set(MO_GP1_IO, 0x10);
+ mdelay(75);
+ return 0;
+ }
+ return -EINVAL;
+}
+
/* ------------------------------------------------------------------- */
/* some Divco specific stuff */
static int cx88_pv_8000gt_callback(struct cx88_core *core,
@@ -2948,6 +3068,19 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
return -EINVAL;
}
+static int cx88_xc4000_tuner_callback(struct cx88_core *core,
+ int command, int arg)
+{
+ /* Board-specific callbacks */
+ switch (core->boardnr) {
+ case CX88_BOARD_WINFAST_DTV1800H_XC4000:
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
+ return cx88_xc4000_winfast2000h_plus_callback(core,
+ command, arg);
+ }
+ return -EINVAL;
+}
+
/* ----------------------------------------------------------------------- */
/* Tuner callback function. Currently only needed for the Pinnacle *
* PCTV HD 800i with an xc5000 silicon tuner. This is used for both *
@@ -3022,6 +3155,9 @@ int cx88_tuner_callback(void *priv, int component, int command, int arg)
case TUNER_XC2028:
info_printk(core, "Calling XC2028/3028 callback\n");
return cx88_xc2028_tuner_callback(core, command, arg);
+ case TUNER_XC4000:
+ info_printk(core, "Calling XC4000 callback\n");
+ return cx88_xc4000_tuner_callback(core, command, arg);
case TUNER_XC5000:
info_printk(core, "Calling XC5000 callback\n");
return cx88_xc5000_tuner_callback(core, command, arg);
@@ -3109,13 +3245,13 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
- /* GPIO 12 (xc3028 tuner reset) */
- cx_set(MO_GP1_IO, 0x1010);
- mdelay(50);
- cx_clear(MO_GP1_IO, 0x10);
- mdelay(50);
- cx_set(MO_GP1_IO, 0x10);
- mdelay(50);
+ cx88_xc3028_winfast1800h_callback(core, XC2028_TUNER_RESET, 0);
+ break;
+
+ case CX88_BOARD_WINFAST_DTV1800H_XC4000:
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
+ cx88_xc4000_winfast2000h_plus_callback(core,
+ XC4000_TUNER_RESET, 0);
break;
case CX88_BOARD_TWINHAN_VP1027_DVBS:
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index 2e145f0..fbcaa1c 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -636,6 +636,9 @@ int cx88_reset(struct cx88_core *core)
cx_write(MO_PCI_INTSTAT, 0xFFFFFFFF); // Clear PCI int
cx_write(MO_INT1_STAT, 0xFFFFFFFF); // Clear RISC int
+ /* set default notch filter */
+ cx_andor(MO_HTOTAL, 0x1800, (HLNotchFilter4xFsc << 11));
+
/* Reset on-board parts */
cx_write(MO_SRST_IO, 0);
msleep(10);
@@ -759,8 +762,8 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
if (nocomb)
value |= (3 << 5); // disable comb filter
- cx_write(MO_FILTER_EVEN, value);
- cx_write(MO_FILTER_ODD, value);
+ cx_andor(MO_FILTER_EVEN, 0x7ffc7f, value); /* preserve PEAKEN, PSEL */
+ cx_andor(MO_FILTER_ODD, 0x7ffc7f, value);
dprintk(1,"set_scale: filter 0x%04x\n", value);
return 0;
@@ -994,10 +997,10 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
// htotal
tmp64 = norm_htotal(norm) * (u64)vdec_clock;
do_div(tmp64, fsc8);
- htotal = (u32)tmp64 | (HLNotchFilter4xFsc << 11);
+ htotal = (u32)tmp64;
dprintk(1,"set_tvnorm: MO_HTOTAL 0x%08x [old=0x%08x,htotal=%d]\n",
htotal, cx_read(MO_HTOTAL), (u32)tmp64);
- cx_write(MO_HTOTAL, htotal);
+ cx_andor(MO_HTOTAL, 0x07ff, htotal);
// vbi stuff, set vbi offset to 10 (for 20 Clk*2 pixels), this makes
// the effective vbi offset ~244 samples, the same as the Bt8x8
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index c69df7e..cf3d33a 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -41,6 +41,7 @@
#include "or51132.h"
#include "lgdt330x.h"
#include "s5h1409.h"
+#include "xc4000.h"
#include "xc5000.h"
#include "nxt200x.h"
#include "cx24123.h"
@@ -63,6 +64,7 @@ MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
module_param(debug, int, 0644);
@@ -605,6 +607,39 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
return 0;
}
+static int attach_xc4000(struct cx8802_dev *dev, struct xc4000_config *cfg)
+{
+ struct dvb_frontend *fe;
+ struct videobuf_dvb_frontend *fe0 = NULL;
+
+ /* Get the first frontend */
+ fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+ if (!fe0)
+ return -EINVAL;
+
+ if (!fe0->dvb.frontend) {
+ printk(KERN_ERR "%s/2: dvb frontend not attached. "
+ "Can't attach xc4000\n",
+ dev->core->name);
+ return -EINVAL;
+ }
+
+ fe = dvb_attach(xc4000_attach, fe0->dvb.frontend, &dev->core->i2c_adap,
+ cfg);
+ if (!fe) {
+ printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+ dev->core->name);
+ dvb_frontend_detach(fe0->dvb.frontend);
+ dvb_unregister_frontend(fe0->dvb.frontend);
+ fe0->dvb.frontend = NULL;
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "%s/2: xc4000 attached\n", dev->core->name);
+
+ return 0;
+}
+
static int cx24116_set_ts_param(struct dvb_frontend *fe,
int is_punctured)
{
@@ -1294,7 +1329,25 @@ static int dvb_register(struct cx8802_dev *dev)
goto frontend_detach;
}
break;
- case CX88_BOARD_GENIATECH_X8000_MT:
+ case CX88_BOARD_WINFAST_DTV1800H_XC4000:
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
+ fe0->dvb.frontend = dvb_attach(zl10353_attach,
+ &cx88_pinnacle_hybrid_pctv,
+ &core->i2c_adap);
+ if (fe0->dvb.frontend) {
+ struct xc4000_config cfg = {
+ .i2c_address = 0x61,
+ .default_pm = 0,
+ .dvb_amplitude = 134,
+ .set_smoothedcvbs = 1,
+ .if_khz = 4560
+ };
+ fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
+ if (attach_xc4000(dev, &cfg) < 0)
+ goto frontend_detach;
+ }
+ break;
+ case CX88_BOARD_GENIATECH_X8000_MT:
dev->ts_gen_cntrl = 0x00;
fe0->dvb.frontend = dvb_attach(zl10353_attach,
@@ -1577,6 +1630,11 @@ static int cx8802_dvb_advise_acquire(struct cx8802_driver *drv)
udelay(1000);
break;
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
+ /* set RF input to AIR for DVB-T (GPIO 16) */
+ cx_write(MO_GP2_IO, 0x0101);
+ break;
+
default:
err = -ENODEV;
}
@@ -1692,14 +1750,8 @@ static struct cx8802_driver cx8802_dvb_driver = {
static int __init dvb_init(void)
{
- printk(KERN_INFO "cx88/2: cx2388x dvb driver version %d.%d.%d loaded\n",
- (CX88_VERSION_CODE >> 16) & 0xff,
- (CX88_VERSION_CODE >> 8) & 0xff,
- CX88_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx88/2: cx2388x dvb driver version %s loaded\n",
+ CX88_VERSION);
return cx8802_register_driver(&cx8802_dvb_driver);
}
@@ -1710,10 +1762,3 @@ static void __exit dvb_fini(void)
module_init(dvb_init);
module_exit(dvb_fini);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * compile-command: "make DVB=1"
- * End:
- */
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 3f44200..e614201 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -100,6 +100,8 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
break;
case CX88_BOARD_WINFAST_DTV1000:
case CX88_BOARD_WINFAST_DTV1800H:
+ case CX88_BOARD_WINFAST_DTV1800H_XC4000:
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900);
auxgpio = gpio;
@@ -289,6 +291,8 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_WINFAST_DTV2000H:
case CX88_BOARD_WINFAST_DTV2000H_J:
case CX88_BOARD_WINFAST_DTV1800H:
+ case CX88_BOARD_WINFAST_DTV1800H_XC4000:
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
ir_codes = RC_MAP_WINFAST;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 1a7b983..cd5386e 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -39,6 +39,7 @@ MODULE_AUTHOR("Jelle Foks <jelle@foks.us>");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
module_param(debug,int,0644);
@@ -613,13 +614,17 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
core->active_type_id != drv->type_id)
return -EBUSY;
- core->input = 0;
- for (i = 0;
- i < (sizeof(core->board.input) / sizeof(struct cx88_input));
- i++) {
- if (core->board.input[i].type == CX88_VMUX_DVB) {
- core->input = i;
- break;
+ if (drv->type_id == CX88_MPEG_DVB) {
+ /* When switching to DVB, always set the input to the tuner */
+ core->last_analog_input = core->input;
+ core->input = 0;
+ for (i = 0;
+ i < (sizeof(core->board.input) / sizeof(struct cx88_input));
+ i++) {
+ if (core->board.input[i].type == CX88_VMUX_DVB) {
+ core->input = i;
+ break;
+ }
}
}
@@ -644,6 +649,12 @@ static int cx8802_request_release(struct cx8802_driver *drv)
if (drv->advise_release && --core->active_ref == 0)
{
+ if (drv->type_id == CX88_MPEG_DVB) {
+ /* If the DVB driver is releasing, reset the input
+ state to the last configured analog input */
+ core->input = core->last_analog_input;
+ }
+
drv->advise_release(drv);
core->active_type_id = CX88_BOARD_NONE;
mpeg_dbg(1,"%s() Post release GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
@@ -890,14 +901,8 @@ static struct pci_driver cx8802_pci_driver = {
static int __init cx8802_init(void)
{
- printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %d.%d.%d loaded\n",
- (CX88_VERSION_CODE >> 16) & 0xff,
- (CX88_VERSION_CODE >> 8) & 0xff,
- CX88_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %s loaded\n",
+ CX88_VERSION);
return pci_register_driver(&cx8802_pci_driver);
}
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index cef4f28..60d28fd 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -45,6 +45,7 @@
MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX88_VERSION);
/* ------------------------------------------------------------------ */
@@ -220,7 +221,23 @@ static const struct cx88_ctrl cx8800_ctls[] = {
.reg = MO_UV_SATURATION,
.mask = 0x00ff,
.shift = 0,
- },{
+ }, {
+ .v = {
+ .id = V4L2_CID_SHARPNESS,
+ .name = "Sharpness",
+ .minimum = 0,
+ .maximum = 4,
+ .step = 1,
+ .default_value = 0x0,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ },
+ .off = 0,
+ /* NOTE: the value is converted and written to both even
+ and odd registers in the code */
+ .reg = MO_FILTER_ODD,
+ .mask = 7 << 7,
+ .shift = 7,
+ }, {
.v = {
.id = V4L2_CID_CHROMA_AGC,
.name = "Chroma AGC",
@@ -245,6 +262,20 @@ static const struct cx88_ctrl cx8800_ctls[] = {
.mask = 1 << 9,
.shift = 9,
}, {
+ .v = {
+ .id = V4L2_CID_BAND_STOP_FILTER,
+ .name = "Notch filter",
+ .minimum = 0,
+ .maximum = 3,
+ .step = 1,
+ .default_value = 0x0,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ },
+ .off = 0,
+ .reg = MO_HTOTAL,
+ .mask = 3 << 11,
+ .shift = 11,
+ }, {
/* --- audio --- */
.v = {
.id = V4L2_CID_AUDIO_MUTE,
@@ -300,8 +331,10 @@ const u32 cx88_user_ctrls[] = {
V4L2_CID_AUDIO_VOLUME,
V4L2_CID_AUDIO_BALANCE,
V4L2_CID_AUDIO_MUTE,
+ V4L2_CID_SHARPNESS,
V4L2_CID_CHROMA_AGC,
V4L2_CID_COLOR_KILLER,
+ V4L2_CID_BAND_STOP_FILTER,
0
};
EXPORT_SYMBOL(cx88_user_ctrls);
@@ -962,6 +995,10 @@ int cx88_get_control (struct cx88_core *core, struct v4l2_control *ctl)
case V4L2_CID_AUDIO_VOLUME:
ctl->value = 0x3f - (value & 0x3f);
break;
+ case V4L2_CID_SHARPNESS:
+ ctl->value = ((value & 0x0200) ? (((value & 0x0180) >> 7) + 1)
+ : 0);
+ break;
default:
ctl->value = ((value + (c->off << c->shift)) & c->mask) >> c->shift;
break;
@@ -1039,6 +1076,12 @@ int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
}
mask=0xffff;
break;
+ case V4L2_CID_SHARPNESS:
+ /* 0b000, 0b100, 0b101, 0b110, or 0b111 */
+ value = (ctl->value < 1 ? 0 : ((ctl->value + 3) << 7));
+ /* needs to be set for both fields */
+ cx_andor(MO_FILTER_EVEN, mask, value);
+ break;
case V4L2_CID_CHROMA_AGC:
/* Do not allow chroma AGC to be enabled for SECAM */
value = ((ctl->value - c->off) << c->shift) & c->mask;
@@ -1161,7 +1204,6 @@ static int vidioc_querycap (struct file *file, void *priv,
strcpy(cap->driver, "cx8800");
strlcpy(cap->card, core->board.name, sizeof(cap->card));
sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci));
- cap->version = CX88_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
@@ -1480,7 +1522,6 @@ static int radio_querycap (struct file *file, void *priv,
strcpy(cap->driver, "cx8800");
strlcpy(cap->card, core->board.name, sizeof(cap->card));
sprintf(cap->bus_info,"PCI:%s", pci_name(dev->pci));
- cap->version = CX88_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
}
@@ -2139,14 +2180,8 @@ static struct pci_driver cx8800_pci_driver = {
static int __init cx8800_init(void)
{
- printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %d.%d.%d loaded\n",
- (CX88_VERSION_CODE >> 16) & 0xff,
- (CX88_VERSION_CODE >> 8) & 0xff,
- CX88_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %s loaded\n",
+ CX88_VERSION);
return pci_register_driver(&cx8800_pci_driver);
}
@@ -2157,11 +2192,3 @@ static void __exit cx8800_fini(void)
module_init(cx8800_init);
module_exit(cx8800_fini);
-
-/* ----------------------------------------------------------- */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index a399a8b..fa8d307 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -39,9 +39,9 @@
#include "cx88-reg.h"
#include "tuner-xc2028.h"
-#include <linux/version.h>
#include <linux/mutex.h>
-#define CX88_VERSION_CODE KERNEL_VERSION(0, 0, 8)
+
+#define CX88_VERSION "0.0.9"
#define UNSET (-1U)
@@ -242,6 +242,8 @@ extern const struct sram_channel const cx88_sram_channels[];
#define CX88_BOARD_SAMSUNG_SMT_7020 84
#define CX88_BOARD_TWINHAN_VP1027_DVBS 85
#define CX88_BOARD_TEVII_S464 86
+#define CX88_BOARD_WINFAST_DTV2000H_PLUS 87
+#define CX88_BOARD_WINFAST_DTV1800H_XC4000 88
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
@@ -375,6 +377,7 @@ struct cx88_core {
u32 audiomode_manual;
u32 audiomode_current;
u32 input;
+ u32 last_analog_input;
u32 astat;
u32 use_nicam;
unsigned long last_change;
diff --git a/drivers/media/video/davinci/Kconfig b/drivers/media/video/davinci/Kconfig
index 6b19540..60a456e 100644
--- a/drivers/media/video/davinci/Kconfig
+++ b/drivers/media/video/davinci/Kconfig
@@ -91,3 +91,26 @@ config VIDEO_ISIF
To compile this driver as a module, choose M here: the
module will be called vpfe.
+
+config VIDEO_DM644X_VPBE
+ tristate "DM644X VPBE HW module"
+ depends on ARCH_DAVINCI_DM644x
+ select VIDEO_VPSS_SYSTEM
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Enables VPBE modules used for display on a DM644x
+ SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vpbe.
+
+
+config VIDEO_VPBE_DISPLAY
+ tristate "VPBE V4L2 Display driver"
+ depends on ARCH_DAVINCI_DM644x
+ select VIDEO_DM644X_VPBE
+ help
+ Enables VPBE V4L2 Display driver on a DM644x device
+
+ To compile this driver as a module, choose M here: the
+ module will be called vpbe_display.
diff --git a/drivers/media/video/davinci/Makefile b/drivers/media/video/davinci/Makefile
index a379557..ae7dafb 100644
--- a/drivers/media/video/davinci/Makefile
+++ b/drivers/media/video/davinci/Makefile
@@ -16,3 +16,5 @@ obj-$(CONFIG_VIDEO_VPFE_CAPTURE) += vpfe_capture.o
obj-$(CONFIG_VIDEO_DM6446_CCDC) += dm644x_ccdc.o
obj-$(CONFIG_VIDEO_DM355_CCDC) += dm355_ccdc.o
obj-$(CONFIG_VIDEO_ISIF) += isif.o
+obj-$(CONFIG_VIDEO_DM644X_VPBE) += vpbe.o vpbe_osd.o vpbe_venc.o
+obj-$(CONFIG_VIDEO_VPBE_DISPLAY) += vpbe_display.o
diff --git a/drivers/media/video/davinci/vpbe.c b/drivers/media/video/davinci/vpbe.c
new file mode 100644
index 0000000..d773d30
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe.c
@@ -0,0 +1,864 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe.h>
+#include <media/davinci/vpss.h>
+#include <media/davinci/vpbe_venc.h>
+
+#define VPBE_DEFAULT_OUTPUT "Composite"
+#define VPBE_DEFAULT_MODE "ntsc"
+
+static char *def_output = VPBE_DEFAULT_OUTPUT;
+static char *def_mode = VPBE_DEFAULT_MODE;
+static int debug;
+
+module_param(def_output, charp, S_IRUGO);
+module_param(def_mode, charp, S_IRUGO);
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(def_output, "vpbe output name (default:Composite)");
+MODULE_PARM_DESC(def_mode, "vpbe output mode name (default:ntsc)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+MODULE_DESCRIPTION("TI DMXXX VPBE Display controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
+/**
+ * vpbe_current_encoder_info - Get config info for current encoder
+ * @vpbe_dev - vpbe device ptr
+ *
+ * Return ptr to current encoder config info
+ */
+static struct encoder_config_info*
+vpbe_current_encoder_info(struct vpbe_device *vpbe_dev)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int index = vpbe_dev->current_sd_index;
+
+ return ((index == 0) ? &cfg->venc :
+ &cfg->ext_encoders[index-1]);
+}
+
+/**
+ * vpbe_find_encoder_sd_index - Given a name find encoder sd index
+ *
+ * @cfg - ptr to vpbe config
+ * @index - output index used by the application
+ *
+ * Return sd index of the encoder
+ */
+static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
+ int index)
+{
+ char *encoder_name = cfg->outputs[index].subdev_name;
+ int i;
+
+ /* Venc is always first */
+ if (!strcmp(encoder_name, cfg->venc.module_name))
+ return 0;
+
+ for (i = 0; i < cfg->num_ext_encoders; i++) {
+ if (!strcmp(encoder_name,
+ cfg->ext_encoders[i].module_name))
+ return i+1;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_g_cropcap - Get crop capabilities of the display
+ * @vpbe_dev - vpbe device ptr
+ * @cropcap - ptr to struct v4l2_cropcap
+ *
+ * Update the crop capabilities in cropcap for the current
+ * mode
+ */
+static int vpbe_g_cropcap(struct vpbe_device *vpbe_dev,
+ struct v4l2_cropcap *cropcap)
+{
+ if (NULL == cropcap)
+ return -EINVAL;
+ cropcap->bounds.left = 0;
+ cropcap->bounds.top = 0;
+ cropcap->bounds.width = vpbe_dev->current_timings.xres;
+ cropcap->bounds.height = vpbe_dev->current_timings.yres;
+ cropcap->defrect = cropcap->bounds;
+
+ return 0;
+}
+
+/**
+ * vpbe_enum_outputs - enumerate outputs
+ * @vpbe_dev - vpbe device ptr
+ * @output - ptr to v4l2_output structure
+ *
+ * Enumerates the outputs available at the vpbe display
+ * returns the status, -EINVAL if end of output list
+ */
+static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
+ struct v4l2_output *output)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int temp_index = output->index;
+
+ if (temp_index >= cfg->num_outputs)
+ return -EINVAL;
+
+ *output = cfg->outputs[temp_index].output;
+ output->index = temp_index;
+
+ return 0;
+}
+
+static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ if (NULL == mode)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if (!strcmp(mode, var.name)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
+ struct vpbe_enc_mode_info *mode_info)
+{
+ if (NULL == mode_info)
+ return -EINVAL;
+
+ *mode_info = vpbe_dev->current_timings;
+
+ return 0;
+}
+
+static int vpbe_get_dv_preset_info(struct vpbe_device *vpbe_dev,
+ unsigned int dv_preset)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if ((var.timings_type & VPBE_ENC_DV_PRESET) &&
+ (var.timings.dv_preset == dv_preset)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* Get std by std id */
+static int vpbe_get_std_info(struct vpbe_device *vpbe_dev,
+ v4l2_std_id std_id)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if ((var.timings_type & VPBE_ENC_STD) &&
+ (var.timings.std_id & std_id)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vpbe_get_std_info_by_name(struct vpbe_device *vpbe_dev,
+ char *std_name)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if (!strcmp(var.name, std_name)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_set_output - Set output
+ * @vpbe_dev - vpbe device ptr
+ * @index - index of output
+ *
+ * Set vpbe output to the output specified by the index
+ */
+static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
+{
+ struct encoder_config_info *curr_enc_info =
+ vpbe_current_encoder_info(vpbe_dev);
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int enc_out_index;
+ int sd_index;
+ int ret = 0;
+
+ if (index >= cfg->num_outputs)
+ return -EINVAL;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ sd_index = vpbe_dev->current_sd_index;
+ enc_out_index = cfg->outputs[index].output.index;
+ /*
+ * Currently we switch the encoder based on the output selected
+ * by the application. If the media controller is implemented later,
+ * there will be a setup_link API added between the venc
+ * and the external encoder. In that case the comparison below will
+ * always match and the encoder will not be switched. But if the
+ * application chooses not to use the media controller, this provides
+ * the current way of switching the encoder at the venc output.
+ */
+ if (strcmp(curr_enc_info->module_name,
+ cfg->outputs[index].subdev_name)) {
+ /* Need to switch the encoder at the output */
+ sd_index = vpbe_find_encoder_sd_index(cfg, index);
+ if (sd_index < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ret)
+ goto out;
+ }
+
+ /* Set output at the encoder */
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_routing, 0, enc_out_index, 0);
+ if (ret)
+ goto out;
+
+ /*
+ * It is assumed that the venc or external encoder will set a default
+ * mode in the sub device. For an external encoder or LCD panel output,
+ * we also need to set up the lcd port for the required mode. So set up
+ * the lcd port for the default mode that is configured in the board
+ * arch/arm/mach-davinci/board-dm355-evm.setup file for the external
+ * encoder.
+ */
+ ret = vpbe_get_mode_info(vpbe_dev,
+ cfg->outputs[index].default_mode);
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ vpbe_dev->current_sd_index = sd_index;
+ vpbe_dev->current_out_index = index;
+ }
+out:
+ mutex_unlock(&vpbe_dev->lock);
+ return ret;
+}
+
+static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < cfg->num_outputs; i++) {
+ if (!strcmp(def_output,
+ cfg->outputs[i].output.name)) {
+ ret = vpbe_set_output(vpbe_dev, i);
+ if (!ret)
+ vpbe_dev->current_out_index = i;
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * vpbe_get_output - Get output
+ * @vpbe_dev - vpbe device ptr
+ *
+ * Return the index of the current vpbe output
+ */
+static unsigned int vpbe_get_output(struct vpbe_device *vpbe_dev)
+{
+ return vpbe_dev->current_out_index;
+}
+
+/**
+ * vpbe_s_dv_preset - Set the given preset timings in the encoder
+ *
+ * Sets the preset if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_s_dv_preset(struct vpbe_device *vpbe_dev,
+ struct v4l2_dv_preset *dv_preset)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ int sd_index = vpbe_dev->current_sd_index;
+ int ret;
+
+
+ if (!(cfg->outputs[out_index].output.capabilities &
+ V4L2_OUT_CAP_PRESETS))
+ return -EINVAL;
+
+ ret = vpbe_get_dv_preset_info(vpbe_dev, dv_preset->preset);
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&vpbe_dev->lock);
+
+
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_dv_preset, dv_preset);
+ /* set the lcd controller output for the given mode */
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ }
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+/**
+ * vpbe_g_dv_preset - Get the preset in the current encoder
+ *
+ * Get the preset in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_g_dv_preset(struct vpbe_device *vpbe_dev,
+ struct v4l2_dv_preset *dv_preset)
+{
+ if (vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_DV_PRESET) {
+ dv_preset->preset = vpbe_dev->current_timings.timings.dv_preset;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_enum_dv_presets - Enumerate the dv presets in the current encoder
+ *
+ * Enumerate the presets supported by the current output. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_enum_dv_presets(struct vpbe_device *vpbe_dev,
+ struct v4l2_dv_enum_preset *preset_info)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ struct vpbe_output *output = &cfg->outputs[out_index];
+ int j = 0;
+ int i;
+
+ if (!(output->output.capabilities & V4L2_OUT_CAP_PRESETS))
+ return -EINVAL;
+
+ for (i = 0; i < output->num_modes; i++) {
+ if (output->modes[i].timings_type == VPBE_ENC_DV_PRESET) {
+ if (j == preset_info->index)
+ break;
+ j++;
+ }
+ }
+
+ if (i == output->num_modes)
+ return -EINVAL;
+
+ return v4l_fill_dv_preset_info(output->modes[i].timings.dv_preset,
+ preset_info);
+}
+
+/**
+ * vpbe_s_std - Set the given standard in the encoder
+ *
+ * Sets the standard if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ int sd_index = vpbe_dev->current_sd_index;
+ int ret;
+
+ if (!(cfg->outputs[out_index].output.capabilities &
+ V4L2_OUT_CAP_STD))
+ return -EINVAL;
+
+ ret = vpbe_get_std_info(vpbe_dev, *std_id);
+ if (ret)
+ return ret;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_std_output, *std_id);
+ /* set the lcd controller output for the given mode */
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ }
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+/**
+ * vpbe_g_std - Get the standard in the current encoder
+ *
+ * Get the standard in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
+{
+ struct vpbe_enc_mode_info cur_timings = vpbe_dev->current_timings;
+
+ if (cur_timings.timings_type & VPBE_ENC_STD) {
+ *std_id = cur_timings.timings.std_id;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_set_mode - Set mode in the current encoder using mode info
+ *
+ * Use the mode string to decide what timings to set in the encoder
+ * This is typically useful when fbset command is used to change the current
+ * timings by specifying a string to indicate the timings.
+ */
+static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
+ struct vpbe_enc_mode_info *mode_info)
+{
+ struct vpbe_enc_mode_info *preset_mode = NULL;
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct v4l2_dv_preset dv_preset;
+ struct osd_state *osd_device;
+ int out_index = vpbe_dev->current_out_index;
+ int ret = 0;
+ int i;
+
+ if ((NULL == mode_info) || (NULL == mode_info->name))
+ return -EINVAL;
+
+ for (i = 0; i < cfg->outputs[out_index].num_modes; i++) {
+ if (!strcmp(mode_info->name,
+ cfg->outputs[out_index].modes[i].name)) {
+ preset_mode = &cfg->outputs[out_index].modes[i];
+ /*
+ * it may be one of the 3 timings type. Check and
+ * invoke right API
+ */
+ if (preset_mode->timings_type & VPBE_ENC_STD)
+ return vpbe_s_std(vpbe_dev,
+ &preset_mode->timings.std_id);
+ if (preset_mode->timings_type & VPBE_ENC_DV_PRESET) {
+ dv_preset.preset =
+ preset_mode->timings.dv_preset;
+ return vpbe_s_dv_preset(vpbe_dev, &dv_preset);
+ }
+ }
+ }
+
+ /* Only custom timing should reach here */
+ if (preset_mode == NULL)
+ return -EINVAL;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ osd_device = vpbe_dev->osd_device;
+ vpbe_dev->current_timings = *preset_mode;
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+static int vpbe_set_default_mode(struct vpbe_device *vpbe_dev)
+{
+ int ret;
+
+ ret = vpbe_get_std_info_by_name(vpbe_dev, def_mode);
+ if (ret)
+ return ret;
+
+ /* set the default mode in the encoder */
+ return vpbe_set_mode(vpbe_dev, &vpbe_dev->current_timings);
+}
+
+static int platform_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpbe_device *vpbe_dev = data;
+
+ if (strcmp("vpbe-osd", pdev->name) == 0)
+ vpbe_dev->osd_device = platform_get_drvdata(pdev);
+
+ return 0;
+}
+
+/**
+ * vpbe_initialize() - Initialize the vpbe display controller
+ * @vpbe_dev - vpbe device ptr
+ *
+ * The master frame buffer device driver calls this to initialize the vpbe
+ * display controller. This then registers the v4l2 device and the sub
+ * devices and sets a current encoder sub device for display. The v4l2 display
+ * device driver is the master and the frame buffer display device driver is
+ * the slave. The frame buffer display driver checks the initialized flag
+ * during probe and exits if it is not set. Returns status.
+ */
+static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
+{
+ struct encoder_config_info *enc_info;
+ struct v4l2_subdev **enc_subdev;
+ struct osd_state *osd_device;
+ struct i2c_adapter *i2c_adap;
+ int output_index;
+ int num_encoders;
+ int ret = 0;
+ int err;
+ int i;
+
+ /*
+ * v4l2 and FBDev frame buffer devices will get the vpbe_dev pointer
+ * from the platform device by iteration of platform drivers and
+ * matching with device name
+ */
+ if (NULL == vpbe_dev || NULL == dev) {
+ printk(KERN_ERR "Null device pointers.\n");
+ return -ENODEV;
+ }
+
+ if (vpbe_dev->initialized)
+ return 0;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ /* We have dac clock available for platform */
+ vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac");
+ if (IS_ERR(vpbe_dev->dac_clk)) {
+ ret = PTR_ERR(vpbe_dev->dac_clk);
+ goto vpbe_unlock;
+ }
+ if (clk_enable(vpbe_dev->dac_clk)) {
+ ret = -ENODEV;
+ goto vpbe_unlock;
+ }
+ }
+
+ /* first enable vpss clocks */
+ vpss_enable_clock(VPSS_VPBE_CLOCK, 1);
+
+ /* First register a v4l2 device */
+ ret = v4l2_device_register(dev, &vpbe_dev->v4l2_dev);
+ if (ret) {
+ v4l2_err(dev->driver,
+ "Unable to register v4l2 device.\n");
+ goto vpbe_fail_clock;
+ }
+ v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n");
+
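+	/* Look up the vpbe-osd platform device and cache its driver data */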
+ err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev,
+ platform_device_get);
+ if (err < 0)
+ return err;
+
+ vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
+ vpbe_dev->cfg->venc.module_name);
+ /* register venc sub device */
+ if (vpbe_dev->venc == NULL) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "vpbe unable to init venc sub device\n");
+ ret = -ENODEV;
+ goto vpbe_fail_v4l2_device;
+ }
+ /* initialize osd device */
+ osd_device = vpbe_dev->osd_device;
+
+ if (NULL != osd_device->ops.initialize) {
+ err = osd_device->ops.initialize(osd_device);
+ if (err) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "unable to initialize the OSD device");
+ err = -ENOMEM;
+ goto vpbe_fail_v4l2_device;
+ }
+ }
+
+ /*
+ * Register any external encoders that are configured. The venc sub
+ * device is stored at index 0.
+ */
+ num_encoders = vpbe_dev->cfg->num_ext_encoders + 1;
+ vpbe_dev->encoders = kmalloc(
+ sizeof(struct v4l2_subdev *)*num_encoders,
+ GFP_KERNEL);
+ if (NULL == vpbe_dev->encoders) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "unable to allocate memory for encoders sub devices");
+ ret = -ENOMEM;
+ goto vpbe_fail_v4l2_device;
+ }
+
+ i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id);
+ for (i = 0; i < (vpbe_dev->cfg->num_ext_encoders + 1); i++) {
+ if (i == 0) {
+ /* venc is at index 0 */
+ enc_subdev = &vpbe_dev->encoders[i];
+ *enc_subdev = vpbe_dev->venc;
+ continue;
+ }
+ enc_info = &vpbe_dev->cfg->ext_encoders[i];
+ if (enc_info->is_i2c) {
+ enc_subdev = &vpbe_dev->encoders[i];
+ *enc_subdev = v4l2_i2c_new_subdev_board(
+ &vpbe_dev->v4l2_dev, i2c_adap,
+ &enc_info->board_info, NULL);
+ if (*enc_subdev)
+ v4l2_info(&vpbe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ enc_info->module_name);
+ else {
+ v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s"
+ " failed to register",
+ enc_info->module_name);
+ ret = -ENODEV;
+ goto vpbe_fail_sd_register;
+ }
+ } else
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders"
+ " currently not supported");
+ }
+
+ /* set the current encoder and output to that of venc by default */
+ vpbe_dev->current_sd_index = 0;
+ vpbe_dev->current_out_index = 0;
+ output_index = 0;
+
+ mutex_unlock(&vpbe_dev->lock);
+
+ printk(KERN_NOTICE "Setting default output to %s\n", def_output);
+ ret = vpbe_set_default_output(vpbe_dev);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
+ def_output);
+ return ret;
+ }
+
+ printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
+ ret = vpbe_set_default_mode(vpbe_dev);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
+ def_mode);
+ return ret;
+ }
+ vpbe_dev->initialized = 1;
+ /* TBD handling of bootargs for default output and mode */
+ return 0;
+
+vpbe_fail_sd_register:
+ kfree(vpbe_dev->encoders);
+vpbe_fail_v4l2_device:
+ v4l2_device_unregister(&vpbe_dev->v4l2_dev);
+vpbe_fail_clock:
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+ clk_put(vpbe_dev->dac_clk);
+vpbe_unlock:
+ mutex_unlock(&vpbe_dev->lock);
+ return ret;
+}
+
+/**
+ * vpbe_deinitialize() - de-initialize the vpbe display controller
+ * @dev - Master and slave device ptr
+ *
+ * The vpbe master and slave frame buffer devices call this to de-initialize
+ * the display controller. It is called when the master and slave device
+ * driver modules are removed and no longer require the display controller.
+ */
+static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
+{
+ v4l2_device_unregister(&vpbe_dev->v4l2_dev);
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+ clk_put(vpbe_dev->dac_clk);
+
+ kfree(vpbe_dev->encoders);
+ vpbe_dev->initialized = 0;
+ /* disable vpss clocks */
+ vpss_enable_clock(VPSS_VPBE_CLOCK, 0);
+}
+
+static struct vpbe_device_ops vpbe_dev_ops = {
+ .g_cropcap = vpbe_g_cropcap,
+ .enum_outputs = vpbe_enum_outputs,
+ .set_output = vpbe_set_output,
+ .get_output = vpbe_get_output,
+ .s_dv_preset = vpbe_s_dv_preset,
+ .g_dv_preset = vpbe_g_dv_preset,
+ .enum_dv_presets = vpbe_enum_dv_presets,
+ .s_std = vpbe_s_std,
+ .g_std = vpbe_g_std,
+ .initialize = vpbe_initialize,
+ .deinitialize = vpbe_deinitialize,
+ .get_mode_info = vpbe_get_current_mode_info,
+ .set_mode = vpbe_set_mode,
+};
+
+static __devinit int vpbe_probe(struct platform_device *pdev)
+{
+ struct vpbe_device *vpbe_dev;
+ struct vpbe_config *cfg;
+ int ret = -EINVAL;
+
+ if (pdev->dev.platform_data == NULL) {
+ v4l2_err(pdev->dev.driver, "No platform data\n");
+ return -ENODEV;
+ }
+ cfg = pdev->dev.platform_data;
+
+ if (!cfg->module_name[0] ||
+ !cfg->osd.module_name[0] ||
+ !cfg->venc.module_name[0]) {
+ v4l2_err(pdev->dev.driver, "vpbe display module names not"
+ " defined\n");
+ return ret;
+ }
+
+ vpbe_dev = kzalloc(sizeof(*vpbe_dev), GFP_KERNEL);
+ if (vpbe_dev == NULL) {
+ v4l2_err(pdev->dev.driver, "Unable to allocate memory"
+ " for vpbe_device\n");
+ return -ENOMEM;
+ }
+ vpbe_dev->cfg = cfg;
+ vpbe_dev->ops = vpbe_dev_ops;
+ vpbe_dev->pdev = &pdev->dev;
+
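+	/* Default to the first mode of the first output for the initial timings */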
+ if (cfg->outputs->num_modes > 0)
+ vpbe_dev->current_timings = vpbe_dev->cfg->outputs[0].modes[0];
+ else
+ return -ENODEV;
+
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpbe_dev);
+ mutex_init(&vpbe_dev->lock);
+
+ return 0;
+}
+
+static int vpbe_remove(struct platform_device *device)
+{
+ struct vpbe_device *vpbe_dev = platform_get_drvdata(device);
+
+ kfree(vpbe_dev);
+
+ return 0;
+}
+
+static struct platform_driver vpbe_driver = {
+ .driver = {
+ .name = "vpbe_controller",
+ .owner = THIS_MODULE,
+ },
+ .probe = vpbe_probe,
+ .remove = vpbe_remove,
+};
+
+/**
+ * vpbe_init: initialize the vpbe driver
+ *
+ * This function registers the platform driver with the kernel
+ */
+static __init int vpbe_init(void)
+{
+ return platform_driver_register(&vpbe_driver);
+}
+
+/**
+ * vpbe_cleanup : cleanup function for vpbe driver
+ *
+ * This unregisters the platform driver from the kernel
+ */
+static void vpbe_cleanup(void)
+{
+ platform_driver_unregister(&vpbe_driver);
+}
+
+/* Function for module initialization and cleanup */
+module_init(vpbe_init);
+module_exit(vpbe_cleanup);
diff --git a/drivers/media/video/davinci/vpbe_display.c b/drivers/media/video/davinci/vpbe_display.c
new file mode 100644
index 0000000..7f1d83a
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_display.c
@@ -0,0 +1,1860 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <mach/cputype.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_display.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe.h>
+#include <media/davinci/vpbe_venc.h>
+#include <media/davinci/vpbe_osd.h>
+#include "vpbe_venc_regs.h"
+
+#define VPBE_DISPLAY_DRIVER "vpbe-v4l2"
+
+static int debug;
+
+#define VPBE_DISPLAY_SD_BUF_SIZE (720*576*2)
+#define VPBE_DEFAULT_NUM_BUFS 3
+
+module_param(debug, int, 0644);
+
+static int venc_is_second_field(struct vpbe_display *disp_dev)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int ret;
+ int val;
+
+ ret = v4l2_subdev_call(vpbe_dev->venc,
+ core,
+ ioctl,
+ VENC_GET_FLD,
+ &val);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in getting Field ID 0\n");
+ }
+ return val;
+}
+
+static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
+ struct vpbe_layer *layer)
+{
+ struct timespec timevalue;
+
+ if (layer->cur_frm == layer->next_frm)
+ return;
+ ktime_get_ts(&timevalue);
+ layer->cur_frm->ts.tv_sec = timevalue.tv_sec;
+ layer->cur_frm->ts.tv_usec = timevalue.tv_nsec / NSEC_PER_USEC;
+ layer->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&layer->cur_frm->done);
+	/* Make cur_frm point to next_frm */
+ layer->cur_frm = layer->next_frm;
+}
+
+static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
+ struct vpbe_layer *layer)
+{
+ struct osd_state *osd_device = disp_obj->osd_device;
+ unsigned long addr;
+
+ spin_lock(&disp_obj->dma_queue_lock);
+ if (list_empty(&layer->dma_queue) ||
+ (layer->cur_frm != layer->next_frm)) {
+ spin_unlock(&disp_obj->dma_queue_lock);
+ return;
+ }
+ /*
+	 * One field has been displayed; configure
+	 * the next frame if it is available,
+	 * otherwise hold on to the current frame.
+	 * Get the next buffer from the buffer queue.
+ */
+ layer->next_frm = list_entry(
+ layer->dma_queue.next,
+ struct videobuf_buffer,
+ queue);
+ /* Remove that from the buffer queue */
+ list_del(&layer->next_frm->queue);
+ spin_unlock(&disp_obj->dma_queue_lock);
+ /* Mark state of the frame to active */
+ layer->next_frm->state = VIDEOBUF_ACTIVE;
+ addr = videobuf_to_dma_contig(layer->next_frm);
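+	/* Give the new buffer address to the OSD layer */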
+ osd_device->ops.start_layer(osd_device,
+ layer->layer_info.id,
+ addr,
+ disp_obj->cbcr_ofst);
+}
+
+/* interrupt service routine */
+static irqreturn_t venc_isr(int irq, void *arg)
+{
+ struct vpbe_display *disp_dev = (struct vpbe_display *)arg;
+ struct vpbe_layer *layer;
+ static unsigned last_event;
+ unsigned event = 0;
+ int fid;
+ int i;
+
+ if ((NULL == arg) || (NULL == disp_dev->dev[0]))
+ return IRQ_HANDLED;
+
+ if (venc_is_second_field(disp_dev))
+ event |= VENC_SECOND_FIELD;
+ else
+ event |= VENC_FIRST_FIELD;
+
+ if (event == (last_event & ~VENC_END_OF_FRAME)) {
+ /*
+ * If the display is non-interlaced, then we need to flag the
+ * end-of-frame event at every interrupt regardless of the
+ * value of the FIDST bit. We can conclude that the display is
+ * non-interlaced if the value of the FIDST bit is unchanged
+ * from the previous interrupt.
+ */
+ event |= VENC_END_OF_FRAME;
+ } else if (event == VENC_SECOND_FIELD) {
+ /* end-of-frame for interlaced display */
+ event |= VENC_END_OF_FRAME;
+ }
+ last_event = event;
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ layer = disp_dev->dev[i];
+		/* Skip the layer if streaming has not been started on it */
+ if (!layer->started)
+ continue;
+
+ if (layer->layer_first_int) {
+ layer->layer_first_int = 0;
+ continue;
+ }
+ /* Check the field format */
+ if ((V4L2_FIELD_NONE == layer->pix_fmt.field) &&
+ (event & VENC_END_OF_FRAME)) {
+ /* Progressive mode */
+
+ vpbe_isr_even_field(disp_dev, layer);
+ vpbe_isr_odd_field(disp_dev, layer);
+ } else {
+ /* Interlaced mode */
+
+ layer->field_id ^= 1;
+ if (event & VENC_FIRST_FIELD)
+ fid = 0;
+ else
+ fid = 1;
+
+ /*
+			 * If the field id does not match the stored
+			 * field id
+ */
+ if (fid != layer->field_id) {
+ /* Make them in sync */
+ layer->field_id = fid;
+ continue;
+ }
+ /*
+			 * The device field id and local field id are
+			 * in sync. If this is the even field
+ */
+ if (0 == fid)
+ vpbe_isr_even_field(disp_dev, layer);
+ else /* odd field */
+ vpbe_isr_odd_field(disp_dev, layer);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * vpbe_buffer_prepare()
+ * This is the callback function called from videobuf_qbuf();
+ * the buffer is prepared and the user-space virtual address is converted
+ * into a physical address
+ */
+static int vpbe_buffer_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned long addr;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buffer_prepare\n");
+
+ /* If buffer is not initialized, initialize it */
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = layer->pix_fmt.width;
+ vb->height = layer->pix_fmt.height;
+ vb->size = layer->pix_fmt.sizeimage;
+ vb->field = field;
+
+ ret = videobuf_iolock(q, vb, NULL);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to map \
+ user address\n");
+ return -EINVAL;
+ }
+
+ addr = videobuf_to_dma_contig(vb);
+
+ if (q->streaming) {
+ if (!IS_ALIGNED(addr, 8)) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "buffer_prepare:offset is \
+ not aligned to 32 bytes\n");
+ return -EINVAL;
+ }
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ }
+ return 0;
+}
+
+/*
+ * vpbe_buffer_setup()
+ * This function allocates memory for the buffers
+ */
+static int vpbe_buffer_setup(struct videobuf_queue *q,
+ unsigned int *count,
+ unsigned int *size)
+{
+ /* Get the file handle object and layer object */
+ struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
+
+ *size = layer->pix_fmt.sizeimage;
+
+ /* Store number of buffers allocated in numbuffer member */
+ if (*count < VPBE_DEFAULT_NUM_BUFS)
+ *count = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+
+ return 0;
+}
+
+/*
+ * vpbe_buffer_queue()
+ * This function adds the buffer to DMA queue
+ */
+static void vpbe_buffer_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ /* Get the file handle object and layer object */
+ struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned long flags;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buffer_queue\n");
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&disp->dma_queue_lock, flags);
+ list_add_tail(&vb->queue, &layer->dma_queue);
+ spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
+ /* Change state of the buffer */
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+/*
+ * vpbe_buffer_release()
+ * This function is called from the videobuf layer to free memory allocated to
+ * the buffers
+ */
+static void vpbe_buffer_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ /* Get the file handle object and layer object */
+ struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buffer_release\n");
+
+ if (V4L2_MEMORY_USERPTR != layer->memory)
+ videobuf_dma_contig_free(q, vb);
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops video_qops = {
+ .buf_setup = vpbe_buffer_setup,
+ .buf_prepare = vpbe_buffer_prepare,
+ .buf_queue = vpbe_buffer_queue,
+ .buf_release = vpbe_buffer_release,
+};
+
+static
+struct vpbe_layer*
+_vpbe_display_get_other_win_layer(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer)
+{
+ enum vpbe_display_device_id thiswin, otherwin;
+ thiswin = layer->device_id;
+
+ otherwin = (thiswin == VPBE_DISPLAY_DEVICE_0) ?
+ VPBE_DISPLAY_DEVICE_1 : VPBE_DISPLAY_DEVICE_0;
+ return disp_dev->dev[otherwin];
+}
+
+static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer)
+{
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ unsigned long addr;
+ int ret;
+
+ addr = videobuf_to_dma_contig(layer->cur_frm);
+ /* Set address in the display registers */
+ osd_device->ops.start_layer(osd_device,
+ layer->layer_info.id,
+ addr,
+ disp_dev->cbcr_ofst);
+
+ ret = osd_device->ops.enable_layer(osd_device,
+ layer->layer_info.id, 0);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in enabling osd window layer 0\n");
+ return -1;
+ }
+
+ /* Enable the window */
+ layer->layer_info.enable = 1;
+ if (cfg->pixfmt == PIXFMT_NV12) {
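+		/* NV12 uses both video windows, so enable the other window layer too */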
+ struct vpbe_layer *otherlayer =
+ _vpbe_display_get_other_win_layer(disp_dev, layer);
+
+ ret = osd_device->ops.enable_layer(osd_device,
+ otherlayer->layer_info.id, 1);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in enabling osd window layer 1\n");
+ return -1;
+ }
+ otherlayer->layer_info.enable = 1;
+ }
+ return 0;
+}
+
+static void
+vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer,
+ int expected_xsize, int expected_ysize)
+{
+ struct display_layer_info *layer_info = &layer->layer_info;
+ struct v4l2_pix_format *pixfmt = &layer->pix_fmt;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int calculated_xsize;
+ int h_exp = 0;
+ int v_exp = 0;
+ int h_scale;
+ int v_scale;
+
+ v4l2_std_id standard_id = vpbe_dev->current_timings.timings.std_id;
+
+ /*
+	 * The application initially sets the image format. The current display
+	 * size is obtained from the vpbe display controller. expected_xsize
+	 * and expected_ysize are set through the S_CROP ioctl. Based on this,
+	 * the driver calculates the scale factors for the vertical and
+	 * horizontal directions so that the image is displayed scaled
+	 * and expanded. The application uses expansion to display the image
+	 * with square pixels; otherwise it is displayed using the display's
+	 * pixel aspect ratio. It is expected that the application chooses
+	 * the crop coordinates for cropped or scaled display. If the crop
+	 * size is less than the image size, it is displayed cropped, or
+	 * it is displayed scaled and/or expanded.
+	 *
+	 * To begin with, set the crop window the same as expected. Later we
+	 * will override it with the scaled window size.
+ */
+
+ cfg->xsize = pixfmt->width;
+ cfg->ysize = pixfmt->height;
+ layer_info->h_zoom = ZOOM_X1; /* no horizontal zoom */
+	layer_info->v_zoom = ZOOM_X1;	/* no vertical zoom */
+	layer_info->h_exp = H_EXP_OFF;	/* no horizontal expansion */
+	layer_info->v_exp = V_EXP_OFF;	/* no vertical expansion */
+
+ if (pixfmt->width < expected_xsize) {
+ h_scale = vpbe_dev->current_timings.xres / pixfmt->width;
+ if (h_scale < 2)
+ h_scale = 1;
+ else if (h_scale >= 4)
+ h_scale = 4;
+ else
+ h_scale = 2;
+ cfg->xsize *= h_scale;
+ if (cfg->xsize < expected_xsize) {
+ if ((standard_id & V4L2_STD_525_60) ||
+ (standard_id & V4L2_STD_625_50)) {
+ calculated_xsize = (cfg->xsize *
+ VPBE_DISPLAY_H_EXP_RATIO_N) /
+ VPBE_DISPLAY_H_EXP_RATIO_D;
+ if (calculated_xsize <= expected_xsize) {
+ h_exp = 1;
+ cfg->xsize = calculated_xsize;
+ }
+ }
+ }
+ if (h_scale == 2)
+ layer_info->h_zoom = ZOOM_X2;
+ else if (h_scale == 4)
+ layer_info->h_zoom = ZOOM_X4;
+ if (h_exp)
+ layer_info->h_exp = H_EXP_9_OVER_8;
+ } else {
+ /* no scaling, only cropping. Set display area to crop area */
+ cfg->xsize = expected_xsize;
+ }
+
+ if (pixfmt->height < expected_ysize) {
+ v_scale = expected_ysize / pixfmt->height;
+ if (v_scale < 2)
+ v_scale = 1;
+ else if (v_scale >= 4)
+ v_scale = 4;
+ else
+ v_scale = 2;
+ cfg->ysize *= v_scale;
+ if (cfg->ysize < expected_ysize) {
+ if ((standard_id & V4L2_STD_625_50)) {
+ calculated_xsize = (cfg->ysize *
+ VPBE_DISPLAY_V_EXP_RATIO_N) /
+ VPBE_DISPLAY_V_EXP_RATIO_D;
+ if (calculated_xsize <= expected_ysize) {
+ v_exp = 1;
+ cfg->ysize = calculated_xsize;
+ }
+ }
+ }
+ if (v_scale == 2)
+ layer_info->v_zoom = ZOOM_X2;
+ else if (v_scale == 4)
+ layer_info->v_zoom = ZOOM_X4;
+ if (v_exp)
+ layer_info->h_exp = V_EXP_6_OVER_5;
+ } else {
+ /* no scaling, only cropping. Set display area to crop area */
+ cfg->ysize = expected_ysize;
+ }
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "crop display xsize = %d, ysize = %d\n",
+ cfg->xsize, cfg->ysize);
+}
+
+static void vpbe_disp_adj_position(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer,
+ int top, int left)
+{
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+
+ cfg->xpos = min((unsigned int)left,
+ vpbe_dev->current_timings.xres - cfg->xsize);
+ cfg->ypos = min((unsigned int)top,
+ vpbe_dev->current_timings.yres - cfg->ysize);
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "new xpos = %d, ypos = %d\n",
+ cfg->xpos, cfg->ypos);
+}
+
+static void vpbe_disp_check_window_params(struct vpbe_display *disp_dev,
+ struct v4l2_rect *c)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+
+ if ((c->width == 0) ||
+ ((c->width + c->left) > vpbe_dev->current_timings.xres))
+ c->width = vpbe_dev->current_timings.xres - c->left;
+
+ if ((c->height == 0) || ((c->height + c->top) >
+ vpbe_dev->current_timings.yres))
+ c->height = vpbe_dev->current_timings.yres - c->top;
+
+ /* window height must be even for interlaced display */
+ if (vpbe_dev->current_timings.interlaced)
+ c->height &= (~0x01);
+
+}
+
+/**
+ * vpbe_try_format()
+ * If the user application provides width and height and has bytesperline set
+ * to zero, the driver calculates bytesperline and sizeimage based on hardware
+ * limits.
+ */
+static int vpbe_try_format(struct vpbe_display *disp_dev,
+ struct v4l2_pix_format *pixfmt, int check)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int min_height = 1;
+ int min_width = 32;
+ int max_height;
+ int max_width;
+ int bpp;
+
+ if ((pixfmt->pixelformat != V4L2_PIX_FMT_UYVY) &&
+ (pixfmt->pixelformat != V4L2_PIX_FMT_NV12))
+ /* choose default as V4L2_PIX_FMT_UYVY */
+ pixfmt->pixelformat = V4L2_PIX_FMT_UYVY;
+
+ /* Check the field format */
+ if ((pixfmt->field != V4L2_FIELD_INTERLACED) &&
+ (pixfmt->field != V4L2_FIELD_NONE)) {
+ if (vpbe_dev->current_timings.interlaced)
+ pixfmt->field = V4L2_FIELD_INTERLACED;
+ else
+ pixfmt->field = V4L2_FIELD_NONE;
+ }
+
+ if (pixfmt->field == V4L2_FIELD_INTERLACED)
+ min_height = 2;
+
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ bpp = 1;
+ else
+ bpp = 2;
+
+ max_width = vpbe_dev->current_timings.xres;
+ max_height = vpbe_dev->current_timings.yres;
+
+ min_width /= bpp;
+
+ if (!pixfmt->width || (pixfmt->width < min_width) ||
+ (pixfmt->width > max_width)) {
+ pixfmt->width = vpbe_dev->current_timings.xres;
+ }
+
+ if (!pixfmt->height || (pixfmt->height < min_height) ||
+ (pixfmt->height > max_height)) {
+ pixfmt->height = vpbe_dev->current_timings.yres;
+ }
+
+ if (pixfmt->bytesperline < (pixfmt->width * bpp))
+ pixfmt->bytesperline = pixfmt->width * bpp;
+
+ /* Make the bytesperline 32 byte aligned */
+ pixfmt->bytesperline = ((pixfmt->width * bpp + 31) & ~31);
+
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height +
+ (pixfmt->bytesperline * pixfmt->height >> 1);
+ else
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ return 0;
+}
+
+static int vpbe_display_g_priority(struct file *file, void *priv,
+ enum v4l2_priority *p)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+
+ *p = v4l2_prio_max(&layer->prio);
+
+ return 0;
+}
+
+static int vpbe_display_s_priority(struct file *file, void *priv,
+ enum v4l2_priority p)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ int ret;
+
+ ret = v4l2_prio_change(&layer->prio, &fh->prio, p);
+
+ return ret;
+}
+
+static int vpbe_display_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ cap->version = VPBE_DISPLAY_VERSION_CODE;
+ cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ strlcpy(cap->driver, VPBE_DISPLAY_DRIVER, sizeof(cap->driver));
+ strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
+ strlcpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vpbe_display_s_crop(struct file *file, void *priv,
+ struct v4l2_crop *crop)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ struct v4l2_rect *rect = &crop->c;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_S_CROP, layer id = %d\n", layer->device_id);
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ if (rect->top < 0)
+ rect->top = 0;
+ if (rect->left < 0)
+ rect->left = 0;
+
+ vpbe_disp_check_window_params(disp_dev, rect);
+
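+	/* Read the current layer config, apply the new scale and position, and write it back */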
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+
+ vpbe_disp_calculate_scale_factor(disp_dev, layer,
+ rect->width,
+ rect->height);
+ vpbe_disp_adj_position(disp_dev, layer, rect->top,
+ rect->left);
+ ret = osd_device->ops.set_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in set layer config:\n");
+ return -EINVAL;
+ }
+
+ /* apply zooming and h or v expansion */
+ osd_device->ops.set_zoom(osd_device,
+ layer->layer_info.id,
+ layer->layer_info.h_zoom,
+ layer->layer_info.v_zoom);
+ ret = osd_device->ops.set_vid_expansion(osd_device,
+ layer->layer_info.h_exp,
+ layer->layer_info.v_exp);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in set vid expansion:\n");
+ return -EINVAL;
+ }
+
+ if ((layer->layer_info.h_zoom != ZOOM_X1) ||
+ (layer->layer_info.v_zoom != ZOOM_X1) ||
+ (layer->layer_info.h_exp != H_EXP_OFF) ||
+ (layer->layer_info.v_exp != V_EXP_OFF))
+ /* Enable expansion filter */
+ osd_device->ops.set_interpolation_filter(osd_device, 1);
+ else
+ osd_device->ops.set_interpolation_filter(osd_device, 0);
+
+ return 0;
+}
+
+static int vpbe_display_g_crop(struct file *file, void *priv,
+ struct v4l2_crop *crop)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = fh->disp_dev->osd_device;
+ struct v4l2_rect *rect = &crop->c;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_G_CROP, layer id = %d\n",
+ layer->device_id);
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+ ret = -EINVAL;
+ }
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ rect->top = cfg->ypos;
+ rect->left = cfg->xpos;
+ rect->width = cfg->xsize;
+ rect->height = cfg->ysize;
+
+ return 0;
+}
+
+static int vpbe_display_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cropcap)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
+
+ cropcap->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ cropcap->bounds.left = 0;
+ cropcap->bounds.top = 0;
+ cropcap->bounds.width = vpbe_dev->current_timings.xres;
+ cropcap->bounds.height = vpbe_dev->current_timings.yres;
+ cropcap->pixelaspect = vpbe_dev->current_timings.aspect;
+ cropcap->defrect = cropcap->bounds;
+ return 0;
+}
+
+static int vpbe_display_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_G_FMT, layer id = %d\n",
+ layer->device_id);
+
+ /* If buffer type is video output */
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+ /* Fill in the information about format */
+ fmt->fmt.pix = layer->pix_fmt;
+
+ return 0;
+}
+
+static int vpbe_display_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned int index = 0;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_ENUM_FMT, layer id = %d\n",
+ layer->device_id);
+ if (fmt->index > 1) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid format index\n");
+ return -EINVAL;
+ }
+
+ /* Fill in the information about format */
+ index = fmt->index;
+ memset(fmt, 0, sizeof(*fmt));
+ fmt->index = index;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ if (index == 0) {
+ strcpy(fmt->description, "YUV 4:2:2 - UYVY");
+ fmt->pixelformat = V4L2_PIX_FMT_UYVY;
+ } else {
+ strcpy(fmt->description, "Y/CbCr 4:2:0");
+ fmt->pixelformat = V4L2_PIX_FMT_NV12;
+ }
+
+ return 0;
+}
+
+static int vpbe_display_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_S_FMT, layer id = %d\n",
+ layer->device_id);
+
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+ /* Check for valid pixel format */
+ ret = vpbe_try_format(disp_dev, pixfmt, 1);
+ if (ret)
+ return ret;
+
+	/* YUV420 is requested, check availability of the
+	 * other video window */
+
+ layer->pix_fmt = *pixfmt;
+
+ /* Get osd layer config */
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ /* Store the pixel format in the layer object */
+ cfg->xsize = pixfmt->width;
+ cfg->ysize = pixfmt->height;
+ cfg->line_length = pixfmt->bytesperline;
+ cfg->ypos = 0;
+ cfg->xpos = 0;
+ cfg->interlaced = vpbe_dev->current_timings.interlaced;
+
+ if (V4L2_PIX_FMT_UYVY == pixfmt->pixelformat)
+ cfg->pixfmt = PIXFMT_YCbCrI;
+
+	/* Change the default pixel format for both video windows */
+ if (V4L2_PIX_FMT_NV12 == pixfmt->pixelformat) {
+ struct vpbe_layer *otherlayer;
+ cfg->pixfmt = PIXFMT_NV12;
+ otherlayer = _vpbe_display_get_other_win_layer(disp_dev,
+ layer);
+ otherlayer->layer_info.config.pixfmt = PIXFMT_NV12;
+ }
+
+ /* Set the layer config in the osd window */
+ ret = osd_device->ops.set_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in S_FMT params:\n");
+ return -EINVAL;
+ }
+
+ /* Readback and fill the local copy of current pix format */
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+
+ return 0;
+}
+
+static int vpbe_display_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+
+ /* Check for valid field format */
+ return vpbe_try_format(disp_dev, pixfmt, 0);
+
+}
+
+/**
+ * vpbe_display_s_std - Set the given standard in the encoder
+ *
+ * Sets the standard if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_display_s_std(struct file *file, void *priv,
+ v4l2_std_id *std_id)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n");
+
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ if (NULL != vpbe_dev->ops.s_std) {
+ ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set standard for sub devices\n");
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_g_std - Get the standard in the current encoder
+ *
+ * Get the standard in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_display_g_std(struct file *file, void *priv,
+ v4l2_std_id *std_id)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_STD\n");
+
+ /* Get the standard from the current encoder */
+ if (vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) {
+ *std_id = vpbe_dev->current_timings.timings.std_id;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_display_enum_output - enumerate outputs
+ *
+ * Enumerates the outputs available at the vpbe display
+ * returns the status, -EINVAL if end of output list
+ */
+static int vpbe_display_enum_output(struct file *file, void *priv,
+ struct v4l2_output *output)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
+
+ /* Enumerate outputs */
+
+ if (NULL == vpbe_dev->ops.enum_outputs)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output);
+ if (ret) {
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "Failed to enumerate outputs\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_s_output - Set output to
+ * the output specified by the index
+ */
+static int vpbe_display_s_output(struct file *file, void *priv,
+ unsigned int i)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_OUTPUT\n");
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ if (NULL == vpbe_dev->ops.set_output)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.set_output(vpbe_dev, i);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set output for sub devices\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_g_output - Get output
+ * Return the index of the currently selected output
+ */
+static int vpbe_display_g_output(struct file *file, void *priv,
+ unsigned int *i)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n");
+ /* Get the standard from the current encoder */
+ *i = vpbe_dev->current_out_index;
+
+ return 0;
+}
+
+/**
+ * vpbe_display_enum_dv_presets - Enumerate the dv presets
+ *
+ * Enumerate the presets supported by the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int
+vpbe_display_enum_dv_presets(struct file *file, void *priv,
+ struct v4l2_dv_enum_preset *preset)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_PRESETS\n");
+
+ /* Enumerate outputs */
+ if (NULL == vpbe_dev->ops.enum_dv_presets)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.enum_dv_presets(vpbe_dev, preset);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to enumerate dv presets info\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_s_dv_preset - Set the dv presets
+ *
+ * Set the preset in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int
+vpbe_display_s_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_PRESETS\n");
+
+
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+
+ /* Set the given standard in the encoder */
+ if (NULL != vpbe_dev->ops.s_dv_preset)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.s_dv_preset(vpbe_dev, preset);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set the dv presets info\n");
+ return -EINVAL;
+ }
+ /* set the current norm to zero to be consistent. If STD is used
+ * v4l2 layer will set the norm properly on successful s_std call
+ */
+ layer->video_dev.current_norm = 0;
+
+ return 0;
+}
+
+/**
+ * vpbe_display_g_dv_preset - Get the dv preset
+ *
+ * Get the preset from the current encoder. Returns 0 on success or -EINVAL
+ * on error.
+ */
+static int
+vpbe_display_g_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *dv_preset)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_PRESETS\n");
+
+ /* Get the current preset from the encoder */
+
+ if (vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_DV_PRESET) {
+ dv_preset->preset =
+ vpbe_dev->current_timings.timings.dv_preset;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vpbe_display_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = fh->disp_dev->osd_device;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_STREAMOFF,layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* If io is not allowed for this file handle, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+
+ /* If streaming is not started, return error */
+ if (!layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "streaming not started in layer"
+ " id = %d\n", layer->device_id);
+ return -EINVAL;
+ }
+
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ layer->started = 0;
+ ret = videobuf_streamoff(&layer->buffer_queue);
+
+ return ret;
+}
+
+static int vpbe_display_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int ret;
+
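+ /* disable the layer before (re)starting streaming on it */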
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_STREAMON, layerid=%d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* If file handle is not allowed IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+ /* If Streaming is already started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "layer is already streaming\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Call videobuf_streamon to start streaming
+ * in videobuf
+ */
+ ret = videobuf_streamon(&layer->buffer_queue);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "error in videobuf_streamon\n");
+ return ret;
+ }
+ /* If buffer queue is empty, return error */
+ if (list_empty(&layer->dma_queue)) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
+ ret = -EINVAL;
+ goto streamoff;
+ }
+ /* Get the next frame from the buffer queue */
+ layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
+ struct videobuf_buffer, queue);
+ /* Remove buffer from the buffer queue */
+ list_del(&layer->cur_frm->queue);
+ /* Mark state of the current frame to active */
+ layer->cur_frm->state = VIDEOBUF_ACTIVE;
+ /* Initialize field_id and started member */
+ layer->field_id = 0;
+
+ /* Set parameters in OSD and VENC */
+ ret = vpbe_set_osd_display_params(disp_dev, layer);
+ if (ret < 0)
+ goto streamoff;
+
+ /*
+ * if request format is yuv420 semiplanar, need to
+ * enable both video windows
+ */
+ layer->started = 1;
+
+ layer->layer_first_int = 1;
+
+ return ret;
+streamoff:
+ videobuf_streamoff(&layer->buffer_queue);
+ return ret;
+}
+
+static int vpbe_display_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_DQBUF, layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+ /* If this file handle is not allowed to do IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+ if (file->f_flags & O_NONBLOCK)
+ /* Call videobuf_dqbuf for non blocking mode */
+ ret = videobuf_dqbuf(&layer->buffer_queue, buf, 1);
+ else
+ /* Call videobuf_dqbuf for blocking mode */
+ ret = videobuf_dqbuf(&layer->buffer_queue, buf, 0);
+
+ return ret;
+}
+
+static int vpbe_display_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_QBUF, layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* If this file handle is not allowed to do IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+
+ return videobuf_qbuf(&layer->buffer_queue, p);
+}
+
+static int vpbe_display_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_QUERYBUF, layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* Call videobuf_querybuf to get information */
+ ret = videobuf_querybuf(&layer->buffer_queue, buf);
+
+ return ret;
+}
+
+static int vpbe_display_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req_buf)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_reqbufs\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* If io users of the layer is not zero, return error */
+ if (0 != layer->io_usrs) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "buffers already requested for this layer\n");
+ return -EBUSY;
+ }
+ /* Initialize videobuf queue as per the buffer type */
+ videobuf_queue_dma_contig_init(&layer->buffer_queue,
+ &video_qops,
+ vpbe_dev->pdev,
+ &layer->irqlock,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ layer->pix_fmt.field,
+ sizeof(struct videobuf_buffer),
+ fh, NULL);
+
+ /* Set io allowed member of file handle to TRUE */
+ fh->io_allowed = 1;
+ /* Increment io usrs member of layer object to 1 */
+ layer->io_usrs = 1;
+ /* Store type of memory requested in layer object */
+ layer->memory = req_buf->memory;
+ /* Initialize buffer queue */
+ INIT_LIST_HEAD(&layer->dma_queue);
+ /* Allocate buffers */
+ ret = videobuf_reqbufs(&layer->buffer_queue, req_buf);
+
+ return ret;
+}
+
+/*
+ * vpbe_display_mmap()
+ * It is used to map kernel space buffers into user space
+ */
+static int vpbe_display_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ /* Get the layer object and file handle object */
+ struct vpbe_fh *fh = filep->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_mmap\n");
+
+ return videobuf_mmap_mapper(&layer->buffer_queue, vma);
+}
+
+/* vpbe_display_poll(): It is used for select/poll system call
+ */
+static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
+{
+ struct vpbe_fh *fh = filep->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned int err = 0;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_poll\n");
+ if (layer->started)
+ err = videobuf_poll_stream(filep, &layer->buffer_queue, wait);
+ return err;
+}
+
+/*
+ * vpbe_display_open()
+ * It creates a file handle object and stores it in the private_data
+ * member of the file pointer
+ */
+static int vpbe_display_open(struct file *file)
+{
+ struct vpbe_fh *fh = NULL;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_display *disp_dev = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int err;
+
+ /* Allocate memory for the file handle object */
+ fh = kmalloc(sizeof(struct vpbe_fh), GFP_KERNEL);
+ if (fh == NULL) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "unable to allocate memory for file handle object\n");
+ return -ENOMEM;
+ }
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe display open plane = %d\n",
+ layer->device_id);
+
+ /* store pointer to fh in private_data member of filep */
+ file->private_data = fh;
+ fh->layer = layer;
+ fh->disp_dev = disp_dev;
+
+ if (!layer->usrs) {
+
+ /* First claim the layer for this device */
+ err = osd_device->ops.request_layer(osd_device,
+ layer->layer_info.id);
+ if (err < 0) {
+ /* Couldn't get layer */
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Display Manager failed to allocate layer\n");
+ kfree(fh);
+ return -EINVAL;
+ }
+ }
+ /* Increment layer usrs counter */
+ layer->usrs++;
+ /* Set io_allowed member to false */
+ fh->io_allowed = 0;
+ /* Initialize priority of this instance to default priority */
+ fh->prio = V4L2_PRIORITY_UNSET;
+ v4l2_prio_open(&layer->prio, &fh->prio);
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe display device opened successfully\n");
+ return 0;
+}
+
+/*
+ * vpbe_display_release()
+ * This function deletes the buffer queue, frees the buffers and the davinci
+ * display file handle
+ */
+static int vpbe_display_release(struct file *file)
+{
+ /* Get the layer object and file handle object */
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n");
+
+ /* if this instance is doing IO */
+ if (fh->io_allowed) {
+ /* Reset io_usrs member of layer object */
+ layer->io_usrs = 0;
+
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ layer->started = 0;
+ /* Free buffers allocated */
+ videobuf_queue_cancel(&layer->buffer_queue);
+ videobuf_mmap_free(&layer->buffer_queue);
+ }
+
+ /* Decrement layer usrs counter */
+ layer->usrs--;
+ /* If this is the last user of the layer, release it */
+ if (!layer->usrs) {
+ if (cfg->pixfmt == PIXFMT_NV12) {
+ struct vpbe_layer *otherlayer;
+ otherlayer =
+ _vpbe_display_get_other_win_layer(disp_dev, layer);
+ osd_device->ops.disable_layer(osd_device,
+ otherlayer->layer_info.id);
+ osd_device->ops.release_layer(osd_device,
+ otherlayer->layer_info.id);
+ }
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ osd_device->ops.release_layer(osd_device,
+ layer->layer_info.id);
+ }
+ /* Close the priority */
+ v4l2_prio_close(&layer->prio, fh->prio);
+ file->private_data = NULL;
+
+ /* Free memory allocated to file handle object */
+ kfree(fh);
+
+ disp_dev->cbcr_ofst = 0;
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int vpbe_display_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct v4l2_dbg_match *match = &reg->match;
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ if (match->type >= 2) {
+ v4l2_subdev_call(vpbe_dev->venc,
+ core,
+ g_register,
+ reg);
+ }
+
+ return 0;
+}
+
+static int vpbe_display_s_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ return 0;
+}
+#endif
+
+/* vpbe display ioctl operations */
+static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
+ .vidioc_querycap = vpbe_display_querycap,
+ .vidioc_g_fmt_vid_out = vpbe_display_g_fmt,
+ .vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt,
+ .vidioc_s_fmt_vid_out = vpbe_display_s_fmt,
+ .vidioc_try_fmt_vid_out = vpbe_display_try_fmt,
+ .vidioc_reqbufs = vpbe_display_reqbufs,
+ .vidioc_querybuf = vpbe_display_querybuf,
+ .vidioc_qbuf = vpbe_display_qbuf,
+ .vidioc_dqbuf = vpbe_display_dqbuf,
+ .vidioc_streamon = vpbe_display_streamon,
+ .vidioc_streamoff = vpbe_display_streamoff,
+ .vidioc_cropcap = vpbe_display_cropcap,
+ .vidioc_g_crop = vpbe_display_g_crop,
+ .vidioc_s_crop = vpbe_display_s_crop,
+ .vidioc_g_priority = vpbe_display_g_priority,
+ .vidioc_s_priority = vpbe_display_s_priority,
+ .vidioc_s_std = vpbe_display_s_std,
+ .vidioc_g_std = vpbe_display_g_std,
+ .vidioc_enum_output = vpbe_display_enum_output,
+ .vidioc_s_output = vpbe_display_s_output,
+ .vidioc_g_output = vpbe_display_g_output,
+ .vidioc_s_dv_preset = vpbe_display_s_dv_preset,
+ .vidioc_g_dv_preset = vpbe_display_g_dv_preset,
+ .vidioc_enum_dv_presets = vpbe_display_enum_dv_presets,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = vpbe_display_g_register,
+ .vidioc_s_register = vpbe_display_s_register,
+#endif
+};
+
+static struct v4l2_file_operations vpbe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpbe_display_open,
+ .release = vpbe_display_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vpbe_display_mmap,
+ .poll = vpbe_display_poll
+};
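+
+/*
+ * Illustrative user-space sequence for this output device (not part of the
+ * driver; the /dev/videoN node name is whatever the core assigns):
+ *
+ *	fd = open("/dev/videoN", O_RDWR);
+ *	req.count = 3; req.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ *	req.memory = V4L2_MEMORY_MMAP;
+ *	ioctl(fd, VIDIOC_REQBUFS, &req);	-> vpbe_display_reqbufs()
+ *	VIDIOC_QUERYBUF + mmap() each buffer, fill it, then VIDIOC_QBUF
+ *	type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ *	ioctl(fd, VIDIOC_STREAMON, &type);	-> vpbe_display_streamon()
+ *	... VIDIOC_DQBUF / VIDIOC_QBUF loop ...
+ *	ioctl(fd, VIDIOC_STREAMOFF, &type);	-> vpbe_display_streamoff()
+ */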
+
+static int vpbe_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpbe_display *vpbe_disp = data;
+
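+ /* match the VPBE controller and OSD sub devices by platform device name */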
+ if (strcmp("vpbe_controller", pdev->name) == 0)
+ vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
+
+ if (strcmp("vpbe-osd", pdev->name) == 0)
+ vpbe_disp->osd_device = platform_get_drvdata(pdev);
+
+ return 0;
+}
+
+static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer = NULL;
+ struct video_device *vbd = NULL;
+
+ /* Allocate memory for the display layer object */
+
+ disp_dev->dev[i] =
+ kzalloc(sizeof(struct vpbe_layer), GFP_KERNEL);
+
+ /* If memory allocation fails, return error */
+ if (!disp_dev->dev[i]) {
+ printk(KERN_ERR "ran out of memory\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&disp_dev->dev[i]->irqlock);
+ mutex_init(&disp_dev->dev[i]->opslock);
+
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[i];
+ vbd = &vpbe_display_layer->video_dev;
+ /* Initialize field of video device */
+ vbd->release = video_device_release_empty;
+ vbd->fops = &vpbe_fops;
+ vbd->ioctl_ops = &vpbe_ioctl_ops;
+ vbd->minor = -1;
+ vbd->v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
+ vbd->lock = &vpbe_display_layer->opslock;
+
+ if (disp_dev->vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_STD) {
+ vbd->tvnorms = (V4L2_STD_525_60 | V4L2_STD_625_50);
+ vbd->current_norm =
+ disp_dev->vpbe_dev->
+ current_timings.timings.std_id;
+ } else
+ vbd->current_norm = 0;
+
+ snprintf(vbd->name, sizeof(vbd->name),
+ "DaVinci_VPBE Display_DRIVER_V%d.%d.%d",
+ (VPBE_DISPLAY_VERSION_CODE >> 16) & 0xff,
+ (VPBE_DISPLAY_VERSION_CODE >> 8) & 0xff,
+ (VPBE_DISPLAY_VERSION_CODE) & 0xff);
+
+ vpbe_display_layer->device_id = i;
+
+ vpbe_display_layer->layer_info.id =
+ ((i == VPBE_DISPLAY_DEVICE_0) ? WIN_VID0 : WIN_VID1);
+
+ /* Initialize prio member of layer object */
+ v4l2_prio_init(&vpbe_display_layer->prio);
+
+ return 0;
+}
+
+static __devinit int register_device(struct vpbe_layer *vpbe_display_layer,
+ struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
+{
+ int err;
+
+ v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
+ "Trying to register VPBE display device.\n");
+ v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
+ "layer=%x,layer->video_dev=%x\n",
+ (int)vpbe_display_layer,
+ (int)&vpbe_display_layer->video_dev);
+
+ err = video_register_device(&vpbe_display_layer->video_dev,
+ VFL_TYPE_GRABBER,
+ -1);
+ if (err)
+ return -ENODEV;
+
+ vpbe_display_layer->disp_dev = disp_dev;
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, disp_dev);
+ video_set_drvdata(&vpbe_display_layer->video_dev,
+ vpbe_display_layer);
+
+ return 0;
+}
+
+/*
+ * vpbe_display_probe()
+ * This function creates device entries by registering itself to the V4L2
+ * driver and initializes fields of each layer object
+ */
+static __devinit int vpbe_display_probe(struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer;
+ struct vpbe_display *disp_dev;
+ struct resource *res = NULL;
+ int k;
+ int i;
+ int err;
+ int irq;
+
+ printk(KERN_DEBUG "vpbe_display_probe\n");
+ /* Allocate memory for vpbe_display */
+ disp_dev = kzalloc(sizeof(struct vpbe_display), GFP_KERNEL);
+ if (!disp_dev) {
+ printk(KERN_ERR "ran out of memory\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&disp_dev->dma_queue_lock);
+ /*
+ * Scan all the platform devices to find the vpbe
+ * controller device and get the vpbe_dev object
+ */
+ err = bus_for_each_dev(&platform_bus_type, NULL, disp_dev,
+ vpbe_device_get);
+ if (err < 0)
+ goto probe_out;
+ /* Initialize the vpbe display controller */
+ if (NULL != disp_dev->vpbe_dev->ops.initialize) {
+ err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
+ disp_dev->vpbe_dev);
+ if (err) {
+ v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+ "Error initing vpbe\n");
+ err = -ENOMEM;
+ goto probe_out;
+ }
+ }
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ if (init_vpbe_layer(i, disp_dev, pdev)) {
+ err = -ENODEV;
+ goto probe_out;
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+ "Unable to get VENC interrupt resource\n");
+ err = -ENODEV;
+ goto probe_out;
+ }
+
+ irq = res->start;
+ if (request_irq(irq, venc_isr, IRQF_DISABLED, VPBE_DISPLAY_DRIVER,
+ disp_dev)) {
+ v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+ "Unable to request interrupt\n");
+ err = -ENODEV;
+ goto probe_out;
+ }
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
+ err = -ENODEV;
+ goto probe_out_irq;
+ }
+ }
+
+ printk(KERN_DEBUG "Successfully completed the probing of vpbe v4l2 device\n");
+ return 0;
+
+probe_out_irq:
+ free_irq(res->start, disp_dev);
+probe_out:
+ for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[k];
+ /* Unregister video device */
+ if (vpbe_display_layer) {
+ video_unregister_device(
+ &vpbe_display_layer->video_dev);
+ kfree(disp_dev->dev[k]);
+ }
+ }
+ kfree(disp_dev);
+ return err;
+}
+
+/*
+ * vpbe_display_remove()
+ * It un-registers the hardware layers from the V4L2 driver
+ */
+static int vpbe_display_remove(struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer;
+ struct vpbe_display *disp_dev = platform_get_drvdata(pdev);
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct resource *res;
+ int i;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
+
+ /* unregister irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ free_irq(res->start, disp_dev);
+
+ /* deinitialize the vpbe display controller */
+ if (NULL != vpbe_dev->ops.deinitialize)
+ vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
+ /* un-register device */
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[i];
+ /* Unregister video device */
+ video_unregister_device(&vpbe_display_layer->video_dev);
+ }
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ kfree(disp_dev->dev[i]);
+ disp_dev->dev[i] = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver vpbe_display_driver = {
+ .driver = {
+ .name = VPBE_DISPLAY_DRIVER,
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+ },
+ .probe = vpbe_display_probe,
+ .remove = __devexit_p(vpbe_display_remove),
+};
+
+/*
+ * vpbe_display_init()
+ * This function registers the display driver with the kernel; the probe
+ * routine then requests the irq and allocates the layer objects
+ */
+static __devinit int vpbe_display_init(void)
+{
+ int err;
+
+ printk(KERN_DEBUG "vpbe_display_init\n");
+
+ /* Register driver to the kernel */
+ err = platform_driver_register(&vpbe_display_driver);
+ if (0 != err)
+ return err;
+
+ printk(KERN_DEBUG "vpbe_display_init: "
+ "VPBE V4L2 Display Driver V1.0 loaded\n");
+ return 0;
+}
+
+/*
+ * vpbe_display_cleanup()
+ * This function un-registers the display driver from the kernel; the remove
+ * routine frees the requested irq and de-allocates the layer objects.
+ */
+static void vpbe_display_cleanup(void)
+{
+ printk(KERN_DEBUG "vpbe_display_cleanup\n");
+
+ /* platform driver unregister */
+ platform_driver_unregister(&vpbe_display_driver);
+}
+
+/* Function for module initialization and cleanup */
+module_init(vpbe_display_init);
+module_exit(vpbe_display_cleanup);
+
+MODULE_DESCRIPTION("TI DM644x/DM355/DM365 VPBE Display controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/video/davinci/vpbe_osd.c b/drivers/media/video/davinci/vpbe_osd.c
new file mode 100644
index 0000000..5352884
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_osd.c
@@ -0,0 +1,1231 @@
+/*
+ * Copyright (C) 2007-2010 Texas Instruments Inc
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ *
+ * Andy Lowe (alowe@mvista.com), MontaVista Software
+ * - Initial version
+ * Murali Karicheri (mkaricheri@gmail.com), Texas Instruments Ltd.
+ * - ported to sub device interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <mach/io.h>
+#include <mach/cputype.h>
+#include <mach/hardware.h>
+
+#include <media/davinci/vpss.h>
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe_osd.h>
+
+#include <linux/io.h>
+#include "vpbe_osd_regs.h"
+
+#define MODULE_NAME VPBE_OSD_SUBDEV_NAME
+
+/* register access routines */
+static inline u32 osd_read(struct osd_state *sd, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ return readl(osd->osd_base + offset);
+}
+
+static inline u32 osd_write(struct osd_state *sd, u32 val, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ writel(val, osd->osd_base + offset);
+
+ return val;
+}
+
+static inline u32 osd_set(struct osd_state *sd, u32 mask, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ u32 addr = osd->osd_base + offset;
+ u32 val = readl(addr) | mask;
+
+ writel(val, addr);
+
+ return val;
+}
+
+static inline u32 osd_clear(struct osd_state *sd, u32 mask, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ u32 addr = osd->osd_base + offset;
+ u32 val = readl(addr) & ~mask;
+
+ writel(val, addr);
+
+ return val;
+}
+
+static inline u32 osd_modify(struct osd_state *sd, u32 mask, u32 val,
+ u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ u32 addr = osd->osd_base + offset;
+ u32 new_val = (readl(addr) & ~mask) | (val & mask);
+
+ writel(new_val, addr);
+
+ return new_val;
+}
+
+/* define some macros for layer and pixfmt classification */
+#define is_osd_win(layer) (((layer) == WIN_OSD0) || ((layer) == WIN_OSD1))
+#define is_vid_win(layer) (((layer) == WIN_VID0) || ((layer) == WIN_VID1))
+#define is_rgb_pixfmt(pixfmt) \
+ (((pixfmt) == PIXFMT_RGB565) || ((pixfmt) == PIXFMT_RGB888))
+#define is_yc_pixfmt(pixfmt) \
+ (((pixfmt) == PIXFMT_YCbCrI) || ((pixfmt) == PIXFMT_YCrCbI) || \
+ ((pixfmt) == PIXFMT_NV12))
+#define MAX_WIN_SIZE OSD_VIDWIN0XP_V0X
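+/*
+ * line offsets are programmed in units of 32 bytes (the driver writes
+ * line_length >> 5 to the *OFST registers), hence the << 5 here
+ */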
+#define MAX_LINE_LENGTH (OSD_VIDWIN0OFST_V0LO << 5)
+
+/**
+ * _osd_dm6446_vid0_pingpong() - field inversion fix for DM6446
+ * @sd: ptr to struct osd_state
+ * @field_inversion: inversion flag
+ * @fb_base_phys: frame buffer address
+ * @lconfig: ptr to layer config
+ *
+ * This routine implements a workaround for the field signal inversion silicon
+ * erratum described in Advisory 1.3.8 for the DM6446. The fb_base_phys and
+ * lconfig parameters apply to the vid0 window. This routine should be called
+ * whenever the vid0 layer configuration or start address is modified, or when
+ * the OSD field inversion setting is modified.
+ * Returns: 1 if the ping-pong buffers need to be toggled in the vsync isr, or
+ * 0 otherwise
+ */
+static int _osd_dm6446_vid0_pingpong(struct osd_state *sd,
+ int field_inversion,
+ unsigned long fb_base_phys,
+ const struct osd_layer_config *lconfig)
+{
+ struct osd_platform_data *pdata;
+
+ pdata = (struct osd_platform_data *)sd->dev->platform_data;
+ if (pdata->field_inv_wa_enable) {
+
+ if (!field_inversion || !lconfig->interlaced) {
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_PPVWIN0ADR);
+ osd_modify(sd, OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, 0,
+ OSD_MISCCTL);
+ return 0;
+ } else {
+ unsigned miscctl = OSD_MISCCTL_PPRV;
+
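+ /*
+ * Program the two ping-pong window addresses one line apart so
+ * that the vsync ISR can toggle between them to undo the field
+ * inversion (see the erratum described above).
+ */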
+ osd_write(sd,
+ (fb_base_phys & ~0x1F) - lconfig->line_length,
+ OSD_VIDWIN0ADR);
+ osd_write(sd,
+ (fb_base_phys & ~0x1F) + lconfig->line_length,
+ OSD_PPVWIN0ADR);
+ osd_modify(sd,
+ OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, miscctl,
+ OSD_MISCCTL);
+
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void _osd_set_field_inversion(struct osd_state *sd, int enable)
+{
+ unsigned fsinv = 0;
+
+ if (enable)
+ fsinv = OSD_MODE_FSINV;
+
+ osd_modify(sd, OSD_MODE_FSINV, fsinv, OSD_MODE);
+}
+
+static void _osd_set_blink_attribute(struct osd_state *sd, int enable,
+ enum osd_blink_interval blink)
+{
+ u32 osdatrmd = 0;
+
+ if (enable) {
+ osdatrmd |= OSD_OSDATRMD_BLNK;
+ osdatrmd |= blink << OSD_OSDATRMD_BLNKINT_SHIFT;
+ }
+ /* caller must ensure that OSD1 is configured in attribute mode */
+ osd_modify(sd, OSD_OSDATRMD_BLNKINT | OSD_OSDATRMD_BLNK, osdatrmd,
+ OSD_OSDATRMD);
+}
+
+static void _osd_set_rom_clut(struct osd_state *sd,
+ enum osd_rom_clut rom_clut)
+{
+ if (rom_clut == ROM_CLUT0)
+ osd_clear(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
+ else
+ osd_set(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
+}
+
+static void _osd_set_palette_map(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ unsigned char pixel_value,
+ unsigned char clut_index,
+ enum osd_pix_format pixfmt)
+{
+ static const int map_2bpp[] = { 0, 5, 10, 15 };
+ static const int map_1bpp[] = { 0, 15 };
+ int bmp_offset;
+ int bmp_shift;
+ int bmp_mask;
+ int bmp_reg;
+
+ switch (pixfmt) {
+ case PIXFMT_1BPP:
+ bmp_reg = map_1bpp[pixel_value & 0x1];
+ break;
+ case PIXFMT_2BPP:
+ bmp_reg = map_2bpp[pixel_value & 0x3];
+ break;
+ case PIXFMT_4BPP:
+ bmp_reg = pixel_value & 0xf;
+ break;
+ default:
+ return;
+ }
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ bmp_offset = OSD_W0BMP01 + (bmp_reg >> 1) * sizeof(u32);
+ break;
+ case OSDWIN_OSD1:
+ bmp_offset = OSD_W1BMP01 + (bmp_reg >> 1) * sizeof(u32);
+ break;
+ default:
+ return;
+ }
+
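+ /*
+ * Two 8-bit palette-map entries are packed into each register;
+ * bit 0 of bmp_reg selects the byte lane within the register.
+ */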
+ if (bmp_reg & 1) {
+ bmp_shift = 8;
+ bmp_mask = 0xff << 8;
+ } else {
+ bmp_shift = 0;
+ bmp_mask = 0xff;
+ }
+
+ osd_modify(sd, bmp_mask, clut_index << bmp_shift, bmp_offset);
+}
+
+static void _osd_set_rec601_attenuation(struct osd_state *sd,
+ enum osd_win_layer osdwin, int enable)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
+ enable ? OSD_OSDWIN0MD_ATN0E : 0,
+ OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
+ enable ? OSD_OSDWIN1MD_ATN1E : 0,
+ OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_set_blending_factor(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ enum osd_blending_factor blend)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0MD_BLND0,
+ blend << OSD_OSDWIN0MD_BLND0_SHIFT, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1MD_BLND1,
+ blend << OSD_OSDWIN1MD_BLND1_SHIFT, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_enable_color_key(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ unsigned colorkey,
+ enum osd_pix_format pixfmt)
+{
+ switch (pixfmt) {
+ case PIXFMT_RGB565:
+ osd_write(sd, colorkey & OSD_TRANSPVAL_RGBTRANS,
+ OSD_TRANSPVAL);
+ break;
+ default:
+ break;
+ }
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_set(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_set(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_disable_color_key(struct osd_state *sd,
+ enum osd_win_layer osdwin)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_clear(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_clear(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_set_osd_clut(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ enum osd_clut clut)
+{
+ u32 winmd = 0;
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ if (clut == RAM_CLUT)
+ winmd |= OSD_OSDWIN0MD_CLUTS0;
+ osd_modify(sd, OSD_OSDWIN0MD_CLUTS0, winmd, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ if (clut == RAM_CLUT)
+ winmd |= OSD_OSDWIN1MD_CLUTS1;
+ osd_modify(sd, OSD_OSDWIN1MD_CLUTS1, winmd, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_set_zoom(struct osd_state *sd, enum osd_layer layer,
+ enum osd_zoom_factor h_zoom,
+ enum osd_zoom_factor v_zoom)
+{
+ u32 winmd = 0;
+
+ switch (layer) {
+ case WIN_OSD0:
+ winmd |= (h_zoom << OSD_OSDWIN0MD_OHZ0_SHIFT);
+ winmd |= (v_zoom << OSD_OSDWIN0MD_OVZ0_SHIFT);
+ osd_modify(sd, OSD_OSDWIN0MD_OHZ0 | OSD_OSDWIN0MD_OVZ0, winmd,
+ OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ winmd |= (h_zoom << OSD_VIDWINMD_VHZ0_SHIFT);
+ winmd |= (v_zoom << OSD_VIDWINMD_VVZ0_SHIFT);
+ osd_modify(sd, OSD_VIDWINMD_VHZ0 | OSD_VIDWINMD_VVZ0, winmd,
+ OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ winmd |= (h_zoom << OSD_OSDWIN1MD_OHZ1_SHIFT);
+ winmd |= (v_zoom << OSD_OSDWIN1MD_OVZ1_SHIFT);
+ osd_modify(sd, OSD_OSDWIN1MD_OHZ1 | OSD_OSDWIN1MD_OVZ1, winmd,
+ OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ winmd |= (h_zoom << OSD_VIDWINMD_VHZ1_SHIFT);
+ winmd |= (v_zoom << OSD_VIDWINMD_VVZ1_SHIFT);
+ osd_modify(sd, OSD_VIDWINMD_VHZ1 | OSD_VIDWINMD_VVZ1, winmd,
+ OSD_VIDWINMD);
+ break;
+ }
+}
+
+static void _osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ switch (layer) {
+ case WIN_OSD0:
+ osd_clear(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ osd_clear(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ /* disable attribute mode as well as disabling the window */
+ osd_clear(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
+ OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ osd_clear(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
+ break;
+ }
+}
+
+static void osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (!win->is_enabled) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return;
+ }
+ win->is_enabled = 0;
+
+ _osd_disable_layer(sd, layer);
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void _osd_enable_attribute_mode(struct osd_state *sd)
+{
+ /* enable attribute mode for OSD1 */
+ osd_set(sd, OSD_OSDWIN1MD_OASW, OSD_OSDWIN1MD);
+}
+
+static void _osd_enable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ switch (layer) {
+ case WIN_OSD0:
+ osd_set(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ osd_set(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ /* enable OSD1 and disable attribute mode */
+ osd_modify(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
+ OSD_OSDWIN1MD_OACT1, OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ osd_set(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
+ break;
+ }
+}
+
+static int osd_enable_layer(struct osd_state *sd, enum osd_layer layer,
+ int otherwin)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ /*
+ * use the otherwin flag to know this is the other vid window
+ * in YUV420 mode; if it is, skip this check
+ */
+ if (!otherwin && (!win->is_allocated ||
+ !win->fb_base_phys ||
+ !cfg->line_length ||
+ !cfg->xsize ||
+ !cfg->ysize)) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return -1;
+ }
+
+ if (win->is_enabled) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return 0;
+ }
+ win->is_enabled = 1;
+
+ if (cfg->pixfmt != PIXFMT_OSD_ATTR)
+ _osd_enable_layer(sd, layer);
+ else {
+ _osd_enable_attribute_mode(sd);
+ _osd_set_blink_attribute(sd, osd->is_blinking, osd->blink);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
+static void _osd_start_layer(struct osd_state *sd, enum osd_layer layer,
+ unsigned long fb_base_phys,
+ unsigned long cbcr_ofst)
+{
+ switch (layer) {
+ case WIN_OSD0:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN0ADR);
+ break;
+ case WIN_VID0:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+ break;
+ case WIN_OSD1:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN1ADR);
+ break;
+ case WIN_VID1:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN1ADR);
+ break;
+ }
+}
+
+static void osd_start_layer(struct osd_state *sd, enum osd_layer layer,
+ unsigned long fb_base_phys,
+ unsigned long cbcr_ofst)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->fb_base_phys = fb_base_phys & ~0x1F;
+ _osd_start_layer(sd, layer, fb_base_phys, cbcr_ofst);
+
+ if (layer == WIN_VID0) {
+ osd->pingpong =
+ _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
+ win->fb_base_phys,
+ cfg);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void osd_get_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ *lconfig = win->lconfig;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+/**
+ * try_layer_config() - Try a specific configuration for the layer
+ * @sd: ptr to struct osd_state
+ * @layer: layer to configure
+ * @lconfig: layer configuration to try
+ *
+ * If the requested lconfig is completely rejected and the value of lconfig on
+ * exit is the current lconfig, then try_layer_config() returns 1. Otherwise,
+ * try_layer_config() returns 0. A return value of 0 does not necessarily mean
+ * that the value of lconfig on exit is identical to the value of lconfig on
+ * entry, but merely that it represents a change from the current lconfig.
+ */
+static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ int bad_config;
+
+ /* verify that the pixel format is compatible with the layer */
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ case PIXFMT_2BPP:
+ case PIXFMT_4BPP:
+ case PIXFMT_8BPP:
+ case PIXFMT_RGB565:
+ bad_config = !is_osd_win(layer);
+ break;
+ case PIXFMT_YCbCrI:
+ case PIXFMT_YCrCbI:
+ bad_config = !is_vid_win(layer);
+ break;
+ case PIXFMT_RGB888:
+ bad_config = !is_vid_win(layer);
+ break;
+ case PIXFMT_NV12:
+ bad_config = 1;
+ break;
+ case PIXFMT_OSD_ATTR:
+ bad_config = (layer != WIN_OSD1);
+ break;
+ default:
+ bad_config = 1;
+ break;
+ }
+ if (bad_config) {
+ /*
+ * The requested pixel format is incompatible with the layer,
+ * so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return bad_config;
+ }
+
+ /* DM6446: only one OSD window at a time can use RGB pixel formats */
+ if (is_osd_win(layer) && is_rgb_pixfmt(lconfig->pixfmt)) {
+ enum osd_pix_format pixfmt;
+ if (layer == WIN_OSD0)
+ pixfmt = osd->win[WIN_OSD1].lconfig.pixfmt;
+ else
+ pixfmt = osd->win[WIN_OSD0].lconfig.pixfmt;
+
+ if (is_rgb_pixfmt(pixfmt)) {
+ /*
+ * The other OSD window is already configured for an RGB
+ * pixel format, so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return 1;
+ }
+ }
+
+ /* DM6446: only one video window at a time can use RGB888 */
+ if (is_vid_win(layer) && lconfig->pixfmt == PIXFMT_RGB888) {
+ enum osd_pix_format pixfmt;
+
+ if (layer == WIN_VID0)
+ pixfmt = osd->win[WIN_VID1].lconfig.pixfmt;
+ else
+ pixfmt = osd->win[WIN_VID0].lconfig.pixfmt;
+
+ if (pixfmt == PIXFMT_RGB888) {
+ /*
+ * The other video window is already configured for
+ * RGB888, so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return 1;
+ }
+ }
+
+ /* window dimensions must be non-zero */
+ if (!lconfig->line_length || !lconfig->xsize || !lconfig->ysize) {
+ *lconfig = win->lconfig;
+ return 1;
+ }
+
+ /* round line_length up to a multiple of 32 */
+ lconfig->line_length = ((lconfig->line_length + 31) / 32) * 32;
+ lconfig->line_length =
+ min(lconfig->line_length, (unsigned)MAX_LINE_LENGTH);
+ lconfig->xsize = min(lconfig->xsize, (unsigned)MAX_WIN_SIZE);
+ lconfig->ysize = min(lconfig->ysize, (unsigned)MAX_WIN_SIZE);
+ lconfig->xpos = min(lconfig->xpos, (unsigned)MAX_WIN_SIZE);
+ lconfig->ypos = min(lconfig->ypos, (unsigned)MAX_WIN_SIZE);
+ lconfig->interlaced = (lconfig->interlaced != 0);
+ if (lconfig->interlaced) {
+ /* ysize and ypos must be even for interlaced displays */
+ lconfig->ysize &= ~1;
+ lconfig->ypos &= ~1;
+ }
+
+ return 0;
+}
+
+static void _osd_disable_vid_rgb888(struct osd_state *sd)
+{
+ /*
+ * The DM6446 supports RGB888 pixel format in a single video window.
+ * This routine disables RGB888 pixel format for both video windows.
+ * The caller must ensure that neither video window is currently
+ * configured for RGB888 pixel format.
+ */
+ osd_clear(sd, OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+}
+
+static void _osd_enable_vid_rgb888(struct osd_state *sd,
+ enum osd_layer layer)
+{
+ /*
+ * The DM6446 supports RGB888 pixel format in a single video window.
+ * This routine enables RGB888 pixel format for the specified video
+ * window. The caller must ensure that the other video window is not
+ * currently configured for RGB888 pixel format, as this routine will
+ * disable RGB888 pixel format for the other window.
+ */
+ if (layer == WIN_VID0) {
+ osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+ } else if (layer == WIN_VID1) {
+ osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL);
+ }
+}
+
+static void _osd_set_cbcr_order(struct osd_state *sd,
+ enum osd_pix_format pixfmt)
+{
+ /*
+ * The caller must ensure that all windows using YC pixfmt use the same
+ * Cb/Cr order.
+ */
+ if (pixfmt == PIXFMT_YCbCrI)
+ osd_clear(sd, OSD_MODE_CS, OSD_MODE);
+ else if (pixfmt == PIXFMT_YCrCbI)
+ osd_set(sd, OSD_MODE_CS, OSD_MODE);
+}
+
+static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
+ const struct osd_layer_config *lconfig)
+{
+ u32 winmd = 0, winmd_mask = 0, bmw = 0;
+
+ _osd_set_cbcr_order(sd, lconfig->pixfmt);
+
+ switch (layer) {
+ case WIN_OSD0:
+ winmd_mask |= OSD_OSDWIN0MD_RGB0E;
+ if (lconfig->pixfmt == PIXFMT_RGB565)
+ winmd |= OSD_OSDWIN0MD_RGB0E;
+
+ winmd_mask |= OSD_OSDWIN0MD_BMW0 | OSD_OSDWIN0MD_OFF0;
+
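+ /* BMW0 encodes the bitmap bit width: 0=1bpp, 1=2bpp, 2=4bpp, 3=8bpp */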
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ bmw = 0;
+ break;
+ case PIXFMT_2BPP:
+ bmw = 1;
+ break;
+ case PIXFMT_4BPP:
+ bmw = 2;
+ break;
+ case PIXFMT_8BPP:
+ bmw = 3;
+ break;
+ default:
+ break;
+ }
+ winmd |= (bmw << OSD_OSDWIN0MD_BMW0_SHIFT);
+
+ if (lconfig->interlaced)
+ winmd |= OSD_OSDWIN0MD_OFF0;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN0MD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_OSDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_OSDWIN0XL);
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN0YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_OSDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_OSDWIN0YL);
+ }
+ break;
+ case WIN_VID0:
+ winmd_mask |= OSD_VIDWINMD_VFF0;
+ if (lconfig->interlaced)
+ winmd |= OSD_VIDWINMD_VFF0;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
+ /*
+ * For YUV420P format the register contents are
+ * duplicated in both VID registers
+ */
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN0YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
+ }
+ break;
+ case WIN_OSD1:
+ /*
+ * The caller must ensure that OSD1 is disabled prior to
+ * switching from a normal mode to attribute mode or from
+ * attribute mode to a normal mode.
+ */
+ if (lconfig->pixfmt == PIXFMT_OSD_ATTR) {
+ winmd_mask |=
+ OSD_OSDWIN1MD_ATN1E | OSD_OSDWIN1MD_RGB1E |
+ OSD_OSDWIN1MD_CLUTS1 |
+ OSD_OSDWIN1MD_BLND1 | OSD_OSDWIN1MD_TE1;
+ } else {
+ winmd_mask |= OSD_OSDWIN1MD_RGB1E;
+ if (lconfig->pixfmt == PIXFMT_RGB565)
+ winmd |= OSD_OSDWIN1MD_RGB1E;
+
+ winmd_mask |= OSD_OSDWIN1MD_BMW1;
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ bmw = 0;
+ break;
+ case PIXFMT_2BPP:
+ bmw = 1;
+ break;
+ case PIXFMT_4BPP:
+ bmw = 2;
+ break;
+ case PIXFMT_8BPP:
+ bmw = 3;
+ break;
+ default:
+ break;
+ }
+ winmd |= (bmw << OSD_OSDWIN1MD_BMW1_SHIFT);
+ }
+
+ winmd_mask |= OSD_OSDWIN1MD_OFF1;
+ if (lconfig->interlaced)
+ winmd |= OSD_OSDWIN1MD_OFF1;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN1MD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_OSDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_OSDWIN1XL);
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN1YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_OSDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_OSDWIN1YL);
+ }
+ break;
+ case WIN_VID1:
+ winmd_mask |= OSD_VIDWINMD_VFF1;
+ if (lconfig->interlaced)
+ winmd |= OSD_VIDWINMD_VFF1;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
+ /*
+ * For YUV420P format the register contents are
+ * duplicated in both VID registers
+ */
+ osd_modify(sd, OSD_MISCCTL_S420D, ~OSD_MISCCTL_S420D,
+ OSD_MISCCTL);
+
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN1YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
+ }
+ break;
+ }
+}
+
+static int osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+ int reject_config;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ reject_config = try_layer_config(sd, layer, lconfig);
+ if (reject_config) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return reject_config;
+ }
+
+ /* update the current Cb/Cr order */
+ if (is_yc_pixfmt(lconfig->pixfmt))
+ osd->yc_pixfmt = lconfig->pixfmt;
+
+ /*
+ * If we are switching OSD1 from normal mode to attribute mode or from
+ * attribute mode to normal mode, then we must disable the window.
+ */
+ if (layer == WIN_OSD1) {
+ if (((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt != PIXFMT_OSD_ATTR)) ||
+ ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt == PIXFMT_OSD_ATTR))) {
+ win->is_enabled = 0;
+ _osd_disable_layer(sd, layer);
+ }
+ }
+
+ _osd_set_layer_config(sd, layer, lconfig);
+
+ if (layer == WIN_OSD1) {
+ struct osd_osdwin_state *osdwin_state =
+ &osd->osdwin[OSDWIN_OSD1];
+
+ if ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt == PIXFMT_OSD_ATTR)) {
+ /*
+ * We just switched OSD1 from attribute mode to normal
+ * mode, so we must initialize the CLUT select, the
+ * blend factor, transparency colorkey enable, and
+ * attenuation enable (DM6446 only) bits in the
+ * OSDWIN1MD register.
+ */
+ _osd_set_osd_clut(sd, OSDWIN_OSD1,
+ osdwin_state->clut);
+ _osd_set_blending_factor(sd, OSDWIN_OSD1,
+ osdwin_state->blend);
+ if (osdwin_state->colorkey_blending) {
+ _osd_enable_color_key(sd, OSDWIN_OSD1,
+ osdwin_state->
+ colorkey,
+ lconfig->pixfmt);
+ } else
+ _osd_disable_color_key(sd, OSDWIN_OSD1);
+ _osd_set_rec601_attenuation(sd, OSDWIN_OSD1,
+ osdwin_state->
+ rec601_attenuation);
+ } else if ((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt != PIXFMT_OSD_ATTR)) {
+ /*
+ * We just switched OSD1 from normal mode to attribute
+ * mode, so we must initialize the blink enable and
+ * blink interval bits in the OSDATRMD register.
+ */
+ _osd_set_blink_attribute(sd, osd->is_blinking,
+ osd->blink);
+ }
+ }
+
+ /*
+ * If we just switched to a 1-, 2-, or 4-bits-per-pixel bitmap format
+ * then configure a default palette map.
+ */
+ if ((lconfig->pixfmt != cfg->pixfmt) &&
+ ((lconfig->pixfmt == PIXFMT_1BPP) ||
+ (lconfig->pixfmt == PIXFMT_2BPP) ||
+ (lconfig->pixfmt == PIXFMT_4BPP))) {
+ enum osd_win_layer osdwin =
+ ((layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1);
+ struct osd_osdwin_state *osdwin_state =
+ &osd->osdwin[osdwin];
+ unsigned char clut_index;
+ unsigned char clut_entries = 0;
+
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ clut_entries = 2;
+ break;
+ case PIXFMT_2BPP:
+ clut_entries = 4;
+ break;
+ case PIXFMT_4BPP:
+ clut_entries = 16;
+ break;
+ default:
+ break;
+ }
+ /*
+ * The default palette map maps the pixel value to the clut
+ * index, i.e. pixel value 0 maps to clut entry 0, pixel value
+ * 1 maps to clut entry 1, etc.
+ */
+ for (clut_index = 0; clut_index < 16; clut_index++) {
+ osdwin_state->palette_map[clut_index] = clut_index;
+ if (clut_index < clut_entries) {
+ _osd_set_palette_map(sd, osdwin, clut_index,
+ clut_index,
+ lconfig->pixfmt);
+ }
+ }
+ }
+
+ *cfg = *lconfig;
+ /* DM6446: configure the RGB888 enable and window selection */
+ if (osd->win[WIN_VID0].lconfig.pixfmt == PIXFMT_RGB888)
+ _osd_enable_vid_rgb888(sd, WIN_VID0);
+ else if (osd->win[WIN_VID1].lconfig.pixfmt == PIXFMT_RGB888)
+ _osd_enable_vid_rgb888(sd, WIN_VID1);
+ else
+ _osd_disable_vid_rgb888(sd);
+
+ if (layer == WIN_VID0) {
+ osd->pingpong =
+ _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
+ win->fb_base_phys,
+ cfg);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
+static void osd_init_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ enum osd_win_layer osdwin;
+ struct osd_osdwin_state *osdwin_state;
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->is_enabled = 0;
+ _osd_disable_layer(sd, layer);
+
+ win->h_zoom = ZOOM_X1;
+ win->v_zoom = ZOOM_X1;
+ _osd_set_zoom(sd, layer, win->h_zoom, win->v_zoom);
+
+ win->fb_base_phys = 0;
+ _osd_start_layer(sd, layer, win->fb_base_phys, 0);
+
+ cfg->line_length = 0;
+ cfg->xsize = 0;
+ cfg->ysize = 0;
+ cfg->xpos = 0;
+ cfg->ypos = 0;
+ cfg->interlaced = 0;
+ switch (layer) {
+ case WIN_OSD0:
+ case WIN_OSD1:
+ osdwin = (layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1;
+ osdwin_state = &osd->osdwin[osdwin];
+ /*
+ * Other code relies on the fact that OSD windows default to a
+ * bitmap pixel format when they are deallocated, so don't
+ * change this default pixel format.
+ */
+ cfg->pixfmt = PIXFMT_8BPP;
+ _osd_set_layer_config(sd, layer, cfg);
+ osdwin_state->clut = RAM_CLUT;
+ _osd_set_osd_clut(sd, osdwin, osdwin_state->clut);
+ osdwin_state->colorkey_blending = 0;
+ _osd_disable_color_key(sd, osdwin);
+ osdwin_state->blend = OSD_8_VID_0;
+ _osd_set_blending_factor(sd, osdwin, osdwin_state->blend);
+ osdwin_state->rec601_attenuation = 0;
+ _osd_set_rec601_attenuation(sd, osdwin,
+ osdwin_state->
+ rec601_attenuation);
+ if (osdwin == OSDWIN_OSD1) {
+ osd->is_blinking = 0;
+ osd->blink = BLINK_X1;
+ }
+ break;
+ case WIN_VID0:
+ case WIN_VID1:
+ cfg->pixfmt = osd->yc_pixfmt;
+ _osd_set_layer_config(sd, layer, cfg);
+ break;
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void osd_release_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (!win->is_allocated) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+ osd_init_layer(sd, layer);
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->is_allocated = 0;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static int osd_request_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (win->is_allocated) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return -1;
+ }
+ win->is_allocated = 1;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
+static void _osd_init(struct osd_state *sd)
+{
+ osd_write(sd, 0, OSD_MODE);
+ osd_write(sd, 0, OSD_VIDWINMD);
+ osd_write(sd, 0, OSD_OSDWIN0MD);
+ osd_write(sd, 0, OSD_OSDWIN1MD);
+ osd_write(sd, 0, OSD_RECTCUR);
+ osd_write(sd, 0, OSD_MISCCTL);
+}
+
+static void osd_set_left_margin(struct osd_state *sd, u32 val)
+{
+ osd_write(sd, val, OSD_BASEPX);
+}
+
+static void osd_set_top_margin(struct osd_state *sd, u32 val)
+{
+ osd_write(sd, val, OSD_BASEPY);
+}
+
+static int osd_initialize(struct osd_state *osd)
+{
+ if (osd == NULL)
+ return -ENODEV;
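+ /* reset the OSD registers to a known default state */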
+ _osd_init(osd);
+
+ /* set default Cb/Cr order */
+ osd->yc_pixfmt = PIXFMT_YCbCrI;
+
+ _osd_set_field_inversion(osd, osd->field_inversion);
+ _osd_set_rom_clut(osd, osd->rom_clut);
+
+ osd_init_layer(osd, WIN_OSD0);
+ osd_init_layer(osd, WIN_VID0);
+ osd_init_layer(osd, WIN_OSD1);
+ osd_init_layer(osd, WIN_VID1);
+
+ return 0;
+}
+
+static const struct vpbe_osd_ops osd_ops = {
+ .initialize = osd_initialize,
+ .request_layer = osd_request_layer,
+ .release_layer = osd_release_layer,
+ .enable_layer = osd_enable_layer,
+ .disable_layer = osd_disable_layer,
+ .set_layer_config = osd_set_layer_config,
+ .get_layer_config = osd_get_layer_config,
+ .start_layer = osd_start_layer,
+ .set_left_margin = osd_set_left_margin,
+ .set_top_margin = osd_set_top_margin,
+};
+
+static int osd_probe(struct platform_device *pdev)
+{
+ struct osd_platform_data *pdata;
+ struct osd_state *osd;
+ struct resource *res;
+ int ret = 0;
+
+ osd = kzalloc(sizeof(struct osd_state), GFP_KERNEL);
+ if (osd == NULL)
+ return -ENOMEM;
+
+ osd->dev = &pdev->dev;
+ pdata = (struct osd_platform_data *)pdev->dev.platform_data;
+ osd->vpbe_type = (enum vpbe_version)pdata->vpbe_type;
+ if (NULL == pdev->dev.platform_data) {
+ dev_err(osd->dev, "No platform data defined for OSD"
+ " sub device\n");
+ ret = -ENOENT;
+ goto free_mem;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(osd->dev, "Unable to get OSD register address map\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+ osd->osd_base_phys = res->start;
+ osd->osd_size = resource_size(res);
+ if (!request_mem_region(osd->osd_base_phys, osd->osd_size,
+ MODULE_NAME)) {
+ dev_err(osd->dev, "Unable to reserve OSD MMIO region\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+ osd->osd_base = (unsigned long)ioremap_nocache(res->start,
+ osd->osd_size);
+ if (!osd->osd_base) {
+ dev_err(osd->dev, "Unable to map the OSD region\n");
+ ret = -ENODEV;
+ goto release_mem_region;
+ }
+ spin_lock_init(&osd->lock);
+ osd->ops = osd_ops;
+ platform_set_drvdata(pdev, osd);
+ dev_notice(osd->dev, "OSD sub device probe success\n");
+ return ret;
+
+release_mem_region:
+ release_mem_region(osd->osd_base_phys, osd->osd_size);
+free_mem:
+ kfree(osd);
+ return ret;
+}
+
+static int osd_remove(struct platform_device *pdev)
+{
+ struct osd_state *osd = platform_get_drvdata(pdev);
+
+ iounmap((void *)osd->osd_base);
+ release_mem_region(osd->osd_base_phys, osd->osd_size);
+ kfree(osd);
+ return 0;
+}
+
+static struct platform_driver osd_driver = {
+ .probe = osd_probe,
+ .remove = osd_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int osd_init(void)
+{
+ if (platform_driver_register(&osd_driver)) {
+ printk(KERN_ERR "Unable to register davinci osd driver\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void osd_exit(void)
+{
+ platform_driver_unregister(&osd_driver);
+}
+
+module_init(osd_init);
+module_exit(osd_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci OSD Manager Driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/video/davinci/vpbe_osd_regs.h b/drivers/media/video/davinci/vpbe_osd_regs.h
new file mode 100644
index 0000000..584520f
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_osd_regs.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2006-2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _VPBE_OSD_REGS_H
+#define _VPBE_OSD_REGS_H
+
+/* VPBE Global Registers */
+#define VPBE_PID 0x0
+#define VPBE_PCR 0x4
+
+/* VPSS Clock Registers */
+#define VPSSCLK_PID 0x00
+#define VPSSCLK_CLKCTRL 0x04
+
+/* VPSS Buffer Logic Registers */
+#define VPSSBL_PID 0x00
+#define VPSSBL_PCR 0x04
+#define VPSSBL_BCR 0x08
+#define VPSSBL_INTSTAT 0x0C
+#define VPSSBL_INTSEL 0x10
+#define VPSSBL_EVTSEL 0x14
+#define VPSSBL_MEMCTRL 0x18
+#define VPSSBL_CCDCMUX 0x1C
+
+/* DM365 ISP5 system configuration */
+#define ISP5_PID 0x0
+#define ISP5_PCCR 0x4
+#define ISP5_BCR 0x8
+#define ISP5_INTSTAT 0xC
+#define ISP5_INTSEL1 0x10
+#define ISP5_INTSEL2 0x14
+#define ISP5_INTSEL3 0x18
+#define ISP5_EVTSEL 0x1c
+#define ISP5_CCDCMUX 0x20
+
+/* VPBE On-Screen Display Subsystem Registers (OSD) */
+#define OSD_MODE 0x00
+#define OSD_VIDWINMD 0x04
+#define OSD_OSDWIN0MD 0x08
+#define OSD_OSDWIN1MD 0x0C
+#define OSD_OSDATRMD 0x0C
+#define OSD_RECTCUR 0x10
+#define OSD_VIDWIN0OFST 0x18
+#define OSD_VIDWIN1OFST 0x1C
+#define OSD_OSDWIN0OFST 0x20
+#define OSD_OSDWIN1OFST 0x24
+#define OSD_VIDWINADH 0x28
+#define OSD_VIDWIN0ADL 0x2C
+#define OSD_VIDWIN0ADR 0x2C
+#define OSD_VIDWIN1ADL 0x30
+#define OSD_VIDWIN1ADR 0x30
+#define OSD_OSDWINADH 0x34
+#define OSD_OSDWIN0ADL 0x38
+#define OSD_OSDWIN0ADR 0x38
+#define OSD_OSDWIN1ADL 0x3C
+#define OSD_OSDWIN1ADR 0x3C
+#define OSD_BASEPX 0x40
+#define OSD_BASEPY 0x44
+#define OSD_VIDWIN0XP 0x48
+#define OSD_VIDWIN0YP 0x4C
+#define OSD_VIDWIN0XL 0x50
+#define OSD_VIDWIN0YL 0x54
+#define OSD_VIDWIN1XP 0x58
+#define OSD_VIDWIN1YP 0x5C
+#define OSD_VIDWIN1XL 0x60
+#define OSD_VIDWIN1YL 0x64
+#define OSD_OSDWIN0XP 0x68
+#define OSD_OSDWIN0YP 0x6C
+#define OSD_OSDWIN0XL 0x70
+#define OSD_OSDWIN0YL 0x74
+#define OSD_OSDWIN1XP 0x78
+#define OSD_OSDWIN1YP 0x7C
+#define OSD_OSDWIN1XL 0x80
+#define OSD_OSDWIN1YL 0x84
+#define OSD_CURXP 0x88
+#define OSD_CURYP 0x8C
+#define OSD_CURXL 0x90
+#define OSD_CURYL 0x94
+#define OSD_W0BMP01 0xA0
+#define OSD_W0BMP23 0xA4
+#define OSD_W0BMP45 0xA8
+#define OSD_W0BMP67 0xAC
+#define OSD_W0BMP89 0xB0
+#define OSD_W0BMPAB 0xB4
+#define OSD_W0BMPCD 0xB8
+#define OSD_W0BMPEF 0xBC
+#define OSD_W1BMP01 0xC0
+#define OSD_W1BMP23 0xC4
+#define OSD_W1BMP45 0xC8
+#define OSD_W1BMP67 0xCC
+#define OSD_W1BMP89 0xD0
+#define OSD_W1BMPAB 0xD4
+#define OSD_W1BMPCD 0xD8
+#define OSD_W1BMPEF 0xDC
+#define OSD_VBNDRY 0xE0
+#define OSD_EXTMODE 0xE4
+#define OSD_MISCCTL 0xE8
+#define OSD_CLUTRAMYCB 0xEC
+#define OSD_CLUTRAMCR 0xF0
+#define OSD_TRANSPVAL 0xF4
+#define OSD_TRANSPVALL 0xF4
+#define OSD_TRANSPVALU 0xF8
+#define OSD_TRANSPBMPIDX 0xFC
+#define OSD_PPVWIN0ADR 0xFC
+
+/* bit definitions */
+#define VPBE_PCR_VENC_DIV (1 << 1)
+#define VPBE_PCR_CLK_OFF (1 << 0)
+
+#define VPSSBL_INTSTAT_HSSIINT (1 << 14)
+#define VPSSBL_INTSTAT_CFALDINT (1 << 13)
+#define VPSSBL_INTSTAT_IPIPE_INT5 (1 << 12)
+#define VPSSBL_INTSTAT_IPIPE_INT4 (1 << 11)
+#define VPSSBL_INTSTAT_IPIPE_INT3 (1 << 10)
+#define VPSSBL_INTSTAT_IPIPE_INT2 (1 << 9)
+#define VPSSBL_INTSTAT_IPIPE_INT1 (1 << 8)
+#define VPSSBL_INTSTAT_IPIPE_INT0 (1 << 7)
+#define VPSSBL_INTSTAT_IPIPEIFINT (1 << 6)
+#define VPSSBL_INTSTAT_OSDINT (1 << 5)
+#define VPSSBL_INTSTAT_VENCINT (1 << 4)
+#define VPSSBL_INTSTAT_H3AINT (1 << 3)
+#define VPSSBL_INTSTAT_CCDC_VDINT2 (1 << 2)
+#define VPSSBL_INTSTAT_CCDC_VDINT1 (1 << 1)
+#define VPSSBL_INTSTAT_CCDC_VDINT0 (1 << 0)
+
+/* DM365 ISP5 bit definitions */
+#define ISP5_INTSTAT_VENCINT (1 << 21)
+#define ISP5_INTSTAT_OSDINT (1 << 20)
+
+/* VMOD TVTYP options for HDMD=0 */
+#define SDTV_NTSC 0
+#define SDTV_PAL 1
+/* VMOD TVTYP options for HDMD=1 */
+#define HDTV_525P 0
+#define HDTV_625P 1
+#define HDTV_1080I 2
+#define HDTV_720P 3
+
+#define OSD_MODE_CS (1 << 15)
+#define OSD_MODE_OVRSZ (1 << 14)
+#define OSD_MODE_OHRSZ (1 << 13)
+#define OSD_MODE_EF (1 << 12)
+#define OSD_MODE_VVRSZ (1 << 11)
+#define OSD_MODE_VHRSZ (1 << 10)
+#define OSD_MODE_FSINV (1 << 9)
+#define OSD_MODE_BCLUT (1 << 8)
+#define OSD_MODE_CABG_SHIFT 0
+#define OSD_MODE_CABG (0xff << 0)
+
+#define OSD_VIDWINMD_VFINV (1 << 15)
+#define OSD_VIDWINMD_V1EFC (1 << 14)
+#define OSD_VIDWINMD_VHZ1_SHIFT 12
+#define OSD_VIDWINMD_VHZ1 (3 << 12)
+#define OSD_VIDWINMD_VVZ1_SHIFT 10
+#define OSD_VIDWINMD_VVZ1 (3 << 10)
+#define OSD_VIDWINMD_VFF1 (1 << 9)
+#define OSD_VIDWINMD_ACT1 (1 << 8)
+#define OSD_VIDWINMD_V0EFC (1 << 6)
+#define OSD_VIDWINMD_VHZ0_SHIFT 4
+#define OSD_VIDWINMD_VHZ0 (3 << 4)
+#define OSD_VIDWINMD_VVZ0_SHIFT 2
+#define OSD_VIDWINMD_VVZ0 (3 << 2)
+#define OSD_VIDWINMD_VFF0 (1 << 1)
+#define OSD_VIDWINMD_ACT0 (1 << 0)
+
+#define OSD_OSDWIN0MD_ATN0E (1 << 14)
+#define OSD_OSDWIN0MD_RGB0E (1 << 13)
+#define OSD_OSDWIN0MD_BMP0MD_SHIFT 13
+#define OSD_OSDWIN0MD_BMP0MD (3 << 13)
+#define OSD_OSDWIN0MD_CLUTS0 (1 << 12)
+#define OSD_OSDWIN0MD_OHZ0_SHIFT 10
+#define OSD_OSDWIN0MD_OHZ0 (3 << 10)
+#define OSD_OSDWIN0MD_OVZ0_SHIFT 8
+#define OSD_OSDWIN0MD_OVZ0 (3 << 8)
+#define OSD_OSDWIN0MD_BMW0_SHIFT 6
+#define OSD_OSDWIN0MD_BMW0 (3 << 6)
+#define OSD_OSDWIN0MD_BLND0_SHIFT 3
+#define OSD_OSDWIN0MD_BLND0 (7 << 3)
+#define OSD_OSDWIN0MD_TE0 (1 << 2)
+#define OSD_OSDWIN0MD_OFF0 (1 << 1)
+#define OSD_OSDWIN0MD_OACT0 (1 << 0)
+
+#define OSD_OSDWIN1MD_OASW (1 << 15)
+#define OSD_OSDWIN1MD_ATN1E (1 << 14)
+#define OSD_OSDWIN1MD_RGB1E (1 << 13)
+#define OSD_OSDWIN1MD_BMP1MD_SHIFT 13
+#define OSD_OSDWIN1MD_BMP1MD (3 << 13)
+#define OSD_OSDWIN1MD_CLUTS1 (1 << 12)
+#define OSD_OSDWIN1MD_OHZ1_SHIFT 10
+#define OSD_OSDWIN1MD_OHZ1 (3 << 10)
+#define OSD_OSDWIN1MD_OVZ1_SHIFT 8
+#define OSD_OSDWIN1MD_OVZ1 (3 << 8)
+#define OSD_OSDWIN1MD_BMW1_SHIFT 6
+#define OSD_OSDWIN1MD_BMW1 (3 << 6)
+#define OSD_OSDWIN1MD_BLND1_SHIFT 3
+#define OSD_OSDWIN1MD_BLND1 (7 << 3)
+#define OSD_OSDWIN1MD_TE1 (1 << 2)
+#define OSD_OSDWIN1MD_OFF1 (1 << 1)
+#define OSD_OSDWIN1MD_OACT1 (1 << 0)
+
+#define OSD_OSDATRMD_OASW (1 << 15)
+#define OSD_OSDATRMD_OHZA_SHIFT 10
+#define OSD_OSDATRMD_OHZA (3 << 10)
+#define OSD_OSDATRMD_OVZA_SHIFT 8
+#define OSD_OSDATRMD_OVZA (3 << 8)
+#define OSD_OSDATRMD_BLNKINT_SHIFT 6
+#define OSD_OSDATRMD_BLNKINT (3 << 6)
+#define OSD_OSDATRMD_OFFA (1 << 1)
+#define OSD_OSDATRMD_BLNK (1 << 0)
+
+#define OSD_RECTCUR_RCAD_SHIFT 8
+#define OSD_RECTCUR_RCAD (0xff << 8)
+#define OSD_RECTCUR_CLUTSR (1 << 7)
+#define OSD_RECTCUR_RCHW_SHIFT 4
+#define OSD_RECTCUR_RCHW (7 << 4)
+#define OSD_RECTCUR_RCVW_SHIFT 1
+#define OSD_RECTCUR_RCVW (7 << 1)
+#define OSD_RECTCUR_RCACT (1 << 0)
+
+#define OSD_VIDWIN0OFST_V0LO (0x1ff << 0)
+
+#define OSD_VIDWIN1OFST_V1LO (0x1ff << 0)
+
+#define OSD_OSDWIN0OFST_O0LO (0x1ff << 0)
+
+#define OSD_OSDWIN1OFST_O1LO (0x1ff << 0)
+
+#define OSD_WINOFST_AH_SHIFT 9
+
+#define OSD_VIDWIN0OFST_V0AH (0xf << 9)
+#define OSD_VIDWIN1OFST_V1AH (0xf << 9)
+#define OSD_OSDWIN0OFST_O0AH (0xf << 9)
+#define OSD_OSDWIN1OFST_O1AH (0xf << 9)
+
+#define OSD_VIDWINADH_V1AH_SHIFT 8
+#define OSD_VIDWINADH_V1AH (0x7f << 8)
+#define OSD_VIDWINADH_V0AH_SHIFT 0
+#define OSD_VIDWINADH_V0AH (0x7f << 0)
+
+#define OSD_VIDWIN0ADL_V0AL (0xffff << 0)
+
+#define OSD_VIDWIN1ADL_V1AL (0xffff << 0)
+
+#define OSD_OSDWINADH_O1AH_SHIFT 8
+#define OSD_OSDWINADH_O1AH (0x7f << 8)
+#define OSD_OSDWINADH_O0AH_SHIFT 0
+#define OSD_OSDWINADH_O0AH (0x7f << 0)
+
+#define OSD_OSDWIN0ADL_O0AL (0xffff << 0)
+
+#define OSD_OSDWIN1ADL_O1AL (0xffff << 0)
+
+#define OSD_BASEPX_BPX (0x3ff << 0)
+
+#define OSD_BASEPY_BPY (0x1ff << 0)
+
+#define OSD_VIDWIN0XP_V0X (0x7ff << 0)
+
+#define OSD_VIDWIN0YP_V0Y (0x7ff << 0)
+
+#define OSD_VIDWIN0XL_V0W (0x7ff << 0)
+
+#define OSD_VIDWIN0YL_V0H (0x7ff << 0)
+
+#define OSD_VIDWIN1XP_V1X (0x7ff << 0)
+
+#define OSD_VIDWIN1YP_V1Y (0x7ff << 0)
+
+#define OSD_VIDWIN1XL_V1W (0x7ff << 0)
+
+#define OSD_VIDWIN1YL_V1H (0x7ff << 0)
+
+#define OSD_OSDWIN0XP_W0X (0x7ff << 0)
+
+#define OSD_OSDWIN0YP_W0Y (0x7ff << 0)
+
+#define OSD_OSDWIN0XL_W0W (0x7ff << 0)
+
+#define OSD_OSDWIN0YL_W0H (0x7ff << 0)
+
+#define OSD_OSDWIN1XP_W1X (0x7ff << 0)
+
+#define OSD_OSDWIN1YP_W1Y (0x7ff << 0)
+
+#define OSD_OSDWIN1XL_W1W (0x7ff << 0)
+
+#define OSD_OSDWIN1YL_W1H (0x7ff << 0)
+
+#define OSD_CURXP_RCSX (0x7ff << 0)
+
+#define OSD_CURYP_RCSY (0x7ff << 0)
+
+#define OSD_CURXL_RCSW (0x7ff << 0)
+
+#define OSD_CURYL_RCSH (0x7ff << 0)
+
+#define OSD_EXTMODE_EXPMDSEL (1 << 15)
+#define OSD_EXTMODE_SCRNHEXP_SHIFT 13
+#define OSD_EXTMODE_SCRNHEXP (3 << 13)
+#define OSD_EXTMODE_SCRNVEXP (1 << 12)
+#define OSD_EXTMODE_OSD1BLDCHR (1 << 11)
+#define OSD_EXTMODE_OSD0BLDCHR (1 << 10)
+#define OSD_EXTMODE_ATNOSD1EN (1 << 9)
+#define OSD_EXTMODE_ATNOSD0EN (1 << 8)
+#define OSD_EXTMODE_OSDHRSZ15 (1 << 7)
+#define OSD_EXTMODE_VIDHRSZ15 (1 << 6)
+#define OSD_EXTMODE_ZMFILV1HEN (1 << 5)
+#define OSD_EXTMODE_ZMFILV1VEN (1 << 4)
+#define OSD_EXTMODE_ZMFILV0HEN (1 << 3)
+#define OSD_EXTMODE_ZMFILV0VEN (1 << 2)
+#define OSD_EXTMODE_EXPFILHEN (1 << 1)
+#define OSD_EXTMODE_EXPFILVEN (1 << 0)
+
+#define OSD_MISCCTL_BLDSEL (1 << 15)
+#define OSD_MISCCTL_S420D (1 << 14)
+#define OSD_MISCCTL_BMAPT (1 << 13)
+#define OSD_MISCCTL_DM365M (1 << 12)
+#define OSD_MISCCTL_RGBEN (1 << 7)
+#define OSD_MISCCTL_RGBWIN (1 << 6)
+#define OSD_MISCCTL_DMANG (1 << 6)
+#define OSD_MISCCTL_TMON (1 << 5)
+#define OSD_MISCCTL_RSEL (1 << 4)
+#define OSD_MISCCTL_CPBSY (1 << 3)
+#define OSD_MISCCTL_PPSW (1 << 2)
+#define OSD_MISCCTL_PPRV (1 << 1)
+
+#define OSD_CLUTRAMYCB_Y_SHIFT 8
+#define OSD_CLUTRAMYCB_Y (0xff << 8)
+#define OSD_CLUTRAMYCB_CB_SHIFT 0
+#define OSD_CLUTRAMYCB_CB (0xff << 0)
+
+#define OSD_CLUTRAMCR_CR_SHIFT 8
+#define OSD_CLUTRAMCR_CR (0xff << 8)
+#define OSD_CLUTRAMCR_CADDR_SHIFT 0
+#define OSD_CLUTRAMCR_CADDR (0xff << 0)
+
+#define OSD_TRANSPVAL_RGBTRANS (0xffff << 0)
+
+#define OSD_TRANSPVALL_RGBL (0xffff << 0)
+
+#define OSD_TRANSPVALU_Y_SHIFT 8
+#define OSD_TRANSPVALU_Y (0xff << 8)
+#define OSD_TRANSPVALU_RGBU_SHIFT 0
+#define OSD_TRANSPVALU_RGBU (0xff << 0)
+
+#define OSD_TRANSPBMPIDX_BMP1_SHIFT 8
+#define OSD_TRANSPBMPIDX_BMP1 (0xff << 8)
+#define OSD_TRANSPBMPIDX_BMP0_SHIFT 0
+#define OSD_TRANSPBMPIDX_BMP0 0xff
+
+#endif /* _VPBE_OSD_REGS_H */
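[Editor's note] The OSD window registers above are packed bit fields, so drivers update them with read-modify-write sequences built from the *_SHIFT and mask macros. A minimal sketch of that pattern follows; the osd_base pointer and the readl()/writel() accessors from <linux/io.h> are assumptions here, mirroring how the VENC driver later in this series wraps its register I/O.

/* Sketch only: program the video window 0 horizontal zoom field and activate
 * the window, leaving the remaining OSD_VIDWINMD bits untouched.
 */
static void osd_vidwin0_enable(void __iomem *osd_base, u32 zoom)
{
	u32 val = readl(osd_base + OSD_VIDWINMD);

	val &= ~OSD_VIDWINMD_VHZ0;		/* clear the 2-bit zoom field */
	val |= (zoom << OSD_VIDWINMD_VHZ0_SHIFT) & OSD_VIDWINMD_VHZ0;
	val |= OSD_VIDWINMD_ACT0;		/* activate video window 0 */

	writel(val, osd_base + OSD_VIDWINMD);
}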
diff --git a/drivers/media/video/davinci/vpbe_venc.c b/drivers/media/video/davinci/vpbe_venc.c
new file mode 100644
index 0000000..03a3e5c
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_venc.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include <mach/hardware.h>
+#include <mach/mux.h>
+#include <mach/io.h>
+#include <mach/i2c.h>
+
+#include <linux/io.h>
+
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe_venc.h>
+#include <media/davinci/vpss.h>
+#include <media/v4l2-device.h>
+
+#include "vpbe_venc_regs.h"
+
+#define MODULE_NAME VPBE_VENC_SUBDEV_NAME
+
+static int debug = 2;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-2");
+
+struct venc_state {
+ struct v4l2_subdev sd;
+ struct venc_callback *callback;
+ struct venc_platform_data *pdata;
+ struct device *pdev;
+ u32 output;
+ v4l2_std_id std;
+ spinlock_t lock;
+ void __iomem *venc_base;
+ void __iomem *vdaccfg_reg;
+};
+
+static inline struct venc_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct venc_state, sd);
+}
+
+static inline u32 venc_read(struct v4l2_subdev *sd, u32 offset)
+{
+ struct venc_state *venc = to_state(sd);
+
+ return readl(venc->venc_base + offset);
+}
+
+static inline u32 venc_write(struct v4l2_subdev *sd, u32 offset, u32 val)
+{
+ struct venc_state *venc = to_state(sd);
+
+ writel(val, (venc->venc_base + offset));
+
+ return val;
+}
+
+static inline u32 venc_modify(struct v4l2_subdev *sd, u32 offset,
+ u32 val, u32 mask)
+{
+ u32 new_val = (venc_read(sd, offset) & ~mask) | (val & mask);
+
+ venc_write(sd, offset, new_val);
+
+ return new_val;
+}
+
+static inline u32 vdaccfg_write(struct v4l2_subdev *sd, u32 val)
+{
+ struct venc_state *venc = to_state(sd);
+
+ writel(val, venc->vdaccfg_reg);
+
+ val = readl(venc->vdaccfg_reg);
+
+ return val;
+}
+
+/*
+ * This function sets the DAC of the VPBE for the various supported outputs.
+ */
+static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
+{
+ switch (out_index) {
+ case 0:
+ v4l2_dbg(debug, 1, sd, "Setting output to Composite\n");
+ venc_write(sd, VENC_DACSEL, 0);
+ break;
+ case 1:
+ v4l2_dbg(debug, 1, sd, "Setting output to S-Video\n");
+ venc_write(sd, VENC_DACSEL, 0x210);
+ break;
+ case 2:
+ venc_write(sd, VENC_DACSEL, 0x543);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
+{
+ v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n");
+
+ if (benable) {
+ venc_write(sd, VENC_VMOD, 0);
+ venc_write(sd, VENC_CVBS, 0);
+ venc_write(sd, VENC_LCDOUT, 0);
+ venc_write(sd, VENC_HSPLS, 0);
+ venc_write(sd, VENC_HSTART, 0);
+ venc_write(sd, VENC_HVALID, 0);
+ venc_write(sd, VENC_HINT, 0);
+ venc_write(sd, VENC_VSPLS, 0);
+ venc_write(sd, VENC_VSTART, 0);
+ venc_write(sd, VENC_VVALID, 0);
+ venc_write(sd, VENC_VINT, 0);
+ venc_write(sd, VENC_YCCCTL, 0);
+ venc_write(sd, VENC_DACSEL, 0);
+
+ } else {
+ venc_write(sd, VENC_VMOD, 0);
+ /* disable VCLK output pin enable */
+ venc_write(sd, VENC_VIDCTL, 0x141);
+
+ /* Disable output sync pins */
+ venc_write(sd, VENC_SYNCCTL, 0);
+
+ /* Disable DCLOCK */
+ venc_write(sd, VENC_DCLKCTL, 0);
+ venc_write(sd, VENC_DRGBX1, 0x0000057C);
+
+ /* Disable LCD output control (accepting default polarity) */
+ venc_write(sd, VENC_LCDOUT, 0);
+ venc_write(sd, VENC_CMPNT, 0x100);
+ venc_write(sd, VENC_HSPLS, 0);
+ venc_write(sd, VENC_HINT, 0);
+ venc_write(sd, VENC_HSTART, 0);
+ venc_write(sd, VENC_HVALID, 0);
+
+ venc_write(sd, VENC_VSPLS, 0);
+ venc_write(sd, VENC_VINT, 0);
+ venc_write(sd, VENC_VSTART, 0);
+ venc_write(sd, VENC_VVALID, 0);
+
+ venc_write(sd, VENC_HSDLY, 0);
+ venc_write(sd, VENC_VSDLY, 0);
+
+ venc_write(sd, VENC_YCCCTL, 0);
+ venc_write(sd, VENC_VSTARTA, 0);
+
+ /* Set OSD clock and OSD Sync Advance registers */
+ venc_write(sd, VENC_OSDCLK0, 1);
+ venc_write(sd, VENC_OSDCLK1, 2);
+ }
+}
+
+/*
+ * setting NTSC mode
+ */
+static int venc_set_ntsc(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_ntsc\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+ if (pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_525_60) < 0)
+ return -EINVAL;
+
+ venc_enabledigitaloutput(sd, 0);
+
+ /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
+ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+ /* Set REC656 Mode */
+ venc_write(sd, VENC_YCCCTL, 0x1);
+ venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAUPS);
+
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
+ venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_write(sd, VENC_DACTST, 0x0);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * setting PAL mode
+ */
+static int venc_set_pal(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+
+ v4l2_dbg(debug, 2, sd, "venc_set_pal\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+ if (venc->pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_625_50) < 0)
+ return -EINVAL;
+
+ venc_enabledigitaloutput(sd, 0);
+
+ /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
+ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+ /* Set REC656 Mode */
+ venc_write(sd, VENC_YCCCTL, 0x1);
+
+ venc_modify(sd, VENC_SYNCCTL, 1 << VENC_SYNCCTL_OVD_SHIFT,
+ VENC_SYNCCTL_OVD);
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD,
+ (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD,
+ (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
+ venc_modify(sd, VENC_VMOD,
+ (1 << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_write(sd, VENC_DACTST, 0x0);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * venc_set_480p59_94
+ *
+ * This function configures the video encoder to EDTV(525p) component setting.
+ */
+static int venc_set_480p59_94(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_480P59_94) < 0)
+ return -EINVAL;
+
+ venc_enabledigitaloutput(sd, 0);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+ VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+ VENC_VDPRO_DAUPS);
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_525P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
+ VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
+
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * venc_set_576p50
+ *
+ * This function configures the video encoder to EDTV(625p) component setting.
+ */
+static int venc_set_576p50(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_576p50\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_576P50) < 0)
+ return -EINVAL;
+
+ venc_enabledigitaloutput(sd, 0);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+ VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+ VENC_VDPRO_DAUPS);
+
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_625P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
+ VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
+{
+ v4l2_dbg(debug, 1, sd, "venc_s_std_output\n");
+
+ if (norm & V4L2_STD_525_60)
+ return venc_set_ntsc(sd);
+ else if (norm & V4L2_STD_625_50)
+ return venc_set_pal(sd);
+
+ return -EINVAL;
+}
+
+static int venc_s_dv_preset(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *dv_preset)
+{
+ v4l2_dbg(debug, 1, sd, "venc_s_dv_preset\n");
+
+ if (dv_preset->preset == V4L2_DV_576P50)
+ return venc_set_576p50(sd);
+ else if (dv_preset->preset == V4L2_DV_480P59_94)
+ return venc_set_480p59_94(sd);
+
+ return -EINVAL;
+}
+
+static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
+ u32 config)
+{
+ struct venc_state *venc = to_state(sd);
+ int ret;
+
+ v4l2_dbg(debug, 1, sd, "venc_s_routing\n");
+
+ ret = venc_set_dac(sd, output);
+ if (!ret)
+ venc->output = output;
+
+ return ret;
+}
+
+static long venc_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd,
+ void *arg)
+{
+ u32 val;
+
+ switch (cmd) {
+ case VENC_GET_FLD:
+ val = venc_read(sd, VENC_VSTAT);
+ *((int *)arg) = ((val & VENC_VSTAT_FIDST) ==
+ VENC_VSTAT_FIDST);
+ break;
+ default:
+ v4l2_err(sd, "Wrong IOCTL cmd\n");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops venc_core_ops = {
+ .ioctl = venc_ioctl,
+};
+
+static const struct v4l2_subdev_video_ops venc_video_ops = {
+ .s_routing = venc_s_routing,
+ .s_std_output = venc_s_std_output,
+ .s_dv_preset = venc_s_dv_preset,
+};
+
+static const struct v4l2_subdev_ops venc_ops = {
+ .core = &venc_core_ops,
+ .video = &venc_video_ops,
+};
+
+static int venc_initialize(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ int ret;
+
+ /* Set default to output to composite and std to NTSC */
+ venc->output = 0;
+ venc->std = V4L2_STD_525_60;
+
+ ret = venc_s_routing(sd, 0, venc->output, 0);
+ if (ret < 0) {
+ v4l2_err(sd, "Error setting output during init\n");
+ return -EINVAL;
+ }
+
+ ret = venc_s_std_output(sd, venc->std);
+ if (ret < 0) {
+ v4l2_err(sd, "Error setting std during init\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int venc_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct venc_state **venc = data;
+
+ if (strcmp(MODULE_NAME, pdev->name) == 0)
+ *venc = platform_get_drvdata(pdev);
+
+ return 0;
+}
+
+struct v4l2_subdev *venc_sub_dev_init(struct v4l2_device *v4l2_dev,
+ const char *venc_name)
+{
+ struct venc_state *venc = NULL;
+ int err;
+
+ err = bus_for_each_dev(&platform_bus_type, NULL, &venc,
+ venc_device_get);
+ if (venc == NULL)
+ return NULL;
+
+ v4l2_subdev_init(&venc->sd, &venc_ops);
+
+ strcpy(venc->sd.name, venc_name);
+ if (v4l2_device_register_subdev(v4l2_dev, &venc->sd) < 0) {
+ v4l2_err(v4l2_dev,
+ "vpbe unable to register venc sub device\n");
+ return NULL;
+ }
+ if (venc_initialize(&venc->sd)) {
+ v4l2_err(v4l2_dev,
+ "vpbe venc initialization failed\n");
+ return NULL;
+ }
+
+ return &venc->sd;
+}
+EXPORT_SYMBOL(venc_sub_dev_init);
+
+static int venc_probe(struct platform_device *pdev)
+{
+ struct venc_state *venc;
+ struct resource *res;
+ int ret;
+
+ venc = kzalloc(sizeof(struct venc_state), GFP_KERNEL);
+ if (venc == NULL)
+ return -ENOMEM;
+
+ venc->pdev = &pdev->dev;
+ venc->pdata = pdev->dev.platform_data;
+ if (NULL == venc->pdata) {
+ dev_err(venc->pdev, "Unable to get platform data for"
+ " VENC sub device");
+ ret = -ENOENT;
+ goto free_mem;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(venc->pdev,
+ "Unable to get VENC register address map\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), "venc")) {
+ dev_err(venc->pdev, "Unable to reserve VENC MMIO region\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ venc->venc_base = ioremap_nocache(res->start, resource_size(res));
+ if (!venc->venc_base) {
+ dev_err(venc->pdev, "Unable to map VENC IO space\n");
+ ret = -ENODEV;
+ goto release_venc_mem_region;
+ }
+
+ spin_lock_init(&venc->lock);
+ platform_set_drvdata(pdev, venc);
+ dev_notice(venc->pdev, "VENC sub device probe success\n");
+ return 0;
+
+release_venc_mem_region:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+free_mem:
+ kfree(venc);
+ return ret;
+}
+
+static int venc_remove(struct platform_device *pdev)
+{
+ struct venc_state *venc = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iounmap((void *)venc->venc_base);
+ release_mem_region(res->start, resource_size(res));
+ kfree(venc);
+
+ return 0;
+}
+
+static struct platform_driver venc_driver = {
+ .probe = venc_probe,
+ .remove = venc_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int venc_init(void)
+{
+ if (platform_driver_register(&venc_driver)) {
+ printk(KERN_ERR "Unable to register venc driver\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void venc_exit(void)
+{
+ platform_driver_unregister(&venc_driver);
+}
+
+module_init(venc_init);
+module_exit(venc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VPBE VENC Driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/video/davinci/vpbe_venc_regs.h b/drivers/media/video/davinci/vpbe_venc_regs.h
new file mode 100644
index 0000000..947cb15
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_venc_regs.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2006-2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _VPBE_VENC_REGS_H
+#define _VPBE_VENC_REGS_H
+
+/* VPBE Video Encoder / Digital LCD Subsystem Registers (VENC) */
+#define VENC_VMOD 0x00
+#define VENC_VIDCTL 0x04
+#define VENC_VDPRO 0x08
+#define VENC_SYNCCTL 0x0C
+#define VENC_HSPLS 0x10
+#define VENC_VSPLS 0x14
+#define VENC_HINT 0x18
+#define VENC_HSTART 0x1C
+#define VENC_HVALID 0x20
+#define VENC_VINT 0x24
+#define VENC_VSTART 0x28
+#define VENC_VVALID 0x2C
+#define VENC_HSDLY 0x30
+#define VENC_VSDLY 0x34
+#define VENC_YCCCTL 0x38
+#define VENC_RGBCTL 0x3C
+#define VENC_RGBCLP 0x40
+#define VENC_LINECTL 0x44
+#define VENC_CULLLINE 0x48
+#define VENC_LCDOUT 0x4C
+#define VENC_BRTS 0x50
+#define VENC_BRTW 0x54
+#define VENC_ACCTL 0x58
+#define VENC_PWMP 0x5C
+#define VENC_PWMW 0x60
+#define VENC_DCLKCTL 0x64
+#define VENC_DCLKPTN0 0x68
+#define VENC_DCLKPTN1 0x6C
+#define VENC_DCLKPTN2 0x70
+#define VENC_DCLKPTN3 0x74
+#define VENC_DCLKPTN0A 0x78
+#define VENC_DCLKPTN1A 0x7C
+#define VENC_DCLKPTN2A 0x80
+#define VENC_DCLKPTN3A 0x84
+#define VENC_DCLKHS 0x88
+#define VENC_DCLKHSA 0x8C
+#define VENC_DCLKHR 0x90
+#define VENC_DCLKVS 0x94
+#define VENC_DCLKVR 0x98
+#define VENC_CAPCTL 0x9C
+#define VENC_CAPDO 0xA0
+#define VENC_CAPDE 0xA4
+#define VENC_ATR0 0xA8
+#define VENC_ATR1 0xAC
+#define VENC_ATR2 0xB0
+#define VENC_VSTAT 0xB8
+#define VENC_RAMADR 0xBC
+#define VENC_RAMPORT 0xC0
+#define VENC_DACTST 0xC4
+#define VENC_YCOLVL 0xC8
+#define VENC_SCPROG 0xCC
+#define VENC_CVBS 0xDC
+#define VENC_CMPNT 0xE0
+#define VENC_ETMG0 0xE4
+#define VENC_ETMG1 0xE8
+#define VENC_ETMG2 0xEC
+#define VENC_ETMG3 0xF0
+#define VENC_DACSEL 0xF4
+#define VENC_ARGBX0 0x100
+#define VENC_ARGBX1 0x104
+#define VENC_ARGBX2 0x108
+#define VENC_ARGBX3 0x10C
+#define VENC_ARGBX4 0x110
+#define VENC_DRGBX0 0x114
+#define VENC_DRGBX1 0x118
+#define VENC_DRGBX2 0x11C
+#define VENC_DRGBX3 0x120
+#define VENC_DRGBX4 0x124
+#define VENC_VSTARTA 0x128
+#define VENC_OSDCLK0 0x12C
+#define VENC_OSDCLK1 0x130
+#define VENC_HVLDCL0 0x134
+#define VENC_HVLDCL1 0x138
+#define VENC_OSDHADV 0x13C
+#define VENC_CLKCTL 0x140
+#define VENC_GAMCTL 0x144
+#define VENC_XHINTVL 0x174
+
+/* bit definitions */
+#define VPBE_PCR_VENC_DIV (1 << 1)
+#define VPBE_PCR_CLK_OFF (1 << 0)
+
+#define VENC_VMOD_VDMD_SHIFT 12
+#define VENC_VMOD_VDMD_YCBCR16 0
+#define VENC_VMOD_VDMD_YCBCR8 1
+#define VENC_VMOD_VDMD_RGB666 2
+#define VENC_VMOD_VDMD_RGB8 3
+#define VENC_VMOD_VDMD_EPSON 4
+#define VENC_VMOD_VDMD_CASIO 5
+#define VENC_VMOD_VDMD_UDISPQVGA 6
+#define VENC_VMOD_VDMD_STNLCD 7
+#define VENC_VMOD_VIE_SHIFT 1
+#define VENC_VMOD_VDMD (7 << 12)
+#define VENC_VMOD_ITLCL (1 << 11)
+#define VENC_VMOD_ITLC (1 << 10)
+#define VENC_VMOD_NSIT (1 << 9)
+#define VENC_VMOD_HDMD (1 << 8)
+#define VENC_VMOD_TVTYP_SHIFT 6
+#define VENC_VMOD_TVTYP (3 << 6)
+#define VENC_VMOD_SLAVE (1 << 5)
+#define VENC_VMOD_VMD (1 << 4)
+#define VENC_VMOD_BLNK (1 << 3)
+#define VENC_VMOD_VIE (1 << 1)
+#define VENC_VMOD_VENC (1 << 0)
+
+/* VMOD TVTYP options for HDMD=0 */
+#define SDTV_NTSC 0
+#define SDTV_PAL 1
+/* VMOD TVTYP options for HDMD=1 */
+#define HDTV_525P 0
+#define HDTV_625P 1
+#define HDTV_1080I 2
+#define HDTV_720P 3
+
+#define VENC_VIDCTL_VCLKP (1 << 14)
+#define VENC_VIDCTL_VCLKE_SHIFT 13
+#define VENC_VIDCTL_VCLKE (1 << 13)
+#define VENC_VIDCTL_VCLKZ_SHIFT 12
+#define VENC_VIDCTL_VCLKZ (1 << 12)
+#define VENC_VIDCTL_SYDIR_SHIFT 8
+#define VENC_VIDCTL_SYDIR (1 << 8)
+#define VENC_VIDCTL_DOMD_SHIFT 4
+#define VENC_VIDCTL_DOMD (3 << 4)
+#define VENC_VIDCTL_YCDIR_SHIFT 0
+#define VENC_VIDCTL_YCDIR (1 << 0)
+
+#define VENC_VDPRO_ATYCC_SHIFT 5
+#define VENC_VDPRO_ATYCC (1 << 5)
+#define VENC_VDPRO_ATCOM_SHIFT 4
+#define VENC_VDPRO_ATCOM (1 << 4)
+#define VENC_VDPRO_DAFRQ (1 << 3)
+#define VENC_VDPRO_DAUPS (1 << 2)
+#define VENC_VDPRO_CUPS (1 << 1)
+#define VENC_VDPRO_YUPS (1 << 0)
+
+#define VENC_SYNCCTL_VPL_SHIFT 3
+#define VENC_SYNCCTL_VPL (1 << 3)
+#define VENC_SYNCCTL_HPL_SHIFT 2
+#define VENC_SYNCCTL_HPL (1 << 2)
+#define VENC_SYNCCTL_SYEV_SHIFT 1
+#define VENC_SYNCCTL_SYEV (1 << 1)
+#define VENC_SYNCCTL_SYEH_SHIFT 0
+#define VENC_SYNCCTL_SYEH (1 << 0)
+#define VENC_SYNCCTL_OVD_SHIFT 14
+#define VENC_SYNCCTL_OVD (1 << 14)
+
+#define VENC_DCLKCTL_DCKEC_SHIFT 11
+#define VENC_DCLKCTL_DCKEC (1 << 11)
+#define VENC_DCLKCTL_DCKPW_SHIFT 0
+#define VENC_DCLKCTL_DCKPW (0x3f << 0)
+
+#define VENC_VSTAT_FIDST (1 << 4)
+
+#define VENC_CMPNT_MRGB_SHIFT 14
+#define VENC_CMPNT_MRGB (1 << 14)
+
+#endif /* _VPBE_VENC_REGS_H */
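[Editor's note] The VMOD field macros above are combined the same way in venc_set_ntsc()/venc_set_pal(): a value is shifted into position and masked to the field width. The sketch below composes a value outside the driver helpers; the 720p case is purely illustrative, since the driver in this patch only programs SD and 525p/625p component timing.

/* Sketch only: compose a VMOD value with HD timing, 720p selected in TVTYP
 * and 16-bit YCbCr data output; the VENC enable bit is left for the caller.
 */
static inline u32 example_vmod_720p(void)
{
	u32 vmod = 0;

	vmod |= VENC_VMOD_HDMD;					/* HD timing mode */
	vmod |= (HDTV_720P << VENC_VMOD_TVTYP_SHIFT) & VENC_VMOD_TVTYP;
	vmod |= (VENC_VMOD_VDMD_YCBCR16 << VENC_VMOD_VDMD_SHIFT) & VENC_VMOD_VDMD;

	return vmod;
}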
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index d93ad74..49e4deb 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -33,7 +33,6 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/version.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -44,6 +43,7 @@
MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VPIF_CAPTURE_VERSION);
#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
#define vpif_dbg(level, debug, fmt, arg...) \
@@ -1677,7 +1677,6 @@ static int vpif_querycap(struct file *file, void *priv,
{
struct vpif_capture_config *config = vpif_dev->platform_data;
- cap->version = VPIF_CAPTURE_VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
strlcpy(cap->driver, "vpif capture", sizeof(cap->driver));
strlcpy(cap->bus_info, "DM646x Platform", sizeof(cap->bus_info));
@@ -2211,10 +2210,8 @@ static __init int vpif_probe(struct platform_device *pdev)
vfd->v4l2_dev = &vpif_obj.v4l2_dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name),
- "DM646x_VPIFCapture_DRIVER_V%d.%d.%d",
- (VPIF_CAPTURE_VERSION_CODE >> 16) & 0xff,
- (VPIF_CAPTURE_VERSION_CODE >> 8) & 0xff,
- (VPIF_CAPTURE_VERSION_CODE) & 0xff);
+ "DM646x_VPIFCapture_DRIVER_V%s",
+ VPIF_CAPTURE_VERSION);
/* Set video_dev to the video device */
ch->video_dev = vfd;
}
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 7a4196d..064550f 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -23,7 +23,6 @@
/* Header files */
#include <linux/videodev2.h>
-#include <linux/version.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
@@ -33,11 +32,7 @@
#include "vpif.h"
/* Macros */
-#define VPIF_MAJOR_RELEASE 0
-#define VPIF_MINOR_RELEASE 0
-#define VPIF_BUILD 1
-#define VPIF_CAPTURE_VERSION_CODE ((VPIF_MAJOR_RELEASE << 16) | \
- (VPIF_MINOR_RELEASE << 8) | VPIF_BUILD)
+#define VPIF_CAPTURE_VERSION "0.0.2"
#define VPIF_VALID_FIELD(field) (((V4L2_FIELD_ANY == field) || \
(V4L2_FIELD_NONE == field)) || \
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index cdf659a..286f029 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -29,7 +29,6 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/version.h>
#include <linux/slab.h>
#include <asm/irq.h>
@@ -47,6 +46,7 @@
MODULE_DESCRIPTION("TI DaVinci VPIF Display driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VPIF_DISPLAY_VERSION);
#define DM646X_V4L2_STD (V4L2_STD_525_60 | V4L2_STD_625_50)
@@ -701,7 +701,6 @@ static int vpif_querycap(struct file *file, void *priv,
{
struct vpif_display_config *config = vpif_dev->platform_data;
- cap->version = VPIF_DISPLAY_VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
strlcpy(cap->driver, "vpif display", sizeof(cap->driver));
strlcpy(cap->bus_info, "Platform", sizeof(cap->bus_info));
@@ -1740,10 +1739,8 @@ static __init int vpif_probe(struct platform_device *pdev)
vfd->v4l2_dev = &vpif_obj.v4l2_dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name),
- "DM646x_VPIFDisplay_DRIVER_V%d.%d.%d",
- (VPIF_DISPLAY_VERSION_CODE >> 16) & 0xff,
- (VPIF_DISPLAY_VERSION_CODE >> 8) & 0xff,
- (VPIF_DISPLAY_VERSION_CODE) & 0xff);
+ "DM646x_VPIFDisplay_DRIVER_V%s",
+ VPIF_DISPLAY_VERSION);
/* Set video_dev to the video device */
ch->video_dev = vfd;
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index b53aaa8..5d1936d 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -18,7 +18,6 @@
/* Header files */
#include <linux/videodev2.h>
-#include <linux/version.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
@@ -27,12 +26,7 @@
#include "vpif.h"
/* Macros */
-#define VPIF_MAJOR_RELEASE (0)
-#define VPIF_MINOR_RELEASE (0)
-#define VPIF_BUILD (1)
-
-#define VPIF_DISPLAY_VERSION_CODE \
- ((VPIF_MAJOR_RELEASE << 16) | (VPIF_MINOR_RELEASE << 8) | VPIF_BUILD)
+#define VPIF_DISPLAY_VERSION "0.0.2"
#define VPIF_VALID_FIELD(field) \
(((V4L2_FIELD_ANY == field) || (V4L2_FIELD_NONE == field)) || \
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index 3cb78f2..281ee42 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -3,7 +3,6 @@ config VIDEO_EM28XX
depends on VIDEO_DEV && I2C
select VIDEO_TUNER
select VIDEO_TVEEPROM
- depends on RC_CORE
select VIDEOBUF_VMALLOC
select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO
@@ -40,7 +39,18 @@ config VIDEO_EM28XX_DVB
select DVB_S921 if !DVB_FE_CUSTOMISE
select DVB_DRXD if !DVB_FE_CUSTOMISE
select DVB_CXD2820R if !DVB_FE_CUSTOMISE
+ select DVB_DRXK if !DVB_FE_CUSTOMISE
+ select DVB_TDA18271C2DD if !DVB_FE_CUSTOMISE
select VIDEOBUF_DVB
---help---
This adds support for DVB cards based on the
Empiatech em28xx chips.
+
+config VIDEO_EM28XX_RC
+ bool "EM28XX Remote Controller support"
+ depends on RC_CORE
+ depends on VIDEO_EM28XX
+ depends on !(RC_CORE=m && VIDEO_EM28XX=y)
+ default y
+ ---help---
+ Enables Remote Controller support on em28xx driver.
diff --git a/drivers/media/video/em28xx/Makefile b/drivers/media/video/em28xx/Makefile
index d0f093d..38aaa00 100644
--- a/drivers/media/video/em28xx/Makefile
+++ b/drivers/media/video/em28xx/Makefile
@@ -1,5 +1,7 @@
-em28xx-objs := em28xx-video.o em28xx-i2c.o em28xx-cards.o em28xx-core.o \
- em28xx-input.o em28xx-vbi.o
+em28xx-y := em28xx-video.o em28xx-i2c.o em28xx-cards.o
+em28xx-y += em28xx-core.o em28xx-vbi.o
+
+em28xx-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-input.o
em28xx-alsa-objs := em28xx-audio.o
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 3c48a72..cff0768 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -3,9 +3,9 @@
*
* Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com>
*
- * Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (C) 2007-2011 Mauro Carvalho Chehab <mchehab@redhat.com>
* - Port to work with the in-kernel driver
- * - Several cleanups
+ * - Cleanups, fixes, alsa-controls, etc.
*
* This driver is based on my previous au600 usb pstn audio driver
* and inherits all the copyrights
@@ -41,6 +41,7 @@
#include <sound/info.h>
#include <sound/initval.h>
#include <sound/control.h>
+#include <sound/tlv.h>
#include <media/v4l2-common.h>
#include "em28xx.h"
@@ -212,9 +213,12 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
if (errCode) {
+ em28xx_errdev("submit of audio urb failed\n");
em28xx_deinit_isoc_audio(dev);
+ atomic_set(&dev->stream_started, 0);
return errCode;
}
+
}
return 0;
@@ -245,6 +249,7 @@ static struct snd_pcm_hardware snd_em28xx_hw_capture = {
.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BATCH |
SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -276,24 +281,27 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
return -ENODEV;
}
- /* Sets volume, mute, etc */
+ runtime->hw = snd_em28xx_hw_capture;
+ if ((dev->alt == 0 || dev->audio_ifnum) && dev->adev.users == 0) {
+ if (dev->audio_ifnum)
+ dev->alt = 1;
+ else
+ dev->alt = 7;
- dev->mute = 0;
- mutex_lock(&dev->lock);
- ret = em28xx_audio_analog_set(dev);
- if (ret < 0)
- goto err;
+ dprintk("changing alternate number on interface %d to %d\n",
+ dev->audio_ifnum, dev->alt);
+ usb_set_interface(dev->udev, dev->audio_ifnum, dev->alt);
- runtime->hw = snd_em28xx_hw_capture;
- if (dev->alt == 0 && dev->adev.users == 0) {
- int errCode;
- dev->alt = 7;
- dprintk("changing alternate number to 7\n");
- errCode = usb_set_interface(dev->udev, 0, 7);
- }
+ /* Sets volume, mute, etc */
+ dev->mute = 0;
+ mutex_lock(&dev->lock);
+ ret = em28xx_audio_analog_set(dev);
+ if (ret < 0)
+ goto err;
- dev->adev.users++;
- mutex_unlock(&dev->lock);
+ dev->adev.users++;
+ mutex_unlock(&dev->lock);
+ }
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
dev->adev.capture_pcm_substream = substream;
@@ -342,6 +350,8 @@ static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
ret = snd_pcm_alloc_vmalloc_buffer(substream,
params_buffer_bytes(hw_params));
+ if (ret < 0)
+ return ret;
format = params_format(hw_params);
rate = params_rate(hw_params);
channels = params_channels(hw_params);
@@ -393,20 +403,24 @@ static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct em28xx *dev = snd_pcm_substream_chip(substream);
- int retval;
+ int retval = 0;
switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
+ case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
case SNDRV_PCM_TRIGGER_START:
atomic_set(&dev->stream_started, 1);
break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
+ case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
- atomic_set(&dev->stream_started, 1);
+ atomic_set(&dev->stream_started, 0);
break;
default:
retval = -EINVAL;
}
schedule_work(&dev->wq_trigger);
- return 0;
+ return retval;
}
static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream
@@ -432,6 +446,179 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
return vmalloc_to_page(pageptr);
}
+/*
+ * AC97 volume control support
+ */
+static int em28xx_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+ info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ info->count = 2;
+ info->value.integer.min = 0;
+ info->value.integer.max = 0x1f;
+
+ return 0;
+}
+
+static int em28xx_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ u16 val = (0x1f - (value->value.integer.value[0] & 0x1f)) |
+ (0x1f - (value->value.integer.value[1] & 0x1f)) << 8;
+ int rc;
+
+ mutex_lock(&dev->lock);
+ rc = em28xx_read_ac97(dev, kcontrol->private_value);
+ if (rc < 0)
+ goto err;
+
+ val |= rc & 0x8000; /* Preserve the mute flag */
+
+ rc = em28xx_write_ac97(dev, kcontrol->private_value, val);
+ if (rc < 0)
+ goto err;
+
+ dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n",
+ (val & 0x8000) ? "muted " : "",
+ 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
+ val, (int)kcontrol->private_value);
+
+err:
+ mutex_unlock(&dev->lock);
+ return rc;
+}
+
+static int em28xx_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ int val;
+
+ mutex_lock(&dev->lock);
+ val = em28xx_read_ac97(dev, kcontrol->private_value);
+ mutex_unlock(&dev->lock);
+ if (val < 0)
+ return val;
+
+ dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n",
+ (val & 0x8000) ? "muted " : "",
+ 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
+ val, (int)kcontrol->private_value);
+
+ value->value.integer.value[0] = 0x1f - (val & 0x1f);
+ value->value.integer.value[1] = 0x1f - ((val >> 8) & 0x1f);
+
+ return 0;
+}
+
+static int em28xx_vol_put_mute(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ u16 val = value->value.integer.value[0];
+ int rc;
+
+ mutex_lock(&dev->lock);
+ rc = em28xx_read_ac97(dev, kcontrol->private_value);
+ if (rc < 0)
+ goto err;
+
+ if (val)
+ rc &= 0x1f1f;
+ else
+ rc |= 0x8000;
+
+ rc = em28xx_write_ac97(dev, kcontrol->private_value, rc);
+ if (rc < 0)
+ goto err;
+
+ dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n",
+ (val & 0x8000) ? "muted " : "",
+ 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
+ val, (int)kcontrol->private_value);
+
+err:
+ mutex_unlock(&dev->lock);
+ return rc;
+}
+
+static int em28xx_vol_get_mute(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ int val;
+
+ mutex_lock(&dev->lock);
+ val = em28xx_read_ac97(dev, kcontrol->private_value);
+ mutex_unlock(&dev->lock);
+ if (val < 0)
+ return val;
+
+ if (val & 0x8000)
+ value->value.integer.value[0] = 0;
+ else
+ value->value.integer.value[0] = 1;
+
+ dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n",
+ (val & 0x8000) ? "muted " : "",
+ 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
+ val, (int)kcontrol->private_value);
+
+ return 0;
+}
+
+static const DECLARE_TLV_DB_SCALE(em28xx_db_scale, -3450, 150, 0);
+
+static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev,
+ char *name, int id)
+{
+ int err;
+ char ctl_name[44];
+ struct snd_kcontrol *kctl;
+ struct snd_kcontrol_new tmp;
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ tmp.private_value = id;
+ tmp.name = ctl_name;
+
+ /* Add Mute Control */
+ sprintf(ctl_name, "%s Switch", name);
+ tmp.get = em28xx_vol_get_mute;
+ tmp.put = em28xx_vol_put_mute;
+ tmp.info = snd_ctl_boolean_mono_info;
+ kctl = snd_ctl_new1(&tmp, dev);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ return err;
+ dprintk("Added control %s for ac97 volume control 0x%04x\n",
+ ctl_name, id);
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ tmp.private_value = id;
+ tmp.name = ctl_name;
+
+ /* Add Volume Control */
+ sprintf(ctl_name, "%s Volume", name);
+ tmp.get = em28xx_vol_get;
+ tmp.put = em28xx_vol_put;
+ tmp.info = em28xx_vol_info;
+ tmp.tlv.p = em28xx_db_scale;
+ kctl = snd_ctl_new1(&tmp, dev);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ return err;
+ dprintk("Added control %s for ac97 volume control 0x%04x\n",
+ ctl_name, id);
+
+ return 0;
+}
+
+/*
+ * register/unregister code and data
+ */
static struct snd_pcm_ops snd_em28xx_pcm_capture = {
.open = snd_em28xx_capture_open,
.close = snd_em28xx_pcm_close,
@@ -452,17 +639,17 @@ static int em28xx_audio_init(struct em28xx *dev)
static int devnr;
int err;
- if (dev->has_alsa_audio != 1) {
+ if (!dev->has_alsa_audio || dev->audio_ifnum < 0) {
/* This device does not support the extension (in this case
the device is expecting the snd-usb-audio module or
doesn't have analog audio support at all) */
return 0;
}
- printk(KERN_INFO "em28xx-audio.c: probing for em28x1 "
- "non standard usbaudio\n");
+ printk(KERN_INFO "em28xx-audio.c: probing for em28xx Audio Vendor Class\n");
printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
"Rechberger\n");
+ printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2007-2011 Mauro Carvalho Chehab\n");
err = snd_card_create(index[devnr], "Em28xx Audio", THIS_MODULE, 0,
&card);
@@ -488,6 +675,22 @@ static int em28xx_audio_init(struct em28xx *dev)
INIT_WORK(&dev->wq_trigger, audio_trigger);
+ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
+ em28xx_cvol_new(card, dev, "Video", AC97_VIDEO_VOL);
+ em28xx_cvol_new(card, dev, "Line In", AC97_LINEIN_VOL);
+ em28xx_cvol_new(card, dev, "Phone", AC97_PHONE_VOL);
+ em28xx_cvol_new(card, dev, "Microphone", AC97_PHONE_VOL);
+ em28xx_cvol_new(card, dev, "CD", AC97_CD_VOL);
+ em28xx_cvol_new(card, dev, "AUX", AC97_AUX_VOL);
+ em28xx_cvol_new(card, dev, "PCM", AC97_PCM_OUT_VOL);
+
+ em28xx_cvol_new(card, dev, "Master", AC97_MASTER_VOL);
+ em28xx_cvol_new(card, dev, "Line", AC97_LINE_LEVEL_VOL);
+ em28xx_cvol_new(card, dev, "Mono", AC97_MASTER_MONO_VOL);
+ em28xx_cvol_new(card, dev, "LFE", AC97_LFE_MASTER_VOL);
+ em28xx_cvol_new(card, dev, "Surround", AC97_SURR_MASTER_VOL);
+ }
+
err = snd_card_register(card);
if (err < 0) {
snd_card_free(card);
@@ -538,7 +741,7 @@ static void __exit em28xx_alsa_unregister(void)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_DESCRIPTION("Em28xx Audio driver");
module_init(em28xx_alsa_register);
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 4e37375..3e3959f 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -289,7 +289,7 @@ static struct em28xx_reg_seq leadership_reset[] = {
{ -1, -1, -1, -1},
};
-/* 2013:024f PCTV Systems nanoStick T2 290e
+/* 2013:024f PCTV nanoStick T2 290e
* GPIO_6 - demod reset
* GPIO_7 - LED
*/
@@ -300,6 +300,23 @@ static struct em28xx_reg_seq pctv_290e[] = {
{-1, -1, -1, -1},
};
+#if 0
+static struct em28xx_reg_seq terratec_h5_gpio[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xf2, 0xff, 50},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 50},
+ { -1, -1, -1, -1},
+};
+
+static struct em28xx_reg_seq terratec_h5_digital[] = {
+ {EM2874_R80_GPIO, 0xf6, 0xff, 10},
+ {EM2874_R80_GPIO, 0xe6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xa6, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+#endif
+
/*
* Board definitions
*/
@@ -843,6 +860,19 @@ struct em28xx_board em28xx_boards[] = {
.gpio = terratec_cinergy_USB_XS_FR_analog,
} },
},
+ [EM2884_BOARD_TERRATEC_H5] = {
+ .name = "Terratec Cinergy H5",
+ .has_dvb = 1,
+#if 0
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .tuner_addr = 0x41,
+ .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */
+ .tuner_gpio = terratec_h5_gpio,
+#endif
+ .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
+ EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ },
[EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = {
.name = "Hauppauge WinTV HVR 900",
.tda9887_conf = TDA9887_PRESENT,
@@ -1259,7 +1289,7 @@ struct em28xx_board em28xx_boards[] = {
} },
},
- [EM2874_LEADERSHIP_ISDBT] = {
+ [EM2874_BOARD_LEADERSHIP_ISDBT] = {
.i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_100_KHZ,
@@ -1319,7 +1349,6 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2880_BOARD_KWORLD_DVB_305U] = {
.name = "KWorld DVB-T 305U",
- .valid = EM28XX_BOARD_NOT_VALIDATED,
.tuner_type = TUNER_XC2028,
.tuner_gpio = default_tuner_gpio,
.decoder = EM28XX_TVP5150,
@@ -1770,16 +1799,16 @@ struct em28xx_board em28xx_boards[] = {
.dvb_gpio = kworld_a340_digital,
.tuner_gpio = default_tuner_gpio,
},
- /* 2013:024f PCTV Systems nanoStick T2 290e.
+ /* 2013:024f PCTV nanoStick T2 290e.
* Empia EM28174, Sony CXD2820R and NXP TDA18271HD/C2 */
[EM28174_BOARD_PCTV_290E] = {
+ .name = "PCTV nanoStick T2 290e",
.i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ,
- .xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
- .name = "PCTV Systems nanoStick T2 290e",
.tuner_type = TUNER_ABSENT,
.tuner_gpio = pctv_290e,
.has_dvb = 1,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
},
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -1855,8 +1884,10 @@ struct usb_device_id em28xx_id_table[] = {
{ USB_DEVICE(0x0ccd, 0x0042),
.driver_info = EM2882_BOARD_TERRATEC_HYBRID_XS },
{ USB_DEVICE(0x0ccd, 0x0043),
- .driver_info = EM2870_BOARD_TERRATEC_XS },
- { USB_DEVICE(0x0ccd, 0x0047),
+ .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ { USB_DEVICE(0x0ccd, 0x10a2), /* Rev. 1 */
+ .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ { USB_DEVICE(0x0ccd, 0x10ad), /* Rev. 2 */
.driver_info = EM2880_BOARD_TERRATEC_PRODIGY_XS },
{ USB_DEVICE(0x0ccd, 0x0084),
.driver_info = EM2860_BOARD_TERRATEC_AV350 },
@@ -1937,7 +1968,7 @@ static struct em28xx_hash_table em28xx_i2c_hash[] = {
{0x77800080, EM2860_BOARD_TVP5150_REFERENCE_DESIGN, TUNER_ABSENT},
{0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC},
{0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF},
- {0x6b800080, EM2874_LEADERSHIP_ISDBT, TUNER_ABSENT},
+ {0x6b800080, EM2874_BOARD_LEADERSHIP_ISDBT, TUNER_ABSENT},
};
/* I2C possible address to saa7115, tvp5150, msp3400, tvaudio */
@@ -2660,10 +2691,9 @@ void em28xx_card_setup(struct em28xx *dev)
.addr = 0xba >> 1,
.platform_data = &pdata,
};
- struct v4l2_subdev *sd;
pdata.xtal = dev->sensor_xtal;
- sd = v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap,
+ v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap,
&mt9v011_info, NULL);
}
@@ -2842,11 +2872,26 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
em28xx_info("chip ID is em2882/em2883\n");
dev->wait_after_write = 0;
break;
+ case CHIP_ID_EM2884:
+ em28xx_info("chip ID is em2884\n");
+ dev->reg_gpio_num = EM2874_R80_GPIO;
+ dev->wait_after_write = 0;
+ break;
default:
em28xx_info("em28xx chip ID = %d\n", dev->chip_id);
}
}
+ if (dev->is_audio_only) {
+ errCode = em28xx_audio_setup(dev);
+ if (errCode)
+ return -ENODEV;
+ em28xx_add_into_devlist(dev);
+ em28xx_init_extension(dev);
+
+ return 0;
+ }
+
/* Prepopulate cached GPO register content */
retval = em28xx_read_reg(dev, dev->reg_gpo_num);
if (retval >= 0)
@@ -2947,6 +2992,9 @@ fail_reg_devices:
return retval;
}
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+
/*
* em28xx_usb_probe()
* checks for supported devices
@@ -2956,15 +3004,15 @@ static int em28xx_usb_probe(struct usb_interface *interface,
{
const struct usb_endpoint_descriptor *endpoint;
struct usb_device *udev;
- struct usb_interface *uif;
struct em28xx *dev = NULL;
int retval;
- int i, nr, ifnum, isoc_pipe;
+ bool is_audio_only = false, has_audio = false;
+ int i, nr, isoc_pipe;
+ const int ifnum = interface->altsetting[0].desc.bInterfaceNumber;
char *speed;
char descr[255] = "";
udev = usb_get_dev(interface_to_usbdev(interface));
- ifnum = interface->altsetting[0].desc.bInterfaceNumber;
/* Check to see next free device and mark as used */
nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS);
@@ -2984,6 +3032,19 @@ static int em28xx_usb_probe(struct usb_interface *interface,
goto err;
}
+ /* Get endpoints */
+ for (i = 0; i < interface->num_altsetting; i++) {
+ int ep;
+
+ for (ep = 0; ep < interface->altsetting[i].desc.bNumEndpoints; ep++) {
+ struct usb_host_endpoint *e;
+ e = &interface->altsetting[i].endpoint[ep];
+
+ if (e->desc.bEndpointAddress == 0x83)
+ has_audio = true;
+ }
+ }
+
endpoint = &interface->cur_altsetting->endpoint[0].desc;
/* check if the device has the iso in endpoint at the correct place */
@@ -3003,19 +3064,22 @@ static int em28xx_usb_probe(struct usb_interface *interface,
check_interface = 0;
if (!check_interface) {
- em28xx_err(DRIVER_NAME " video device (%04x:%04x): "
- "interface %i, class %i found.\n",
- le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct),
- ifnum,
- interface->altsetting[0].desc.bInterfaceClass);
-
- em28xx_err(DRIVER_NAME " This is an anciliary "
- "interface not used by the driver\n");
-
- em28xx_devused &= ~(1<<nr);
- retval = -ENODEV;
- goto err;
+ if (has_audio) {
+ is_audio_only = true;
+ } else {
+ em28xx_err(DRIVER_NAME " video device (%04x:%04x): "
+ "interface %i, class %i found.\n",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct),
+ ifnum,
+ interface->altsetting[0].desc.bInterfaceClass);
+ em28xx_err(DRIVER_NAME " This is an anciliary "
+ "interface not used by the driver\n");
+
+ em28xx_devused &= ~(1<<nr);
+ retval = -ENODEV;
+ goto err;
+ }
}
}
@@ -3045,8 +3109,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (*descr)
strlcat(descr, " ", sizeof(descr));
- printk(DRIVER_NAME ": New device %s@ %s Mbps "
- "(%04x:%04x, interface %d, class %d)\n",
+ printk(KERN_INFO DRIVER_NAME
+ ": New device %s@ %s Mbps (%04x:%04x, interface %d, class %d)\n",
descr,
speed,
le16_to_cpu(udev->descriptor.idVendor),
@@ -3054,6 +3118,11 @@ static int em28xx_usb_probe(struct usb_interface *interface,
ifnum,
interface->altsetting->desc.bInterfaceNumber);
+ if (has_audio)
+ printk(KERN_INFO DRIVER_NAME
+ ": Audio Vendor Class interface %i found\n",
+ ifnum);
+
/*
* Make sure we have 480 Mbps of bandwidth, otherwise things like
* video stream wouldn't likely work, since 12 Mbps is generally
@@ -3089,10 +3158,13 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->devno = nr;
dev->model = id->driver_info;
dev->alt = -1;
+ dev->is_audio_only = is_audio_only;
+ dev->has_alsa_audio = has_audio;
+ dev->audio_ifnum = ifnum;
/* Checks if audio is provided by some interface */
for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
- uif = udev->config->interface[i];
+ struct usb_interface *uif = udev->config->interface[i];
if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
dev->has_audio_class = 1;
break;
@@ -3100,9 +3172,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
/* compute alternate max packet sizes */
- uif = udev->actconfig->interface[0];
-
- dev->num_alt = uif->num_altsetting;
+ dev->num_alt = interface->num_altsetting;
dev->alt_max_pkt_size = kmalloc(32 * dev->num_alt, GFP_KERNEL);
if (dev->alt_max_pkt_size == NULL) {
@@ -3114,14 +3184,21 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
for (i = 0; i < dev->num_alt ; i++) {
- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
- dev->alt_max_pkt_size[i] =
- (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
+ u16 tmp = le16_to_cpu(interface->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
+ unsigned int size = tmp & 0x7ff;
+
+ if (udev->speed == USB_SPEED_HIGH)
+ size = size * hb_mult(tmp);
+
+ dev->alt_max_pkt_size[i] = size;
}
if ((card[nr] >= 0) && (card[nr] < em28xx_bcount))
dev->model = card[nr];
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(interface, dev);
+
/* allocate device struct */
mutex_init(&dev->lock);
mutex_lock(&dev->lock);
@@ -3133,9 +3210,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
goto err;
}
- /* save our data pointer in this interface device */
- usb_set_intfdata(interface, dev);
-
request_modules(dev);
/* Should be the last thing to do, to avoid newer udev's to
@@ -3164,6 +3238,13 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
if (!dev)
return;
+ if (dev->is_audio_only) {
+ mutex_lock(&dev->lock);
+ em28xx_close_extension(dev);
+ mutex_unlock(&dev->lock);
+ return;
+ }
+
em28xx_info("disconnecting %s\n", dev->vdev->name);
flush_request_modules(dev);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index e33f145..57b1b5c 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -211,6 +211,7 @@ int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val)
{
return em28xx_write_regs(dev, reg, &val, 1);
}
+EXPORT_SYMBOL_GPL(em28xx_write_reg);
/*
* em28xx_write_reg_bits()
@@ -286,6 +287,7 @@ int em28xx_read_ac97(struct em28xx *dev, u8 reg)
return ret;
return le16_to_cpu(val);
}
+EXPORT_SYMBOL_GPL(em28xx_read_ac97);
/*
* em28xx_write_ac97()
@@ -313,13 +315,14 @@ int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val)
return 0;
}
+EXPORT_SYMBOL_GPL(em28xx_write_ac97);
-struct em28xx_vol_table {
+struct em28xx_vol_itable {
enum em28xx_amux mux;
u8 reg;
};
-static struct em28xx_vol_table inputs[] = {
+static struct em28xx_vol_itable inputs[] = {
{ EM28XX_AMUX_VIDEO, AC97_VIDEO_VOL },
{ EM28XX_AMUX_LINE_IN, AC97_LINEIN_VOL },
{ EM28XX_AMUX_PHONE, AC97_PHONE_VOL },
@@ -403,7 +406,12 @@ static int em28xx_set_audio_source(struct em28xx *dev)
return ret;
}
-static const struct em28xx_vol_table outputs[] = {
+struct em28xx_vol_otable {
+ enum em28xx_aout mux;
+ u8 reg;
+};
+
+static const struct em28xx_vol_otable outputs[] = {
{ EM28XX_AOUT_MASTER, AC97_MASTER_VOL },
{ EM28XX_AOUT_LINE, AC97_LINE_LEVEL_VOL },
{ EM28XX_AOUT_MONO, AC97_MASTER_MONO_VOL },
@@ -492,17 +500,13 @@ int em28xx_audio_setup(struct em28xx *dev)
if (dev->chip_id == CHIP_ID_EM2870 || dev->chip_id == CHIP_ID_EM2874
|| dev->chip_id == CHIP_ID_EM28174) {
/* Digital only device - don't load any alsa module */
- dev->audio_mode.has_audio = 0;
- dev->has_audio_class = 0;
- dev->has_alsa_audio = 0;
+ dev->audio_mode.has_audio = false;
+ dev->has_audio_class = false;
+ dev->has_alsa_audio = false;
return 0;
}
- /* If device doesn't support Usb Audio Class, use vendor class */
- if (!dev->has_audio_class)
- dev->has_alsa_audio = 1;
-
- dev->audio_mode.has_audio = 1;
+ dev->audio_mode.has_audio = true;
/* See how this device is configured */
cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
@@ -512,8 +516,8 @@ int em28xx_audio_setup(struct em28xx *dev)
cfg = EM28XX_CHIPCFG_AC97; /* Be conservative */
} else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 0x00) {
/* The device doesn't have vendor audio at all */
- dev->has_alsa_audio = 0;
- dev->audio_mode.has_audio = 0;
+ dev->has_alsa_audio = false;
+ dev->audio_mode.has_audio = false;
return 0;
} else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
EM28XX_CHIPCFG_I2S_3_SAMPRATES) {
@@ -542,8 +546,8 @@ int em28xx_audio_setup(struct em28xx *dev)
*/
em28xx_warn("AC97 chip type couldn't be determined\n");
dev->audio_mode.ac97 = EM28XX_NO_AC97;
- dev->has_alsa_audio = 0;
- dev->audio_mode.has_audio = 0;
+ dev->has_alsa_audio = false;
+ dev->audio_mode.has_audio = false;
goto init_audio;
}
@@ -615,7 +619,9 @@ int em28xx_capture_start(struct em28xx *dev, int start)
{
int rc;
- if (dev->chip_id == CHIP_ID_EM2874 || dev->chip_id == CHIP_ID_EM28174) {
+ if (dev->chip_id == CHIP_ID_EM2874 ||
+ dev->chip_id == CHIP_ID_EM2884 ||
+ dev->chip_id == CHIP_ID_EM28174) {
/* The Transport Stream Enable Register moved in em2874 */
if (!start) {
rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
@@ -884,6 +890,7 @@ int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio)
}
return rc;
}
+EXPORT_SYMBOL_GPL(em28xx_gpio_set);
int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode)
{
@@ -917,7 +924,7 @@ EXPORT_SYMBOL_GPL(em28xx_set_mode);
static void em28xx_irq_callback(struct urb *urb)
{
struct em28xx *dev = urb->context;
- int rc, i;
+ int i;
switch (urb->status) {
case 0: /* success */
@@ -934,7 +941,7 @@ static void em28xx_irq_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->slock);
- rc = dev->isoc_ctl.isoc_copy(dev, urb);
+ dev->isoc_ctl.isoc_copy(dev, urb);
spin_unlock(&dev->slock);
/* Reset urb buffers */
@@ -1106,17 +1113,19 @@ EXPORT_SYMBOL_GPL(em28xx_init_isoc);
int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev)
{
unsigned int chip_cfg2;
- unsigned int packet_size = 564;
-
- if (dev->chip_id == CHIP_ID_EM2874) {
- /* FIXME - for now assume 564 like it was before, but the
- em2874 code should be added to return the proper value... */
- packet_size = 564;
- } else if (dev->chip_id == CHIP_ID_EM28174) {
- /* FIXME same as em2874. 564 was enough for 22 Mbit DVB-T
- but too much for 44 Mbit DVB-C. */
- packet_size = 752;
- } else {
+ unsigned int packet_size;
+
+ switch (dev->chip_id) {
+ case CHIP_ID_EM2710:
+ case CHIP_ID_EM2750:
+ case CHIP_ID_EM2800:
+ case CHIP_ID_EM2820:
+ case CHIP_ID_EM2840:
+ case CHIP_ID_EM2860:
+ /* No DVB support */
+ return -EINVAL;
+ case CHIP_ID_EM2870:
+ case CHIP_ID_EM2883:
/* TS max packet size stored in bits 1-0 of R01 */
chip_cfg2 = em28xx_read_reg(dev, EM28XX_R01_CHIPCFG2);
switch (chip_cfg2 & EM28XX_CHIPCFG2_TS_PACKETSIZE_MASK) {
@@ -1133,9 +1142,24 @@ int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev)
packet_size = 752;
break;
}
+ break;
+ case CHIP_ID_EM2874:
+ /*
+ * FIXME: for now assumes 564 like it was before, but the
+ * em2874 code should be added to return the proper value
+ */
+ packet_size = 564;
+ break;
+ case CHIP_ID_EM2884:
+ case CHIP_ID_EM28174:
+ default:
+ /*
+ * FIXME: same as em2874. 564 was enough for 22 Mbit DVB-T
+ * but not enough for 44 Mbit DVB-C.
+ */
+ packet_size = 752;
}
- em28xx_coredbg("dvb max packet size=%d\n", packet_size);
return packet_size;
}
EXPORT_SYMBOL_GPL(em28xx_isoc_dvb_max_packetsize);
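In the em2870/em2883 branch above, the TS max packet size comes from bits 1-0 of register 0x01. Only the 564 and 752 byte cases survive in the visible hunk, but both are multiples of the 188-byte MPEG-TS packet, which suggests the two bits simply select one to four TS packets per isochronous packet. A small standalone decoder under that assumption; the mask name and value here are assumed, not quoted from the patch.

#include <stdio.h>

#define TS_PACKETSIZE_MASK	0x03	/* assumed: low two bits of register 0x01 */

/* Decode the TS max packet size the way the em2870/em2883 branch does:
 * the field selects 1..4 MPEG-TS packets of 188 bytes per isoc packet. */
static unsigned int ts_max_packetsize(unsigned int chip_cfg2)
{
	return 188 * ((chip_cfg2 & TS_PACKETSIZE_MASK) + 1);
}

int main(void)
{
	unsigned int v;

	/* prints 188, 376, 564, 752; the last two match the values in the patch */
	for (v = 0; v < 4; v++)
		printf("cfg2 field %u -> %u bytes\n", v, ts_max_packetsize(v));
	return 0;
}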
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 7904ca4..e5916de 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -1,7 +1,7 @@
/*
DVB device driver for em28xx
- (c) 2008 Mauro Carvalho Chehab <mchehab@infradead.org>
+ (c) 2008-2011 Mauro Carvalho Chehab <mchehab@infradead.org>
(c) 2008 Devin Heitmueller <devin.heitmueller@gmail.com>
- Fixes for the driver to properly work with HVR-950
@@ -40,6 +40,8 @@
#include "s921.h"
#include "drxd.h"
#include "cxd2820r.h"
+#include "tda18271c2dd.h"
+#include "drxk.h"
MODULE_DESCRIPTION("driver for em28xx based DVB cards");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
@@ -73,6 +75,11 @@ struct em28xx_dvb {
struct dmx_frontend fe_hw;
struct dmx_frontend fe_mem;
struct dvb_net net;
+
+ /* Due to DRX-K - probably need changes */
+ int (*gate_ctrl)(struct dvb_frontend *, int);
+ struct semaphore pll_mutex;
+ bool dont_attach_fe1;
};
@@ -160,6 +167,11 @@ static int start_streaming(struct em28xx_dvb *dvb)
return rc;
max_dvb_packet_size = em28xx_isoc_dvb_max_packetsize(dev);
+ if (max_dvb_packet_size < 0)
+ return max_dvb_packet_size;
+ dprintk(1, "Using %d buffers each with %d bytes\n",
+ EM28XX_DVB_NUM_BUFS,
+ max_dvb_packet_size);
return em28xx_init_isoc(dev, EM28XX_DVB_MAX_PACKETS,
EM28XX_DVB_NUM_BUFS, max_dvb_packet_size,
@@ -295,6 +307,79 @@ static struct drxd_config em28xx_drxd = {
.disable_i2c_gate_ctrl = 1,
};
+struct drxk_config terratec_h5_drxk = {
+ .adr = 0x29,
+ .single_master = 1,
+ .no_i2c_bridge = 1,
+ .microcode_name = "dvb-usb-terratec-h5-drxk.fw",
+};
+
+static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct em28xx_dvb *dvb = fe->sec_priv;
+ int status;
+
+ if (!dvb)
+ return -EINVAL;
+
+ if (enable) {
+ down(&dvb->pll_mutex);
+ status = dvb->gate_ctrl(fe, 1);
+ } else {
+ status = dvb->gate_ctrl(fe, 0);
+ up(&dvb->pll_mutex);
+ }
+ return status;
+}
+
+static void terratec_h5_init(struct em28xx *dev)
+{
+ int i;
+ struct em28xx_reg_seq terratec_h5_init[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xf2, 0xff, 50},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+ struct em28xx_reg_seq terratec_h5_end[] = {
+ {EM2874_R80_GPIO, 0xe6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xa6, 0xff, 50},
+ {EM2874_R80_GPIO, 0xe6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+ struct {
+ unsigned char r[4];
+ int len;
+ } regs[] = {
+ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
+ {{ 0x01, 0x02 }, 2},
+ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
+ {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0x73, 0xaf }, 4},
+ {{ 0x04, 0x00 }, 2},
+ {{ 0x00, 0x04 }, 2},
+ {{ 0x00, 0x04, 0x00, 0x0a }, 4},
+ {{ 0x04, 0x14 }, 2},
+ {{ 0x04, 0x14, 0x00, 0x00 }, 4},
+ };
+
+ em28xx_gpio_set(dev, terratec_h5_init);
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
+ msleep(10);
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x45);
+ msleep(10);
+
+ dev->i2c_client.addr = 0x82 >> 1;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++)
+ i2c_master_send(&dev->i2c_client, regs[i].r, regs[i].len);
+ em28xx_gpio_set(dev, terratec_h5_end);
+}
+
static int mt352_terratec_xs_init(struct dvb_frontend *fe)
{
/* Values extracted from a USB trace of the Terratec Windows driver */
@@ -516,7 +601,7 @@ static void unregister_dvb(struct em28xx_dvb *dvb)
if (dvb->fe[1])
dvb_unregister_frontend(dvb->fe[1]);
dvb_unregister_frontend(dvb->fe[0]);
- if (dvb->fe[1])
+ if (dvb->fe[1] && !dvb->dont_attach_fe1)
dvb_frontend_detach(dvb->fe[1]);
dvb_frontend_detach(dvb->fe[0]);
dvb_unregister_adapter(&dvb->adapter);
@@ -546,7 +631,7 @@ static int dvb_init(struct em28xx *dev)
em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
/* init frontend */
switch (dev->model) {
- case EM2874_LEADERSHIP_ISDBT:
+ case EM2874_BOARD_LEADERSHIP_ISDBT:
dvb->fe[0] = dvb_attach(s921_attach,
&sharp_isdbt, &dev->i2c_adap);
@@ -689,6 +774,41 @@ static int dvb_init(struct em28xx *dev)
}
}
break;
+ case EM2884_BOARD_TERRATEC_H5:
+ terratec_h5_init(dev);
+
+ dvb->dont_attach_fe1 = 1;
+
+ dvb->fe[0] = dvb_attach(drxk_attach, &terratec_h5_drxk, &dev->i2c_adap, &dvb->fe[1]);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* FIXME: do we need a pll semaphore? */
+ dvb->fe[0]->sec_priv = dvb;
+ sema_init(&dvb->pll_mutex, 1);
+ dvb->gate_ctrl = dvb->fe[0]->ops.i2c_gate_ctrl;
+ dvb->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+ dvb->fe[1]->id = 1;
+
+ /* Attach tda18271 to DVB-C frontend */
+ if (dvb->fe[0]->ops.i2c_gate_ctrl)
+ dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 1);
+ if (!dvb_attach(tda18271c2dd_attach, dvb->fe[0], &dev->i2c_adap, 0x60)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ if (dvb->fe[0]->ops.i2c_gate_ctrl)
+ dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 0);
+
+ /* Hack - needed by drxk/tda18271c2dd */
+ dvb->fe[1]->tuner_priv = dvb->fe[0]->tuner_priv;
+ memcpy(&dvb->fe[1]->ops.tuner_ops,
+ &dvb->fe[0]->ops.tuner_ops,
+ sizeof(dvb->fe[0]->ops.tuner_ops));
+
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 4739fc7..36f5a9b 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -181,16 +181,25 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, unsigned char addr,
/*
* em28xx_i2c_send_bytes()
- * untested for more than 4 bytes
*/
static int em28xx_i2c_send_bytes(void *data, unsigned char addr, char *buf,
short len, int stop)
{
int wrcount = 0;
struct em28xx *dev = (struct em28xx *)data;
+ int write_timeout, ret;
wrcount = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len);
+ /* Seems to be required after a write */
+ for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
+ write_timeout -= 5) {
+ ret = dev->em28xx_read_reg(dev, 0x05);
+ if (!ret)
+ break;
+ msleep(5);
+ }
+
return wrcount;
}
@@ -218,9 +227,7 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, unsigned char addr,
*/
static int em28xx_i2c_check_for_device(struct em28xx *dev, unsigned char addr)
{
- char msg;
int ret;
- msg = addr;
ret = dev->em28xx_read_reg_req(dev, 2, addr);
if (ret < 0) {
@@ -332,7 +339,9 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
struct em28xx_eeprom *em_eeprom = (void *)eedata;
int i, err, size = len, block;
- if (dev->chip_id == CHIP_ID_EM2874 || dev->chip_id == CHIP_ID_EM28174) {
+ if (dev->chip_id == CHIP_ID_EM2874 ||
+ dev->chip_id == CHIP_ID_EM28174 ||
+ dev->chip_id == CHIP_ID_EM2884) {
/* Empia switched to a 16-bit addressable eeprom in newer
devices. While we could certainly write a routine to read
the eeprom, there is nothing of use in there that cannot be
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index ba1ba86..5d12b14 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -372,6 +372,7 @@ int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
ir->get_key = default_polling_getkey;
break;
case CHIP_ID_EM2874:
+ case CHIP_ID_EM28174:
ir->get_key = em2874_polling_getkey;
em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
break;
diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h
index e92a28e..66f7923 100644
--- a/drivers/media/video/em28xx/em28xx-reg.h
+++ b/drivers/media/video/em28xx/em28xx-reg.h
@@ -201,6 +201,7 @@ enum em28xx_chip_id {
CHIP_ID_EM2870 = 35,
CHIP_ID_EM2883 = 36,
CHIP_ID_EM2874 = 65,
+ CHIP_ID_EM2884 = 68,
CHIP_ID_EM28174 = 113,
};
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 7b6461d..d176dc0 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -32,7 +32,6 @@
#include <linux/bitmap.h>
#include <linux/usb.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -50,7 +49,8 @@
"Sascha Sommer <saschasommer@freenet.de>"
#define DRIVER_DESC "Empia em28xx based USB video device driver"
-#define EM28XX_VERSION_CODE KERNEL_VERSION(0, 1, 2)
+
+#define EM28XX_VERSION "0.1.3"
#define em28xx_videodbg(fmt, arg...) do {\
if (video_debug) \
@@ -72,6 +72,7 @@ do {\
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(EM28XX_VERSION);
static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
@@ -1757,8 +1758,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = EM28XX_VERSION_CODE;
-
cap->capabilities =
V4L2_CAP_SLICED_VBI_CAPTURE |
V4L2_CAP_VIDEO_CAPTURE |
@@ -1976,7 +1975,6 @@ static int radio_querycap(struct file *file, void *priv,
strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = EM28XX_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
}
@@ -2450,10 +2448,8 @@ int em28xx_register_analog_devices(struct em28xx *dev)
u8 val;
int ret;
- printk(KERN_INFO "%s: v4l2 driver version %d.%d.%d\n",
- dev->name,
- (EM28XX_VERSION_CODE >> 16) & 0xff,
- (EM28XX_VERSION_CODE >> 8) & 0xff, EM28XX_VERSION_CODE & 0xff);
+ printk(KERN_INFO "%s: v4l2 driver version %s\n",
+ dev->name, EM28XX_VERSION);
/* set default norm */
dev->norm = em28xx_video_template.current_norm;
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 3cca331..d80658b 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -117,9 +117,9 @@
#define EM2800_BOARD_VC211A 74
#define EM2882_BOARD_DIKOM_DK300 75
#define EM2870_BOARD_KWORLD_A340 76
-#define EM2874_LEADERSHIP_ISDBT 77
+#define EM2874_BOARD_LEADERSHIP_ISDBT 77
#define EM28174_BOARD_PCTV_290E 78
-
+#define EM2884_BOARD_TERRATEC_H5 79
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -487,6 +487,8 @@ struct em28xx {
int devno; /* marks the number of this device */
enum em28xx_chip_id chip_id;
+ int audio_ifnum;
+
struct v4l2_device v4l2_dev;
struct em28xx_board board;
@@ -503,6 +505,7 @@ struct em28xx {
unsigned int has_audio_class:1;
unsigned int has_alsa_audio:1;
+ unsigned int is_audio_only:1;
/* Controls audio streaming */
struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */
@@ -697,6 +700,9 @@ int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
void em28xx_release_resources(struct em28xx *dev);
/* Provided by em28xx-input.c */
+
+#ifdef CONFIG_VIDEO_EM28XX_RC
+
int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
@@ -709,6 +715,20 @@ void em28xx_deregister_snapshot_button(struct em28xx *dev);
int em28xx_ir_init(struct em28xx *dev);
int em28xx_ir_fini(struct em28xx *dev);
+#else
+
+#define em28xx_get_key_terratec NULL
+#define em28xx_get_key_em_haup NULL
+#define em28xx_get_key_pinnacle_usb_grey NULL
+#define em28xx_get_key_winfast_usbii_deluxe NULL
+
+static inline void em28xx_register_snapshot_button(struct em28xx *dev) {}
+static inline void em28xx_deregister_snapshot_button(struct em28xx *dev) {}
+static inline int em28xx_ir_init(struct em28xx *dev) { return 0; }
+static inline int em28xx_ir_fini(struct em28xx *dev) { return 0; }
+
+#endif
+
/* Provided by em28xx-vbi.c */
extern struct videobuf_queue_ops em28xx_vbi_qops;
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index bf66189..14bb907 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -21,7 +21,6 @@
#ifndef _ET61X251_H_
#define _ET61X251_H_
-#include <linux/version.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index a982750..9a1e80a 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
***************************************************************************/
+#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -48,8 +49,7 @@
#define ET61X251_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia"
#define ET61X251_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
#define ET61X251_MODULE_LICENSE "GPL"
-#define ET61X251_MODULE_VERSION "1:1.09"
-#define ET61X251_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 9)
+#define ET61X251_MODULE_VERSION "1.1.10"
/*****************************************************************************/
@@ -1579,7 +1579,7 @@ et61x251_vidioc_querycap(struct et61x251_device* cam, void __user * arg)
{
struct v4l2_capability cap = {
.driver = "et61x251",
- .version = ET61X251_MODULE_VERSION_CODE,
+ .version = LINUX_VERSION_CODE,
.capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING,
};
@@ -2480,16 +2480,8 @@ static long et61x251_ioctl_v4l2(struct file *filp,
case VIDIOC_S_PARM:
return et61x251_vidioc_s_parm(cam, arg);
- case VIDIOC_G_STD:
- case VIDIOC_S_STD:
- case VIDIOC_QUERYSTD:
- case VIDIOC_ENUMSTD:
- case VIDIOC_QUERYMENU:
- case VIDIOC_ENUM_FRAMEINTERVALS:
- return -EINVAL;
-
default:
- return -EINVAL;
+ return -ENOTTY;
}
}
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index 908d701..27cb197 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -23,19 +23,13 @@
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf-dma-contig.h>
#define DRV_NAME "fsl_viu"
-#define VIU_MAJOR_VERSION 0
-#define VIU_MINOR_VERSION 5
-#define VIU_RELEASE 0
-#define VIU_VERSION KERNEL_VERSION(VIU_MAJOR_VERSION, \
- VIU_MINOR_VERSION, \
- VIU_RELEASE)
+#define VIU_VERSION "0.5.1"
#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
@@ -610,7 +604,6 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strcpy(cap->driver, "viu");
strcpy(cap->card, "viu");
- cap->version = VIU_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_OVERLAY |
@@ -1684,3 +1677,4 @@ module_exit(viu_exit);
MODULE_DESCRIPTION("Freescale Video-In(VIU)");
MODULE_AUTHOR("Hongjun Chen");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VIU_VERSION);
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 34ae2c2..43d9a20 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -179,6 +179,16 @@ config USB_GSPCA_PAC7311
To compile this driver as a module, choose M here: the
module will be called gspca_pac7311.
+config USB_GSPCA_SE401
+ tristate "SE401 USB Camera Driver"
+ depends on VIDEO_V4L2 && USB_GSPCA
+ help
+ Say Y here if you want support for cameras based on the
+ Endpoints (formerly known as AOX) se401 chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_se401.
+
config USB_GSPCA_SN9C2028
tristate "SONIX Dual-Mode USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index 802fbe1..d6364a8 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_USB_GSPCA_OV534_9) += gspca_ov534_9.o
obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o
obj-$(CONFIG_USB_GSPCA_PAC7302) += gspca_pac7302.o
obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o
+obj-$(CONFIG_USB_GSPCA_SE401) += gspca_se401.o
obj-$(CONFIG_USB_GSPCA_SN9C2028) += gspca_sn9c2028.o
obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o
obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o
@@ -58,6 +59,7 @@ gspca_ov534_9-objs := ov534_9.o
gspca_pac207-objs := pac207.o
gspca_pac7302-objs := pac7302.o
gspca_pac7311-objs := pac7311.o
+gspca_se401-objs := se401.o
gspca_sn9c2028-objs := sn9c2028.o
gspca_sn9c20x-objs := sn9c20x.o
gspca_sonixb-objs := sonixb.o
diff --git a/drivers/media/video/gspca/gl860/gl860.h b/drivers/media/video/gspca/gl860/gl860.h
index 49ad4ac..0330a02 100644
--- a/drivers/media/video/gspca/gl860/gl860.h
+++ b/drivers/media/video/gspca/gl860/gl860.h
@@ -18,7 +18,6 @@
*/
#ifndef GL860_DEV_H
#define GL860_DEV_H
-#include <linux/version.h>
#include "gspca.h"
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 08ce994..5da4879 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -24,7 +24,6 @@
#define MODULE_NAME "gspca"
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
@@ -51,11 +50,12 @@
#error "DEF_NURBS too big"
#endif
+#define DRIVER_VERSION_NUMBER "2.13.0"
+
MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
-
-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 13, 0)
+MODULE_VERSION(DRIVER_VERSION_NUMBER);
#ifdef GSPCA_DEBUG
int gspca_debug = D_ERR | D_PROBE;
@@ -443,8 +443,11 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
} else {
switch (gspca_dev->last_packet_type) {
case DISCARD_PACKET:
- if (packet_type == LAST_PACKET)
+ if (packet_type == LAST_PACKET) {
gspca_dev->last_packet_type = packet_type;
+ gspca_dev->image = NULL;
+ gspca_dev->image_len = 0;
+ }
return;
case LAST_PACKET:
return;
@@ -1278,10 +1281,10 @@ static int vidioc_querycap(struct file *file, void *priv,
ret = -ENODEV;
goto out;
}
- strncpy((char *) cap->driver, gspca_dev->sd_desc->name,
+ strlcpy((char *) cap->driver, gspca_dev->sd_desc->name,
sizeof cap->driver);
if (gspca_dev->dev->product != NULL) {
- strncpy((char *) cap->card, gspca_dev->dev->product,
+ strlcpy((char *) cap->card, gspca_dev->dev->product,
sizeof cap->card);
} else {
snprintf((char *) cap->card, sizeof cap->card,
@@ -1291,7 +1294,6 @@ static int vidioc_querycap(struct file *file, void *priv,
}
usb_make_path(gspca_dev->dev, (char *) cap->bus_info,
sizeof(cap->bus_info));
- cap->version = DRIVER_VERSION_NUMBER;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
| V4L2_CAP_STREAMING
| V4L2_CAP_READWRITE;
@@ -1460,7 +1462,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
return -EINVAL;
input->type = V4L2_INPUT_TYPE_CAMERA;
input->status = gspca_dev->cam.input_flags;
- strncpy(input->name, gspca_dev->sd_desc->name,
+ strlcpy(input->name, gspca_dev->sd_desc->name,
sizeof input->name);
return 0;
}
@@ -2478,10 +2480,7 @@ EXPORT_SYMBOL(gspca_auto_gain_n_exposure);
/* -- module insert / remove -- */
static int __init gspca_init(void)
{
- info("v%d.%d.%d registered",
- (DRIVER_VERSION_NUMBER >> 16) & 0xff,
- (DRIVER_VERSION_NUMBER >> 8) & 0xff,
- DRIVER_VERSION_NUMBER & 0xff);
+ info("v" DRIVER_VERSION_NUMBER " registered");
return 0;
}
static void __exit gspca_exit(void)
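The querycap and enum_input hunks above replace strncpy() with strlcpy() for the driver, card and input names. The practical difference: strncpy() does not NUL-terminate when the source is at least as long as the buffer, while strlcpy() always terminates and truncates. A small userspace illustration; strlcpy() is a kernel/BSD function, so a local equivalent is sketched here and the sample string is invented.

#include <stdio.h>
#include <string.h>

/* local stand-in for the kernel's strlcpy(): copy at most size-1 bytes,
 * always NUL-terminate, return the length of src */
static size_t strlcpy_sketch(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char a[8], b[8];
	const char *name = "gspca_main_driver";	/* longer than the buffers */

	strncpy(a, name, sizeof(a));		/* fills a, leaves no terminator */
	strlcpy_sketch(b, name, sizeof(b));	/* b = "gspca_m", terminated */

	printf("strncpy left a terminator? %s\n",
	       memchr(a, '\0', sizeof(a)) ? "yes" : "no");	/* no */
	printf("strlcpy result: \"%s\"\n", b);
	return 0;
}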
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 057e287..18305c8 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -134,6 +134,7 @@ enum sensors {
SEN_OV7670,
SEN_OV76BE,
SEN_OV8610,
+ SEN_OV9600,
};
/* Note this is a bit of a hack, but the w9968cf driver needs the code for all
@@ -340,6 +341,10 @@ static const unsigned ctrl_dis[] = {
(1 << EXPOSURE) |
(1 << AUTOGAIN) |
(1 << FREQ),
+[SEN_OV9600] = ((1 << NCTRL) - 1) /* no control */
+ ^ ((1 << EXPOSURE) /* but exposure */
+ | (1 << AUTOGAIN)), /* and autogain */
+
};
static const struct v4l2_pix_format ov519_vga_mode[] = {
@@ -525,6 +530,17 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
};
+static const struct v4l2_pix_format ovfx2_ov9600_mode[] = {
+ {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 1},
+ {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 1280,
+ .sizeimage = 1280 * 1024,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
/* Registers common to OV511 / OV518 */
#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */
@@ -1807,6 +1823,22 @@ static const struct ov_i2c_regvals norm_7660[] = {
| OV7670_COM8_AEC},
{0xa1, 0xc8}
};
+static const struct ov_i2c_regvals norm_9600[] = {
+ {0x12, 0x80},
+ {0x0c, 0x28},
+ {0x11, 0x80},
+ {0x13, 0xb5},
+ {0x14, 0x3e},
+ {0x1b, 0x04},
+ {0x24, 0xb0},
+ {0x25, 0x90},
+ {0x26, 0x94},
+ {0x35, 0x90},
+ {0x37, 0x07},
+ {0x38, 0x08},
+ {0x01, 0x8e},
+ {0x02, 0x85}
+};
/* 7670. Defaults taken from OmniVision provided data,
* as provided by Jonathan Corbet of OLPC */
@@ -2400,9 +2432,12 @@ static int ov518_i2c_r(struct sd *sd, u8 reg)
/* Initiate 2-byte write cycle */
reg_w(sd, R518_I2C_CTL, 0x03);
+ reg_r8(sd, R518_I2C_CTL);
/* Initiate 2-byte read cycle */
reg_w(sd, R518_I2C_CTL, 0x05);
+ reg_r8(sd, R518_I2C_CTL);
+
value = reg_r(sd, R51x_I2C_DATA);
PDEBUG(D_USBI, "ov518_i2c_r %02x %02x", reg, value);
return value;
@@ -2686,7 +2721,7 @@ static void write_i2c_regvals(struct sd *sd,
*
***************************************************************************/
-/* This initializes the OV2x10 / OV3610 / OV3620 */
+/* This initializes the OV2x10 / OV3610 / OV3620 / OV9600 */
static void ov_hires_configure(struct sd *sd)
{
int high, low;
@@ -2702,19 +2737,32 @@ static void ov_hires_configure(struct sd *sd)
high = i2c_r(sd, 0x0a);
low = i2c_r(sd, 0x0b);
/* info("%x, %x", high, low); */
- if (high == 0x96 && low == 0x40) {
- PDEBUG(D_PROBE, "Sensor is an OV2610");
- sd->sensor = SEN_OV2610;
- } else if (high == 0x96 && low == 0x41) {
- PDEBUG(D_PROBE, "Sensor is an OV2610AE");
- sd->sensor = SEN_OV2610AE;
- } else if (high == 0x36 && (low & 0x0f) == 0x00) {
- PDEBUG(D_PROBE, "Sensor is an OV3610");
- sd->sensor = SEN_OV3610;
- } else {
- err("Error unknown sensor type: %02x%02x",
- high, low);
+ switch (high) {
+ case 0x96:
+ switch (low) {
+ case 0x40:
+ PDEBUG(D_PROBE, "Sensor is a OV2610");
+ sd->sensor = SEN_OV2610;
+ return;
+ case 0x41:
+ PDEBUG(D_PROBE, "Sensor is a OV2610AE");
+ sd->sensor = SEN_OV2610AE;
+ return;
+ case 0xb1:
+ PDEBUG(D_PROBE, "Sensor is a OV9600");
+ sd->sensor = SEN_OV9600;
+ return;
+ }
+ break;
+ case 0x36:
+ if ((low & 0x0f) == 0x00) {
+ PDEBUG(D_PROBE, "Sensor is a OV3610");
+ sd->sensor = SEN_OV3610;
+ return;
+ }
+ break;
}
+ err("Error unknown sensor type: %02x%02x", high, low);
}
/* This initializes the OV8110, OV8610 sensor. The OV8110 uses
@@ -2810,7 +2858,6 @@ static void ov7xx0_configure(struct sd *sd)
case 0x60:
PDEBUG(D_PROBE, "Sensor is a OV7660");
sd->sensor = SEN_OV7660;
- sd->invert_led = 0;
break;
default:
PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
@@ -3289,7 +3336,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
case BRIDGE_OV519:
cam->cam_mode = ov519_vga_mode;
cam->nmodes = ARRAY_SIZE(ov519_vga_mode);
- sd->invert_led = !sd->invert_led;
break;
case BRIDGE_OVFX2:
cam->cam_mode = ov519_vga_mode;
@@ -3400,6 +3446,10 @@ static int sd_init(struct gspca_dev *gspca_dev)
cam->cam_mode = ovfx2_ov3610_mode;
cam->nmodes = ARRAY_SIZE(ovfx2_ov3610_mode);
break;
+ case SEN_OV9600:
+ cam->cam_mode = ovfx2_ov9600_mode;
+ cam->nmodes = ARRAY_SIZE(ovfx2_ov9600_mode);
+ break;
default:
if (sd->sif) {
cam->cam_mode = ov519_sif_mode;
@@ -3497,6 +3547,12 @@ static int sd_init(struct gspca_dev *gspca_dev)
case SEN_OV8610:
write_i2c_regvals(sd, norm_8610, ARRAY_SIZE(norm_8610));
break;
+ case SEN_OV9600:
+ write_i2c_regvals(sd, norm_9600, ARRAY_SIZE(norm_9600));
+
+ /* enable autoexpo */
+/* i2c_w_mask(sd, 0x13, 0x05, 0x05); */
+ break;
}
return gspca_dev->usb_err;
error:
@@ -4085,6 +4141,33 @@ static void mode_init_ov_sensor_regs(struct sd *sd)
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
break;
+ case SEN_OV9600: {
+ const struct ov_i2c_regvals *vals;
+ static const struct ov_i2c_regvals sxga_15[] = {
+ {0x11, 0x80}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75}
+ };
+ static const struct ov_i2c_regvals sxga_7_5[] = {
+ {0x11, 0x81}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75}
+ };
+ static const struct ov_i2c_regvals vga_30[] = {
+ {0x11, 0x81}, {0x14, 0x7e}, {0x24, 0x70}, {0x25, 0x60}
+ };
+ static const struct ov_i2c_regvals vga_15[] = {
+ {0x11, 0x83}, {0x14, 0x3e}, {0x24, 0x80}, {0x25, 0x70}
+ };
+
+ /* frame rates:
+ * 15fps / 7.5 fps for 1280x1024
+ * 30fps / 15fps for 640x480
+ */
+ i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0x40);
+ if (qvga)
+ vals = sd->frame_rate < 30 ? vga_15 : vga_30;
+ else
+ vals = sd->frame_rate < 15 ? sxga_7_5 : sxga_15;
+ write_i2c_regvals(sd, vals, ARRAY_SIZE(sxga_15));
+ return;
+ }
default:
return;
}
@@ -4120,6 +4203,7 @@ static void set_ov_sensor_window(struct sd *sd)
case SEN_OV2610AE:
case SEN_OV3610:
case SEN_OV7670:
+ case SEN_OV9600:
mode_init_ov_sensor_regs(sd);
return;
case SEN_OV7660:
@@ -4919,23 +5003,24 @@ static const struct sd_desc sd_desc = {
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
- {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x041e, 0x4052),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x4064),
- .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+ {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x4068),
+ {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x045e, 0x028c),
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
- {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x054c, 0x0155),
- .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+ {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
{USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
- {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0x0519),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+ {USB_DEVICE(0x05a9, 0x0530),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
{USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 },
{USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
diff --git a/drivers/media/video/gspca/se401.c b/drivers/media/video/gspca/se401.c
new file mode 100644
index 0000000..4c283c2
--- /dev/null
+++ b/drivers/media/video/gspca/se401.c
@@ -0,0 +1,774 @@
+/*
+ * GSPCA Endpoints (formerly known as AOX) se401 USB Camera sub Driver
+ *
+ * Copyright (C) 2011 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on the v4l1 se401 driver which is:
+ *
+ * Copyright (c) 2000 Jeroen B. Vreeken (pe1rxq@amsat.org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define MODULE_NAME "se401"
+
+#define BULK_SIZE 4096
+#define PACKET_SIZE 1024
+#define READ_REQ_SIZE 64
+#define MAX_MODES ((READ_REQ_SIZE - 6) / 4)
+/* The se401 compression algorithm uses a fixed quant factor, which
+ can be configured by setting the high nibble of the SE401_OPERATINGMODE
+ feature. This needs to exactly match what is in libv4l! */
+#define SE401_QUANT_FACT 8
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include "gspca.h"
+#include "se401.h"
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Endpoints se401");
+MODULE_LICENSE("GPL");
+
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ GAIN,
+ EXPOSURE,
+ FREQ,
+ NCTRL /* number of controls */
+};
+
+/* exposure change state machine states */
+enum {
+ EXPO_CHANGED,
+ EXPO_DROP_FRAME,
+ EXPO_NO_CHANGE,
+};
+
+/* specific webcam descriptor */
+struct sd {
+ struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct gspca_ctrl ctrls[NCTRL];
+ struct v4l2_pix_format fmts[MAX_MODES];
+ int pixels_read;
+ int packet_read;
+ u8 packet[PACKET_SIZE];
+ u8 restart_stream;
+ u8 button_state;
+ u8 resetlevel;
+ u8 resetlevel_frame_count;
+ int resetlevel_adjust_dir;
+ int expo_change_state;
+};
+
+static void setbrightness(struct gspca_dev *gspca_dev);
+static void setgain(struct gspca_dev *gspca_dev);
+static void setexposure(struct gspca_dev *gspca_dev);
+
+static const struct ctrl sd_ctrls[NCTRL] = {
+[BRIGHTNESS] = {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 15,
+ },
+ .set_control = setbrightness
+ },
+[GAIN] = {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = 50, /* Really 63 but > 50 is not pretty */
+ .step = 1,
+ .default_value = 25,
+ },
+ .set_control = setgain
+ },
+[EXPOSURE] = {
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 32767,
+ .step = 1,
+ .default_value = 15000,
+ },
+ .set_control = setexposure
+ },
+[FREQ] = {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 0,
+ },
+ .set_control = setexposure
+ },
+};
+
+static void se401_write_req(struct gspca_dev *gspca_dev, u16 req, u16 value,
+ int silent)
+{
+ int err;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+
+ err = usb_control_msg(gspca_dev->dev,
+ usb_sndctrlpipe(gspca_dev->dev, 0), req,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, 0, NULL, 0, 1000);
+ if (err < 0) {
+ if (!silent)
+ err("write req failed req %#04x val %#04x error %d",
+ req, value, err);
+ gspca_dev->usb_err = err;
+ }
+}
+
+static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
+{
+ int err;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+
+ if (USB_BUF_SZ < READ_REQ_SIZE) {
+ err("USB_BUF_SZ too small!!");
+ gspca_dev->usb_err = -ENOBUFS;
+ return;
+ }
+
+ err = usb_control_msg(gspca_dev->dev,
+ usb_rcvctrlpipe(gspca_dev->dev, 0), req,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, 0, gspca_dev->usb_buf, READ_REQ_SIZE, 1000);
+ if (err < 0) {
+ if (!silent)
+ err("read req failed req %#04x error %d", req, err);
+ gspca_dev->usb_err = err;
+ }
+}
+
+static void se401_set_feature(struct gspca_dev *gspca_dev,
+ u16 selector, u16 param)
+{
+ int err;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+
+ err = usb_control_msg(gspca_dev->dev,
+ usb_sndctrlpipe(gspca_dev->dev, 0),
+ SE401_REQ_SET_EXT_FEATURE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ param, selector, NULL, 0, 1000);
+ if (err < 0) {
+ err("set feature failed sel %#04x param %#04x error %d",
+ selector, param, err);
+ gspca_dev->usb_err = err;
+ }
+}
+
+static int se401_get_feature(struct gspca_dev *gspca_dev, u16 selector)
+{
+ int err;
+
+ if (gspca_dev->usb_err < 0)
+ return gspca_dev->usb_err;
+
+ if (USB_BUF_SZ < 2) {
+ err("USB_BUF_SZ too small!!");
+ gspca_dev->usb_err = -ENOBUFS;
+ return gspca_dev->usb_err;
+ }
+
+ err = usb_control_msg(gspca_dev->dev,
+ usb_rcvctrlpipe(gspca_dev->dev, 0),
+ SE401_REQ_GET_EXT_FEATURE,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, selector, gspca_dev->usb_buf, 2, 1000);
+ if (err < 0) {
+ err("get feature failed sel %#04x error %d", selector, err);
+ gspca_dev->usb_err = err;
+ return err;
+ }
+ return gspca_dev->usb_buf[0] | (gspca_dev->usb_buf[1] << 8);
+}
+
+static void setbrightness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (gspca_dev->ctrl_dis & (1 << BRIGHTNESS))
+ return;
+
+ /* HDG: this does not seem to do anything on my cam */
+ se401_write_req(gspca_dev, SE401_REQ_SET_BRT,
+ sd->ctrls[BRIGHTNESS].val, 0);
+}
+
+static void setgain(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 gain = 63 - sd->ctrls[GAIN].val;
+
+ /* red color gain */
+ se401_set_feature(gspca_dev, HV7131_REG_ARCG, gain);
+ /* green color gain */
+ se401_set_feature(gspca_dev, HV7131_REG_AGCG, gain);
+ /* blue color gain */
+ se401_set_feature(gspca_dev, HV7131_REG_ABCG, gain);
+}
+
+static void setexposure(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int integration = sd->ctrls[EXPOSURE].val << 6;
+ u8 expose_h, expose_m, expose_l;
+
+ /* Do this before the set_feature calls, for proper timing wrt
+ the interrupt-driven pkt_scan. Note we may still race, but that
+ is not a big issue; the expo change state machine is merely for
+ avoiding underexposed frames getting sent out, and if one sneaks
+ through, so be it */
+ sd->expo_change_state = EXPO_CHANGED;
+
+ if (sd->ctrls[FREQ].val == V4L2_CID_POWER_LINE_FREQUENCY_50HZ)
+ integration = integration - integration % 106667;
+ if (sd->ctrls[FREQ].val == V4L2_CID_POWER_LINE_FREQUENCY_60HZ)
+ integration = integration - integration % 88889;
+
+ expose_h = (integration >> 16);
+ expose_m = (integration >> 8);
+ expose_l = integration;
+
+ /* integration time low */
+ se401_set_feature(gspca_dev, HV7131_REG_TITL, expose_l);
+ /* integration time mid */
+ se401_set_feature(gspca_dev, HV7131_REG_TITM, expose_m);
+ /* integration time high */
+ se401_set_feature(gspca_dev, HV7131_REG_TITU, expose_h);
+}
+
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct cam *cam = &gspca_dev->cam;
+ u8 *cd = gspca_dev->usb_buf;
+ int i, j, n;
+ int widths[MAX_MODES], heights[MAX_MODES];
+
+ /* Read the camera descriptor */
+ se401_read_req(gspca_dev, SE401_REQ_GET_CAMERA_DESCRIPTOR, 1);
+ if (gspca_dev->usb_err) {
+ /* Sometimes after being idle for a while the se401 won't
+ respond and needs a good kicking */
+ usb_reset_device(gspca_dev->dev);
+ gspca_dev->usb_err = 0;
+ se401_read_req(gspca_dev, SE401_REQ_GET_CAMERA_DESCRIPTOR, 0);
+ }
+
+ /* Some cameras start with their LED on */
+ se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 0, 0);
+ if (gspca_dev->usb_err)
+ return gspca_dev->usb_err;
+
+ if (cd[1] != 0x41) {
+ err("Wrong descriptor type");
+ return -ENODEV;
+ }
+
+ if (!(cd[2] & SE401_FORMAT_BAYER)) {
+ err("Bayer format not supported!");
+ return -ENODEV;
+ }
+
+ if (cd[3])
+ info("ExtraFeatures: %d", cd[3]);
+
+ n = cd[4] | (cd[5] << 8);
+ if (n > MAX_MODES) {
+ err("Too many frame sizes");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < n ; i++) {
+ widths[i] = cd[6 + i * 4 + 0] | (cd[6 + i * 4 + 1] << 8);
+ heights[i] = cd[6 + i * 4 + 2] | (cd[6 + i * 4 + 3] << 8);
+ }
+
+ for (i = 0; i < n ; i++) {
+ sd->fmts[i].width = widths[i];
+ sd->fmts[i].height = heights[i];
+ sd->fmts[i].field = V4L2_FIELD_NONE;
+ sd->fmts[i].colorspace = V4L2_COLORSPACE_SRGB;
+ sd->fmts[i].priv = 1;
+
+ /* janggu compression only works for 1/4th or 1/16th res */
+ for (j = 0; j < n; j++) {
+ if (widths[j] / 2 == widths[i] &&
+ heights[j] / 2 == heights[i]) {
+ sd->fmts[i].priv = 2;
+ break;
+ }
+ }
+ /* 1/16th, if also available, is better than 1/4th, because
+ we then use a larger area of the sensor */
+ for (j = 0; j < n; j++) {
+ if (widths[j] / 4 == widths[i] &&
+ heights[j] / 4 == heights[i]) {
+ sd->fmts[i].priv = 4;
+ break;
+ }
+ }
+
+ if (sd->fmts[i].priv == 1) {
+ /* Not a 1/4th or 1/16th res, use bayer */
+ sd->fmts[i].pixelformat = V4L2_PIX_FMT_SBGGR8;
+ sd->fmts[i].bytesperline = widths[i];
+ sd->fmts[i].sizeimage = widths[i] * heights[i];
+ info("Frame size: %dx%d bayer", widths[i], heights[i]);
+ } else {
+ /* Found a match use janggu compression */
+ sd->fmts[i].pixelformat = V4L2_PIX_FMT_SE401;
+ sd->fmts[i].bytesperline = 0;
+ sd->fmts[i].sizeimage = widths[i] * heights[i] * 3;
+ info("Frame size: %dx%d 1/%dth janggu",
+ widths[i], heights[i],
+ sd->fmts[i].priv * sd->fmts[i].priv);
+ }
+ }
+
+ cam->cam_mode = sd->fmts;
+ cam->nmodes = n;
+ cam->bulk = 1;
+ cam->bulk_size = BULK_SIZE;
+ cam->bulk_nurbs = 4;
+ cam->ctrls = sd->ctrls;
+ gspca_dev->nbalt = 1; /* Ignore the bogus isoc alt settings */
+ sd->resetlevel = 0x2d; /* Set initial resetlevel */
+
+ /* See if the camera supports brightness */
+ se401_read_req(gspca_dev, SE401_REQ_GET_BRT, 1);
+ if (gspca_dev->usb_err) {
+ gspca_dev->ctrl_dis = (1 << BRIGHTNESS);
+ gspca_dev->usb_err = 0;
+ }
+
+ return 0;
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ return 0;
+}
+
+/* -- start the camera -- */
+static int sd_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ int mult = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
+ int mode = 0;
+
+ se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 1, 1);
+ if (gspca_dev->usb_err) {
+ /* Sometimes after being idle for a while the se401 won't
+ respond and needs a good kicking */
+ usb_reset_device(gspca_dev->dev);
+ gspca_dev->usb_err = 0;
+ se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 1, 0);
+ }
+ se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 1, 0);
+
+ se401_set_feature(gspca_dev, HV7131_REG_MODE_B, 0x05);
+
+ /* set size + mode */
+ se401_write_req(gspca_dev, SE401_REQ_SET_WIDTH,
+ gspca_dev->width * mult, 0);
+ se401_write_req(gspca_dev, SE401_REQ_SET_HEIGHT,
+ gspca_dev->height * mult, 0);
+ /*
+ * HDG: disabled this as it does not seem to do anything
+ * se401_write_req(gspca_dev, SE401_REQ_SET_OUTPUT_MODE,
+ * SE401_FORMAT_BAYER, 0);
+ */
+
+ switch (mult) {
+ case 1: /* Raw bayer */
+ mode = 0x03; break;
+ case 2: /* 1/4th janggu */
+ mode = SE401_QUANT_FACT << 4; break;
+ case 4: /* 1/16th janggu */
+ mode = (SE401_QUANT_FACT << 4) | 0x02; break;
+ }
+ se401_set_feature(gspca_dev, SE401_OPERATINGMODE, mode);
+
+ setbrightness(gspca_dev);
+ setgain(gspca_dev);
+ setexposure(gspca_dev);
+ se401_set_feature(gspca_dev, HV7131_REG_ARLV, sd->resetlevel);
+
+ sd->packet_read = 0;
+ sd->pixels_read = 0;
+ sd->restart_stream = 0;
+ sd->resetlevel_frame_count = 0;
+ sd->resetlevel_adjust_dir = 0;
+ sd->expo_change_state = EXPO_NO_CHANGE;
+
+ se401_write_req(gspca_dev, SE401_REQ_START_CONTINUOUS_CAPTURE, 0, 0);
+
+ return gspca_dev->usb_err;
+}
+
+static void sd_stopN(struct gspca_dev *gspca_dev)
+{
+ se401_write_req(gspca_dev, SE401_REQ_STOP_CONTINUOUS_CAPTURE, 0, 0);
+ se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 0, 0);
+ se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 0, 0);
+}
+
+static void sd_dq_callback(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ unsigned int ahrc, alrc;
+ int oldreset, adjust_dir;
+
+ /* Restart the stream if requested to do so by pkt_scan */
+ if (sd->restart_stream) {
+ sd_stopN(gspca_dev);
+ sd_start(gspca_dev);
+ sd->restart_stream = 0;
+ }
+
+ /* Automatically adjust sensor reset level
+ Hyundai have some really nice docs about this and other sensor
+ related stuff on their homepage: www.hei.co.kr */
+ sd->resetlevel_frame_count++;
+ if (sd->resetlevel_frame_count < 20)
+ return;
+
+ /* For some reason these normally read-only registers don't get reset
+ to zero after reading them just once... */
+ se401_get_feature(gspca_dev, HV7131_REG_HIREFNOH);
+ se401_get_feature(gspca_dev, HV7131_REG_HIREFNOL);
+ se401_get_feature(gspca_dev, HV7131_REG_LOREFNOH);
+ se401_get_feature(gspca_dev, HV7131_REG_LOREFNOL);
+ ahrc = 256*se401_get_feature(gspca_dev, HV7131_REG_HIREFNOH) +
+ se401_get_feature(gspca_dev, HV7131_REG_HIREFNOL);
+ alrc = 256*se401_get_feature(gspca_dev, HV7131_REG_LOREFNOH) +
+ se401_get_feature(gspca_dev, HV7131_REG_LOREFNOL);
+
+ /* Not an exact science, but it seems to work pretty well... */
+ oldreset = sd->resetlevel;
+ if (alrc > 10) {
+ while (alrc >= 10 && sd->resetlevel < 63) {
+ sd->resetlevel++;
+ alrc /= 2;
+ }
+ } else if (ahrc > 20) {
+ while (ahrc >= 20 && sd->resetlevel > 0) {
+ sd->resetlevel--;
+ ahrc /= 2;
+ }
+ }
+ /* Detect ping-pong-ing and halve adjustment to avoid overshoot */
+ if (sd->resetlevel > oldreset)
+ adjust_dir = 1;
+ else
+ adjust_dir = -1;
+ if (sd->resetlevel_adjust_dir &&
+ sd->resetlevel_adjust_dir != adjust_dir)
+ sd->resetlevel = oldreset + (sd->resetlevel - oldreset) / 2;
+
+ if (sd->resetlevel != oldreset) {
+ sd->resetlevel_adjust_dir = adjust_dir;
+ se401_set_feature(gspca_dev, HV7131_REG_ARLV, sd->resetlevel);
+ }
+
+ sd->resetlevel_frame_count = 0;
+}
+
+static void sd_complete_frame(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ switch (sd->expo_change_state) {
+ case EXPO_CHANGED:
+ /* The exposure was changed while this frame
+ was being sent, so this frame is ok */
+ sd->expo_change_state = EXPO_DROP_FRAME;
+ break;
+ case EXPO_DROP_FRAME:
+ /* The exposure was changed while this frame
+ was being captured, drop it! */
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ sd->expo_change_state = EXPO_NO_CHANGE;
+ break;
+ case EXPO_NO_CHANGE:
+ break;
+ }
+ gspca_frame_add(gspca_dev, LAST_PACKET, data, len);
+}
+
+static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ int imagesize = gspca_dev->width * gspca_dev->height;
+ int i, plen, bits, pixels, info, count;
+
+ if (sd->restart_stream)
+ return;
+
+ /* Sometimes a 1024-byte garbage bulk packet is sent between frames */
+ if (gspca_dev->last_packet_type == LAST_PACKET && len == 1024) {
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+
+ i = 0;
+ while (i < len) {
+ /* Read header if not already present from prev bulk pkt */
+ if (sd->packet_read < 4) {
+ count = 4 - sd->packet_read;
+ if (count > len - i)
+ count = len - i;
+ memcpy(&sd->packet[sd->packet_read], &data[i], count);
+ sd->packet_read += count;
+ i += count;
+ if (sd->packet_read < 4)
+ break;
+ }
+ bits = sd->packet[3] + (sd->packet[2] << 8);
+ pixels = sd->packet[1] + ((sd->packet[0] & 0x3f) << 8);
+ info = (sd->packet[0] & 0xc0) >> 6;
+ plen = ((bits + 47) >> 4) << 1;
+ /* Sanity checks */
+ if (plen > 1024) {
+ err("invalid packet len %d restarting stream", plen);
+ goto error;
+ }
+ if (info == 3) {
+ err("unknown frame info value restarting stream");
+ goto error;
+ }
+
+ /* Read (remainder of) packet contents */
+ count = plen - sd->packet_read;
+ if (count > len - i)
+ count = len - i;
+ memcpy(&sd->packet[sd->packet_read], &data[i], count);
+ sd->packet_read += count;
+ i += count;
+ if (sd->packet_read < plen)
+ break;
+
+ sd->pixels_read += pixels;
+ sd->packet_read = 0;
+
+ switch (info) {
+ case 0: /* Frame data */
+ gspca_frame_add(gspca_dev, INTER_PACKET, sd->packet,
+ plen);
+ break;
+ case 1: /* EOF */
+ if (sd->pixels_read != imagesize) {
+ err("frame size %d expected %d",
+ sd->pixels_read, imagesize);
+ goto error;
+ }
+ sd_complete_frame(gspca_dev, sd->packet, plen);
+ return; /* Discard the rest of the bulk packet !! */
+ case 2: /* SOF */
+ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->packet,
+ plen);
+ sd->pixels_read = pixels;
+ break;
+ }
+ }
+ return;
+
+error:
+ sd->restart_stream = 1;
+ /* Give userspace a 0-byte frame, so our dq callback gets
+ called and it can restart the stream */
+ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
+ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+}
+
+static void sd_pkt_scan_bayer(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ struct cam *cam = &gspca_dev->cam;
+ int imagesize = cam->cam_mode[gspca_dev->curr_mode].sizeimage;
+
+ if (gspca_dev->image_len == 0) {
+ gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
+ return;
+ }
+
+ if (gspca_dev->image_len + len >= imagesize) {
+ sd_complete_frame(gspca_dev, data, len);
+ return;
+ }
+
+ gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
+}
+
+static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ int mult = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
+
+ if (len == 0)
+ return;
+
+ if (mult == 1) /* mult == 1 means raw bayer */
+ sd_pkt_scan_bayer(gspca_dev, data, len);
+ else
+ sd_pkt_scan_janggu(gspca_dev, data, len);
+}
+
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
+ strcpy((char *) menu->name, "NoFliker");
+ return 0;
+ case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
+ strcpy((char *) menu->name, "60 Hz");
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+}
+
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ u8 state;
+
+ if (len != 2)
+ return -EINVAL;
+
+ switch (data[0]) {
+ case 0:
+ case 1:
+ state = data[0];
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (sd->button_state != state) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, state);
+ input_sync(gspca_dev->input_dev);
+ sd->button_state = state;
+ }
+
+ return 0;
+}
+#endif
+
+/* sub-driver description */
+static const struct sd_desc sd_desc = {
+ .name = MODULE_NAME,
+ .ctrls = sd_ctrls,
+ .nctrls = ARRAY_SIZE(sd_ctrls),
+ .config = sd_config,
+ .init = sd_init,
+ .start = sd_start,
+ .stopN = sd_stopN,
+ .dq_callback = sd_dq_callback,
+ .pkt_scan = sd_pkt_scan,
+ .querymenu = sd_querymenu,
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ .int_pkt_scan = sd_int_pkt_scan,
+#endif
+};
+
+/* -- module initialisation -- */
+static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x03e8, 0x0004)}, /* Endpoints/Aox SE401 */
+ {USB_DEVICE(0x0471, 0x030b)}, /* Philips PCVC665K */
+ {USB_DEVICE(0x047d, 0x5001)}, /* Kensington 67014 */
+ {USB_DEVICE(0x047d, 0x5002)}, /* Kensington 6701(5/7) */
+ {USB_DEVICE(0x047d, 0x5003)}, /* Kensington 67016 */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, device_table);
+
+/* -- device connect -- */
+static int sd_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
+ THIS_MODULE);
+}
+
+static int sd_pre_reset(struct usb_interface *intf)
+{
+ return 0;
+}
+
+static int sd_post_reset(struct usb_interface *intf)
+{
+ return 0;
+}
+
+static struct usb_driver sd_driver = {
+ .name = MODULE_NAME,
+ .id_table = device_table,
+ .probe = sd_probe,
+ .disconnect = gspca_disconnect,
+#ifdef CONFIG_PM
+ .suspend = gspca_suspend,
+ .resume = gspca_resume,
+#endif
+ .pre_reset = sd_pre_reset,
+ .post_reset = sd_post_reset,
+};
+
+/* -- module insert / remove -- */
+static int __init sd_mod_init(void)
+{
+ return usb_register(&sd_driver);
+}
+static void __exit sd_mod_exit(void)
+{
+ usb_deregister(&sd_driver);
+}
+
+module_init(sd_mod_init);
+module_exit(sd_mod_exit);
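In sd_pkt_scan_janggu() above, every compressed packet starts with a 4-byte header: byte 0 holds a 2-bit info field (0 frame data, 1 end of frame, 2 start of frame) plus the high bits of a 14-bit pixel count, byte 1 the low pixel-count bits, and bytes 2-3 a 16-bit payload length in bits, from which the padded packet length (header included) is derived. A standalone decoder using the same expressions as the driver; the example header values are invented.

#include <stdio.h>

struct janggu_hdr {
	unsigned int pixels;	/* pixels described by this packet */
	unsigned int bits;	/* compressed payload length in bits */
	unsigned int info;	/* 0 = data, 1 = EOF, 2 = SOF, 3 = invalid */
	unsigned int plen;	/* padded packet length in bytes, header included */
};

static struct janggu_hdr decode_hdr(const unsigned char p[4])
{
	struct janggu_hdr h;

	h.bits   = p[3] + (p[2] << 8);
	h.pixels = p[1] + ((p[0] & 0x3f) << 8);
	h.info   = (p[0] & 0xc0) >> 6;
	h.plen   = ((h.bits + 47) >> 4) << 1;	/* same padded-length formula as the driver */
	return h;
}

int main(void)
{
	/* invented example: start-of-frame packet, 1280 pixels, 4000 payload bits */
	unsigned char p[4] = { 0x80 | (1280 >> 8), 1280 & 0xff, 4000 >> 8, 4000 & 0xff };
	struct janggu_hdr h = decode_hdr(p);

	/* prints: info=2 pixels=1280 bits=4000 plen=504 */
	printf("info=%u pixels=%u bits=%u plen=%u\n", h.info, h.pixels, h.bits, h.plen);
	return 0;
}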
diff --git a/drivers/media/video/gspca/se401.h b/drivers/media/video/gspca/se401.h
new file mode 100644
index 0000000..96d8ebf
--- /dev/null
+++ b/drivers/media/video/gspca/se401.h
@@ -0,0 +1,90 @@
+/*
+ * GSPCA Endpoints (formerly known as AOX) se401 USB Camera sub Driver
+ *
+ * Copyright (C) 2011 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on the v4l1 se401 driver which is:
+ *
+ * Copyright (c) 2000 Jeroen B. Vreeken (pe1rxq@amsat.org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define SE401_REQ_GET_CAMERA_DESCRIPTOR 0x06
+#define SE401_REQ_START_CONTINUOUS_CAPTURE 0x41
+#define SE401_REQ_STOP_CONTINUOUS_CAPTURE 0x42
+#define SE401_REQ_CAPTURE_FRAME 0x43
+#define SE401_REQ_GET_BRT 0x44
+#define SE401_REQ_SET_BRT 0x45
+#define SE401_REQ_GET_WIDTH 0x4c
+#define SE401_REQ_SET_WIDTH 0x4d
+#define SE401_REQ_GET_HEIGHT 0x4e
+#define SE401_REQ_SET_HEIGHT 0x4f
+#define SE401_REQ_GET_OUTPUT_MODE 0x50
+#define SE401_REQ_SET_OUTPUT_MODE 0x51
+#define SE401_REQ_GET_EXT_FEATURE 0x52
+#define SE401_REQ_SET_EXT_FEATURE 0x53
+#define SE401_REQ_CAMERA_POWER 0x56
+#define SE401_REQ_LED_CONTROL 0x57
+#define SE401_REQ_BIOS 0xff
+
+#define SE401_BIOS_READ 0x07
+
+#define SE401_FORMAT_BAYER 0x40
+
+/* Hyundai hv7131b registers
+ 7121 and 7141 should be the same (haven't really checked...) */
+/* Mode registers: */
+#define HV7131_REG_MODE_A 0x00
+#define HV7131_REG_MODE_B 0x01
+#define HV7131_REG_MODE_C 0x02
+/* Frame registers: */
+#define HV7131_REG_FRSU 0x10
+#define HV7131_REG_FRSL 0x11
+#define HV7131_REG_FCSU 0x12
+#define HV7131_REG_FCSL 0x13
+#define HV7131_REG_FWHU 0x14
+#define HV7131_REG_FWHL 0x15
+#define HV7131_REG_FWWU 0x16
+#define HV7131_REG_FWWL 0x17
+/* Timing registers: */
+#define HV7131_REG_THBU 0x20
+#define HV7131_REG_THBL 0x21
+#define HV7131_REG_TVBU 0x22
+#define HV7131_REG_TVBL 0x23
+#define HV7131_REG_TITU 0x25
+#define HV7131_REG_TITM 0x26
+#define HV7131_REG_TITL 0x27
+#define HV7131_REG_TMCD 0x28
+/* Adjust Registers: */
+#define HV7131_REG_ARLV 0x30
+#define HV7131_REG_ARCG 0x31
+#define HV7131_REG_AGCG 0x32
+#define HV7131_REG_ABCG 0x33
+#define HV7131_REG_APBV 0x34
+#define HV7131_REG_ASLP 0x54
+/* Offset Registers: */
+#define HV7131_REG_OFSR 0x50
+#define HV7131_REG_OFSG 0x51
+#define HV7131_REG_OFSB 0x52
+/* Reset level statistics registers: */
+#define HV7131_REG_LOREFNOH 0x57
+#define HV7131_REG_LOREFNOL 0x58
+#define HV7131_REG_HIREFNOH 0x59
+#define HV7131_REG_HIREFNOL 0x5a
+
+/* se401 registers */
+#define SE401_OPERATINGMODE 0x2000
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 81b8a60..c477ad1 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -2386,7 +2386,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x01, 0x22);
msleep(100);
reg01 = SCL_SEL_OD | S_PDN_INV;
- reg17 &= MCK_SIZE_MASK;
+ reg17 &= ~MCK_SIZE_MASK;
reg17 |= 0x04; /* clock / 4 */
break;
}
@@ -2532,6 +2532,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
if (!mode) { /* if 640x480 */
reg17 &= ~MCK_SIZE_MASK;
reg17 |= 0x04; /* clock / 4 */
+ } else {
+ reg01 &= ~SYS_SEL_48M; /* clk 24Mz */
+ reg17 &= ~MCK_SIZE_MASK;
+ reg17 |= 0x02; /* clock / 2 */
}
break;
case SENSOR_OV7630:
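The one-line sonixj change above is a masking fix: reg17 &= MCK_SIZE_MASK keeps only the clock-divider bits and wipes every other bit of reg17, while the intent, matching the later hunk in the same file, is reg17 &= ~MCK_SIZE_MASK, i.e. clear the divider field and preserve the rest before OR-ing in the new divider. A tiny illustration; the mask width and register value are assumptions, the define itself is not shown in the patch.

#include <stdio.h>

#define MCK_SIZE_MASK	0x07	/* assumed width of the clock-divider field */

int main(void)
{
	unsigned char reg17 = 0xe2;	/* other control bits set, divider = 2 */

	/* old code: keeps only the divider bits, the 0xe0 control bits are lost */
	unsigned char wrong = (unsigned char)((reg17 & MCK_SIZE_MASK) | 0x04);
	/* fixed code: clears just the divider field, everything else preserved */
	unsigned char right = (unsigned char)((reg17 & ~MCK_SIZE_MASK) | 0x04);

	printf("wrong: 0x%02x  right: 0x%02x\n", wrong, right);	/* 0x06 vs 0xe4 */
	return 0;
}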
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index b089c0d..6ec2329 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -247,7 +247,6 @@ static const struct cmd spca504A_clicksmart420_init_data[] = {
{0x30, 0x0004, 0x000a},
{0xb0, 0x0001, 0x0000},
-
{0xa1, 0x0080, 0x0001},
{0x30, 0x0049, 0x0000},
{0x30, 0x0060, 0x0005},
@@ -256,8 +255,6 @@ static const struct cmd spca504A_clicksmart420_init_data[] = {
{0x00, 0x0000, 0x2000},
{0x00, 0x0013, 0x2301},
{0x00, 0x0003, 0x2000},
- {0x00, 0x0000, 0x2000},
-
};
/* clicksmart 420 open data ? */
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 7e762d5..d1d733b 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1387,7 +1387,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
return 0;
case V4L2_CID_EFFECTS:
if ((unsigned) menu->index < ARRAY_SIZE(effects_control)) {
- strncpy((char *) menu->name,
+ strlcpy((char *) menu->name,
effects_control[menu->index],
sizeof menu->name);
return 0;
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index a27d93b..441dacf 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/i2c.h>
@@ -474,5 +474,6 @@ module_init(hdpvr_init);
module_exit(hdpvr_exit);
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.2.1");
MODULE_AUTHOR("Janne Grunau");
MODULE_DESCRIPTION("Hauppauge HD PVR driver");
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 514aea7..087f7c0 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -17,7 +17,6 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/videodev2.h>
@@ -574,7 +573,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strcpy(cap->driver, "hdpvr");
strcpy(cap->card, "Hauppauge HD PVR");
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = HDPVR_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_AUDIO |
V4L2_CAP_READWRITE;
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index 072f23c..d6439db 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -18,12 +18,6 @@
#include <media/v4l2-device.h>
#include <media/ir-kbd-i2c.h>
-#define HDPVR_MAJOR_VERSION 0
-#define HDPVR_MINOR_VERSION 2
-#define HDPVR_RELEASE 0
-#define HDPVR_VERSION \
- KERNEL_VERSION(HDPVR_MAJOR_VERSION, HDPVR_MINOR_VERSION, HDPVR_RELEASE)
-
#define HDPVR_MAX 8
#define HDPVR_I2C_MAX_SIZE 128
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 84bdf0f..8f9cc17 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -36,7 +36,6 @@
* using information provided by Jiun-Kuei Jung @ AVerMedia.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index a7f54b0..38f0522 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -722,8 +722,8 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
/* If there are subscribed events, then only use the new event
API instead of the old video.h based API. */
- if (!list_empty(&id->fh.events->subscribed)) {
- poll_wait(filp, &id->fh.events->wait, wait);
+ if (!list_empty(&id->fh.subscribed)) {
+ poll_wait(filp, &id->fh.wait, wait);
/* Turn off the old-style vsync events */
clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
if (v4l2_event_pending(&id->fh))
@@ -750,6 +750,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
+ unsigned res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
@@ -769,12 +770,16 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Encoder poll\n");
poll_wait(filp, &s->waitq, wait);
+ if (v4l2_event_pending(&id->fh))
+ res |= POLLPRI;
+ else
+ poll_wait(filp, &id->fh.wait, wait);
if (s->q_full.length || s->q_io.length)
- return POLLIN | POLLRDNORM;
+ return res | POLLIN | POLLRDNORM;
if (eof)
- return POLLHUP;
- return 0;
+ return res | POLLHUP;
+ return res;
}
void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
@@ -961,10 +966,6 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
return -ENOMEM;
}
v4l2_fh_init(&item->fh, s->vdev);
- if (s->type == IVTV_DEC_STREAM_TYPE_YUV ||
- s->type == IVTV_DEC_STREAM_TYPE_MPG) {
- res = v4l2_event_alloc(&item->fh, 60);
- }
if (res < 0) {
v4l2_fh_exit(&item->fh);
kfree(item);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 120c7d8..3e5c090 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -757,7 +757,6 @@ static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vc
strlcpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
strlcpy(vcap->card, itv->card_name, sizeof(vcap->card));
snprintf(vcap->bus_info, sizeof(vcap->bus_info), "PCI:%s", pci_name(itv->pdev));
- vcap->version = IVTV_DRIVER_VERSION; /* version */
vcap->capabilities = itv->v4l2_cap; /* capabilities */
return 0;
}
@@ -1451,11 +1450,11 @@ static int ivtv_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscripti
switch (sub->type) {
case V4L2_EVENT_VSYNC:
case V4L2_EVENT_EOS:
- break;
+ case V4L2_EVENT_CTRL:
+ return v4l2_event_subscribe(fh, sub, 0);
default:
return -EINVAL;
}
- return v4l2_event_subscribe(fh, sub);
}
static int ivtv_log_status(struct file *file, void *fh)
diff --git a/drivers/media/video/ivtv/ivtv-version.h b/drivers/media/video/ivtv/ivtv-version.h
index b67a404..a20f346 100644
--- a/drivers/media/video/ivtv/ivtv-version.h
+++ b/drivers/media/video/ivtv/ivtv-version.h
@@ -21,11 +21,6 @@
#define IVTV_VERSION_H
#define IVTV_DRIVER_NAME "ivtv"
-#define IVTV_DRIVER_VERSION_MAJOR 1
-#define IVTV_DRIVER_VERSION_MINOR 4
-#define IVTV_DRIVER_VERSION_PATCHLEVEL 2
-
-#define IVTV_VERSION __stringify(IVTV_DRIVER_VERSION_MAJOR) "." __stringify(IVTV_DRIVER_VERSION_MINOR) "." __stringify(IVTV_DRIVER_VERSION_PATCHLEVEL)
-#define IVTV_DRIVER_VERSION KERNEL_VERSION(IVTV_DRIVER_VERSION_MAJOR,IVTV_DRIVER_VERSION_MINOR,IVTV_DRIVER_VERSION_PATCHLEVEL)
+#define IVTV_VERSION "1.4.3"
#endif
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
index a45d8f0..3248ac8 100644
--- a/drivers/media/video/m5mols/m5mols_capture.c
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -18,7 +18,6 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/videodev2.h>
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 43c68f5..fb8e4a7 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -18,7 +18,6 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/videodev2.h>
diff --git a/drivers/media/video/marvell-ccic/Kconfig b/drivers/media/video/marvell-ccic/Kconfig
new file mode 100644
index 0000000..bf739e3
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/Kconfig
@@ -0,0 +1,23 @@
+config VIDEO_CAFE_CCIC
+ tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
+ depends on PCI && I2C && VIDEO_V4L2
+ select VIDEO_OV7670
+ select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a video4linux2 driver for the Marvell 88ALP01 integrated
+ CMOS camera controller. This is the controller found on first-
+ generation OLPC systems.
+
+config VIDEO_MMP_CAMERA
+ tristate "Marvell Armada 610 integrated camera controller support"
+ depends on ARCH_MMP && I2C && VIDEO_V4L2
+ select VIDEO_OV7670
+ select I2C_GPIO
+ select VIDEOBUF2_DMA_SG
+ ---help---
+ This is a Video4Linux2 driver for the integrated camera
+ controller found on Marvell Armada 610 application
+ processors (and likely beyond). This is the controller found
+ in OLPC XO 1.75 systems.
+
diff --git a/drivers/media/video/marvell-ccic/Makefile b/drivers/media/video/marvell-ccic/Makefile
new file mode 100644
index 0000000..05a792c
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
+cafe_ccic-y := cafe-driver.o mcam-core.o
+
+obj-$(CONFIG_VIDEO_MMP_CAMERA) += mmp_camera.o
+mmp_camera-y := mmp-driver.o mcam-core.o
+
diff --git a/drivers/media/video/marvell-ccic/cafe-driver.c b/drivers/media/video/marvell-ccic/cafe-driver.c
new file mode 100644
index 0000000..d030f9b
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/cafe-driver.c
@@ -0,0 +1,654 @@
+/*
+ * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
+ * multifunction chip. Currently works with the Omnivision OV7670
+ * sensor.
+ *
+ * The data sheet for this device can be found at:
+ * http://www.marvell.com/products/pc_connectivity/88alp01/
+ *
+ * Copyright 2006-11 One Laptop Per Child Association, Inc.
+ * Copyright 2006-11 Jonathan Corbet <corbet@lwn.net>
+ *
+ * Written by Jonathan Corbet, corbet@lwn.net.
+ *
+ * v4l2_device/v4l2_subdev conversion by:
+ * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "mcam-core.h"
+
+#define CAFE_VERSION 0x000002
+
+
+/*
+ * Parameters.
+ */
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("Video");
+
+
+
+
+struct cafe_camera {
+ int registered; /* Fully initialized? */
+ struct mcam_camera mcam;
+ struct pci_dev *pdev;
+ wait_queue_head_t smbus_wait; /* Waiting on i2c events */
+};
+
+/*
+ * Most of the camera controller registers are defined in mcam-core.h,
+ * but the Cafe platform has some additional registers of its own;
+ * they are described here.
+ */
+
+/*
+ * "General purpose register" has a couple of GPIOs used for sensor
+ * power and reset on OLPC XO 1.0 systems.
+ */
+#define REG_GPR 0xb4
+#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
+#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
+#define GPR_C1 0x00000002 /* Control 1 value */
+/*
+ * Control 0 is wired to reset on OLPC machines. For ov7x sensors,
+ * it is active low.
+ */
+#define GPR_C0 0x00000001 /* Control 0 value */
+
+/*
+ * These registers control the SMBUS module for communicating
+ * with the sensor.
+ */
+#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
+#define TWSIC0_EN 0x00000001 /* TWSI enable */
+#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
+#define TWSIC0_SID 0x000003fc /* Slave ID */
+/*
+ * Subtle trickery: the slave ID field starts with bit 2. But the
+ * Linux i2c stack wants to treat the bottommost bit as a separate
+ * read/write bit, which is why slave IDs are usually presented
+ * >>1. For consistency with that behavior, we shift over three
+ * bits instead of two.
+ */
+#define TWSIC0_SID_SHIFT 3
+#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
+#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
+#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
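A minimal sketch of the slave-ID packing described above (the 7-bit address 0x21 is only an illustrative value; the transfer code later in this file does the same packing inline):

	u16 addr = 0x21;	/* example 7-bit sensor address */
	u32 sid = (addr << TWSIC0_SID_SHIFT) & TWSIC0_SID;	/* lands inside the 0x3fc field */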
+
+#define REG_TWSIC1 0xbc /* TWSI control 1 */
+#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
+#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
+#define TWSIC1_ADDR_SHIFT 16
+#define TWSIC1_READ 0x01000000 /* Set for read op */
+#define TWSIC1_WSTAT 0x02000000 /* Write status */
+#define TWSIC1_RVALID 0x04000000 /* Read data valid */
+#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
+
+/*
+ * Here are the weird global control registers
+ */
+#define REG_GL_CSR 0x3004 /* Control/status register */
+#define GCSR_SRS 0x00000001 /* SW Reset set */
+#define GCSR_SRC 0x00000002 /* SW Reset clear */
+#define GCSR_MRS 0x00000004 /* Master reset set */
+#define GCSR_MRC 0x00000008 /* Master reset clear */
+#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
+#define REG_GL_IMASK 0x300c /* Interrupt mask register */
+#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
+
+#define REG_GL_FCR 0x3038 /* GPIO functional control register */
+#define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
+#define REG_GL_GPIOR 0x315c /* GPIO register */
+#define GGPIO_OUT 0x80000 /* GPIO output */
+#define GGPIO_VAL 0x00008 /* Output pin value */
+
+#define REG_LEN (REG_GL_IMASK + 4)
+
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err(&(cam)->pdev->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn(&(cam)->pdev->dev, fmt, ##arg);
+
+/* -------------------------------------------------------------------- */
+/*
+ * The I2C/SMBUS interface to the camera itself starts here. The
+ * controller handles SMBUS itself, presenting a relatively simple register
+ * interface; all we have to do is to tell it where to route the data.
+ */
+#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
+
+static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
+{
+ struct mcam_camera *m = container_of(dev, struct mcam_camera, v4l2_dev);
+ return container_of(m, struct cafe_camera, mcam);
+}
+
+
+static int cafe_smbus_write_done(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+ int c1;
+
+ /*
+ * We must delay after the interrupt, or the controller gets confused
+ * and never does give us good status. Fortunately, we don't do this
+ * often.
+ */
+ udelay(20);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ c1 = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
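+ /*
+ * The write counts as done once the write-status bit has dropped or
+ * the error bit has come up; the caller re-reads TWSIC1 afterwards
+ * to tell the two cases apart.
+ */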
+ return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
+}
+
+static int cafe_smbus_write_data(struct cafe_camera *cam,
+ u16 addr, u8 command, u8 value)
+{
+ unsigned int rval;
+ unsigned long flags;
+ struct mcam_camera *mcam = &cam->mcam;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
+ rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
+ /*
+ * Marvell sez set clkdiv to all 1's for now.
+ */
+ rval |= TWSIC0_CLKDIV;
+ mcam_reg_write(mcam, REG_TWSIC0, rval);
+ (void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
+ rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
+ mcam_reg_write(mcam, REG_TWSIC1, rval);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ /* Unfortunately, reading TWSIC1 too soon after sending a command
+ * causes the device to die.
+ * Use a busy-wait because we often send a large quantity of small
+ * commands at once; using msleep() would cause a lot of context
+ * switches which take longer than 2ms, resulting in noticeable
+ * boot-time and capture-start delays.
+ */
+ mdelay(2);
+
+ /*
+ * Another sad fact is that sometimes, commands silently complete but
+ * cafe_smbus_write_done() never becomes aware of this.
+ * This happens at random and can apparently occur with any
+ * command.
+ * We don't understand why this is. We work around this issue
+ * with the timeout in the wait below, assuming that all commands
+ * complete within the timeout.
+ */
+ wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(mcam),
+ CAFE_SMBUS_TIMEOUT);
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ if (rval & TWSIC1_WSTAT) {
+ cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
+ command, value);
+ return -EIO;
+ }
+ if (rval & TWSIC1_ERROR) {
+ cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
+ command, value);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+
+static int cafe_smbus_read_done(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+ int c1;
+
+ /*
+ * We must delay after the interrupt, or the controller gets confused
+ * and never does give us good status. Fortunately, we don't do this
+ * often.
+ */
+ udelay(20);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ c1 = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
+}
+
+
+
+static int cafe_smbus_read_data(struct cafe_camera *cam,
+ u16 addr, u8 command, u8 *value)
+{
+ unsigned int rval;
+ unsigned long flags;
+ struct mcam_camera *mcam = &cam->mcam;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
+ rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
+ /*
+ * Marvell sez set clkdiv to all 1's for now.
+ */
+ rval |= TWSIC0_CLKDIV;
+ mcam_reg_write(mcam, REG_TWSIC0, rval);
+ (void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
+ rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
+ mcam_reg_write(mcam, REG_TWSIC1, rval);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ wait_event_timeout(cam->smbus_wait,
+ cafe_smbus_read_done(mcam), CAFE_SMBUS_TIMEOUT);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ if (rval & TWSIC1_ERROR) {
+ cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
+ return -EIO;
+ }
+ if (!(rval & TWSIC1_RVALID)) {
+ cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
+ command);
+ return -EIO;
+ }
+ *value = rval & 0xff;
+ return 0;
+}
+
+/*
+ * Perform a transfer over SMBUS. This thing is called under
+ * the i2c bus lock, so we shouldn't race with ourselves...
+ */
+static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char rw, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct cafe_camera *cam = i2c_get_adapdata(adapter);
+ int ret = -EINVAL;
+
+ /*
+ * This interface would appear to only do byte data ops. OK
+ * it can do word too, but the cam chip has no use for that.
+ */
+ if (size != I2C_SMBUS_BYTE_DATA) {
+ cam_err(cam, "funky xfer size %d\n", size);
+ return -EINVAL;
+ }
+
+ if (rw == I2C_SMBUS_WRITE)
+ ret = cafe_smbus_write_data(cam, addr, command, data->byte);
+ else if (rw == I2C_SMBUS_READ)
+ ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
+ return ret;
+}
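For context, a sensor driver bound to this adapter reaches the handler above through the ordinary SMBus helpers; a minimal sketch (the client pointer and the 0x0a register offset are purely illustrative):

	s32 val = i2c_smbus_read_byte_data(client, 0x0a);	/* routed to cafe_smbus_xfer() */
	if (val < 0)
		return val;	/* -EIO from the transfer code above, etc. */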
+
+
+static void cafe_smbus_enable_irq(struct cafe_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->mcam.dev_lock, flags);
+ mcam_reg_set_bit(&cam->mcam, REG_IRQMASK, TWSIIRQS);
+ spin_unlock_irqrestore(&cam->mcam.dev_lock, flags);
+}
+
+static u32 cafe_smbus_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+}
+
+static struct i2c_algorithm cafe_smbus_algo = {
+ .smbus_xfer = cafe_smbus_xfer,
+ .functionality = cafe_smbus_func
+};
+
+static int cafe_smbus_setup(struct cafe_camera *cam)
+{
+ struct i2c_adapter *adap;
+ int ret;
+
+ adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+ if (adap == NULL)
+ return -ENOMEM;
+ cam->mcam.i2c_adapter = adap;
+ cafe_smbus_enable_irq(cam);
+ adap->owner = THIS_MODULE;
+ adap->algo = &cafe_smbus_algo;
+ strcpy(adap->name, "cafe_ccic");
+ adap->dev.parent = &cam->pdev->dev;
+ i2c_set_adapdata(adap, cam);
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ printk(KERN_ERR "Unable to register cafe i2c adapter\n");
+ return ret;
+}
+
+static void cafe_smbus_shutdown(struct cafe_camera *cam)
+{
+ i2c_del_adapter(cam->mcam.i2c_adapter);
+ kfree(cam->mcam.i2c_adapter);
+}
+
+
+/*
+ * Controller-level stuff
+ */
+
+static void cafe_ctlr_init(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ /*
+ * Added magic to bring up the hardware on the B-Test board
+ */
+ mcam_reg_write(mcam, 0x3038, 0x8);
+ mcam_reg_write(mcam, 0x315c, 0x80008);
+ /*
+ * Go through the dance needed to wake the device up.
+ * Note that these registers are global and shared
+ * with the NAND and SD devices. Interaction between the
+ * three still needs to be examined.
+ */
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
+ /*
+ * Here we must wait a bit for the controller to come around.
+ */
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ msleep(5);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
+ mcam_reg_set_bit(mcam, REG_GL_IMASK, GIMSK_CCIC_EN);
+ /*
+ * Mask all interrupts.
+ */
+ mcam_reg_write(mcam, REG_IRQMASK, 0);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+}
+
+
+static void cafe_ctlr_power_up(struct mcam_camera *mcam)
+{
+ /*
+ * Part one of the sensor dance: turn the global
+ * GPIO signal on.
+ */
+ mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
+ mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
+ /*
+ * Put the sensor into operational mode (assumes OLPC-style
+ * wiring). Control 0 is reset - set to 1 to operate.
+ * Control 1 is power down, set to 0 to operate.
+ */
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
+}
+
+static void cafe_ctlr_power_down(struct mcam_camera *mcam)
+{
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
+ mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
+ mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT);
+}
+
+
+
+/*
+ * The platform interrupt handler.
+ */
+static irqreturn_t cafe_irq(int irq, void *data)
+{
+ struct cafe_camera *cam = data;
+ struct mcam_camera *mcam = &cam->mcam;
+ unsigned int irqs, handled;
+
+ spin_lock(&mcam->dev_lock);
+ irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+ handled = cam->registered && mccic_irq(mcam, irqs);
+ if (irqs & TWSIIRQS) {
+ mcam_reg_write(mcam, REG_IRQSTAT, TWSIIRQS);
+ wake_up(&cam->smbus_wait);
+ handled = 1;
+ }
+ spin_unlock(&mcam->dev_lock);
+ return IRQ_RETVAL(handled);
+}
+
+
+/* -------------------------------------------------------------------------- */
+/*
+ * PCI interface stuff.
+ */
+
+static int cafe_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ struct cafe_camera *cam;
+ struct mcam_camera *mcam;
+
+ /*
+ * Start putting together one of our big camera structures.
+ */
+ ret = -ENOMEM;
+ cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
+ if (cam == NULL)
+ goto out;
+ cam->pdev = pdev;
+ mcam = &cam->mcam;
+ mcam->chip_id = V4L2_IDENT_CAFE;
+ spin_lock_init(&mcam->dev_lock);
+ init_waitqueue_head(&cam->smbus_wait);
+ mcam->plat_power_up = cafe_ctlr_power_up;
+ mcam->plat_power_down = cafe_ctlr_power_down;
+ mcam->dev = &pdev->dev;
+ /*
+ * Set the clock speed for the XO 1; I don't believe this
+ * driver has ever run anywhere else.
+ */
+ mcam->clock_speed = 45;
+ mcam->use_smbus = 1;
+ /*
+ * Vmalloc mode for buffers is traditional with this driver.
+ * We *might* be able to run DMA_contig, especially on a system
+ * with CMA in it.
+ */
+ mcam->buffer_mode = B_vmalloc;
+ /*
+ * Get set up on the PCI bus.
+ */
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out_free;
+ pci_set_master(pdev);
+
+ ret = -EIO;
+ mcam->regs = pci_iomap(pdev, 0, 0);
+ if (!mcam->regs) {
+ printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
+ goto out_disable;
+ }
+ ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
+ if (ret)
+ goto out_iounmap;
+
+ /*
+ * Initialize the controller and leave it powered up. It will
+ * stay that way until the sensor driver shows up.
+ */
+ cafe_ctlr_init(mcam);
+ cafe_ctlr_power_up(mcam);
+ /*
+ * Set up I2C/SMBUS communications. We have to drop the mutex here
+ * because the sensor could attach in this call chain, leading to
+ * unsightly deadlocks.
+ */
+ ret = cafe_smbus_setup(cam);
+ if (ret)
+ goto out_pdown;
+
+ ret = mccic_register(mcam);
+ if (ret == 0) {
+ cam->registered = 1;
+ return 0;
+ }
+
+ cafe_smbus_shutdown(cam);
+out_pdown:
+ cafe_ctlr_power_down(mcam);
+ free_irq(pdev->irq, cam);
+out_iounmap:
+ pci_iounmap(pdev, mcam->regs);
+out_disable:
+ pci_disable_device(pdev);
+out_free:
+ kfree(cam);
+out:
+ return ret;
+}
+
+
+/*
+ * Shut down an initialized device
+ */
+static void cafe_shutdown(struct cafe_camera *cam)
+{
+ mccic_shutdown(&cam->mcam);
+ cafe_smbus_shutdown(cam);
+ free_irq(cam->pdev->irq, cam);
+ pci_iounmap(cam->pdev, cam->mcam.regs);
+}
+
+
+static void cafe_pci_remove(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+
+ if (cam == NULL) {
+ printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
+ return;
+ }
+ cafe_shutdown(cam);
+ kfree(cam);
+}
+
+
+#ifdef CONFIG_PM
+/*
+ * Basic power management.
+ */
+static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+ mccic_suspend(&cam->mcam);
+ pci_disable_device(pdev);
+ return 0;
+}
+
+
+static int cafe_pci_resume(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+ int ret = 0;
+
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+
+ if (ret) {
+ cam_warn(cam, "Unable to re-enable device on resume!\n");
+ return ret;
+ }
+ cafe_ctlr_init(&cam->mcam);
+ return mccic_resume(&cam->mcam);
+}
+
+#endif /* CONFIG_PM */
+
+static struct pci_device_id cafe_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
+ PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, cafe_ids);
+
+static struct pci_driver cafe_pci_driver = {
+ .name = "cafe1000-ccic",
+ .id_table = cafe_ids,
+ .probe = cafe_pci_probe,
+ .remove = cafe_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = cafe_pci_suspend,
+ .resume = cafe_pci_resume,
+#endif
+};
+
+
+
+
+static int __init cafe_init(void)
+{
+ int ret;
+
+ printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
+ CAFE_VERSION);
+ ret = pci_register_driver(&cafe_pci_driver);
+ if (ret) {
+ printk(KERN_ERR "Unable to register cafe_ccic driver\n");
+ goto out;
+ }
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+static void __exit cafe_exit(void)
+{
+ pci_unregister_driver(&cafe_pci_driver);
+}
+
+module_init(cafe_init);
+module_exit(cafe_exit);
diff --git a/drivers/media/video/marvell-ccic/mcam-core.c b/drivers/media/video/marvell-ccic/mcam-core.c
new file mode 100644
index 0000000..83c1451
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/mcam-core.c
@@ -0,0 +1,1843 @@
+/*
+ * The Marvell camera core. This device appears in a number of settings,
+ * so it needs platform-specific support outside of the core.
+ *
+ * Copyright 2011 Jonathan Corbet corbet@lwn.net
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/ov7670.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "mcam-core.h"
+
+/*
+ * Basic frame stats - to be deleted shortly
+ */
+static int frames;
+static int singles;
+static int delivered;
+
+#ifdef MCAM_MODE_VMALLOC
+/*
+ * Internal DMA buffer management. Since the controller cannot do S/G I/O,
+ * we must have physically contiguous buffers to bring frames into.
+ * These parameters control how many buffers we use, whether we
+ * allocate them at load time (better chance of success, but nails down
+ * memory) or when somebody tries to use the camera (riskier), and,
+ * for load-time allocation, how big they should be.
+ *
+ * The controller can cycle through three buffers. We could use
+ * more by flipping pointers around, but it probably makes little
+ * sense.
+ */
+
+static int alloc_bufs_at_read;
+module_param(alloc_bufs_at_read, bool, 0444);
+MODULE_PARM_DESC(alloc_bufs_at_read,
+ "Non-zero value causes DMA buffers to be allocated when the "
+ "video capture device is read, rather than at module load "
+ "time. This saves memory, but decreases the chances of "
+ "successfully getting those buffers. This parameter is "
+ "only used in the vmalloc buffer mode");
+
+static int n_dma_bufs = 3;
+module_param(n_dma_bufs, uint, 0644);
+MODULE_PARM_DESC(n_dma_bufs,
+ "The number of DMA buffers to allocate. Can be either two "
+ "(saves memory, makes timing tighter) or three.");
+
+static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
+module_param(dma_buf_size, uint, 0444);
+MODULE_PARM_DESC(dma_buf_size,
+ "The size of the allocated DMA buffers. If actual operating "
+ "parameters require larger buffers, an attempt to reallocate "
+ "will be made.");
+#else /* MCAM_MODE_VMALLOC */
+static const int alloc_bufs_at_read = 0;
+static const int n_dma_bufs = 3; /* Used by S/G_PARM */
+#endif /* MCAM_MODE_VMALLOC */
+
+static int flip;
+module_param(flip, bool, 0444);
+MODULE_PARM_DESC(flip,
+ "If set, the sensor will be instructed to flip the image "
+ "vertically.");
+
+static int buffer_mode = -1;
+module_param(buffer_mode, int, 0444);
+MODULE_PARM_DESC(buffer_mode,
+ "Set the buffer mode to be used; default is to go with what "
+ "the platform driver asks for. Set to 0 for vmalloc, 1 for "
+ "DMA contiguous.");
+
+/*
+ * Status flags. Always manipulated with bit operations.
+ */
+#define CF_BUF0_VALID 0 /* Buffers valid - first three */
+#define CF_BUF1_VALID 1
+#define CF_BUF2_VALID 2
+#define CF_DMA_ACTIVE 3 /* A frame is incoming */
+#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
+#define CF_SINGLE_BUFFER 5 /* Running with a single buffer */
+#define CF_SG_RESTART 6 /* SG restart needed */
+
+#define sensor_call(cam, o, f, args...) \
+ v4l2_subdev_call(cam->sensor, o, f, ##args)
+
+static struct mcam_format_struct {
+ __u8 *desc;
+ __u32 pixelformat;
+ int bpp; /* Bytes per pixel */
+ enum v4l2_mbus_pixelcode mbus_code;
+} mcam_formats[] = {
+ {
+ .desc = "YUYV 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ },
+ {
+ .desc = "RGB 444",
+ .pixelformat = V4L2_PIX_FMT_RGB444,
+ .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .bpp = 2,
+ },
+ {
+ .desc = "RGB 565",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ },
+ {
+ .desc = "Raw RGB Bayer",
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .bpp = 1
+ },
+};
+#define N_MCAM_FMTS ARRAY_SIZE(mcam_formats)
+
+static struct mcam_format_struct *mcam_find_format(u32 pixelformat)
+{
+ unsigned i;
+
+ for (i = 0; i < N_MCAM_FMTS; i++)
+ if (mcam_formats[i].pixelformat == pixelformat)
+ return mcam_formats + i;
+ /* Not found? Then return the first format. */
+ return mcam_formats;
+}
+
+/*
+ * The default format we use until somebody says otherwise.
+ */
+static const struct v4l2_pix_format mcam_def_pix_format = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = VGA_WIDTH*2,
+ .sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
+};
+
+static const enum v4l2_mbus_pixelcode mcam_def_mbus_code =
+ V4L2_MBUS_FMT_YUYV8_2X8;
+
+
+/*
+ * The two-word DMA descriptor format used by the Armada 610 and the like. There
+ * is a three-word format as well (set C1_DESC_3WORD) where the third
+ * word is a pointer to the next descriptor, but we don't use it. Two-word
+ * descriptors have to be contiguous in memory.
+ */
+struct mcam_dma_desc {
+ u32 dma_addr;
+ u32 segment_len;
+};
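A minimal sketch of how a mapped scatterlist gets flattened into this contiguous descriptor array (sglist, nent and desc are assumed to come from the caller; the driver's own version of this loop lives in mcam_vb_sg_buf_prepare() further down):

	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nent, i) {
		desc->dma_addr = sg_dma_address(sg);
		desc->segment_len = sg_dma_len(sg);
		desc++;		/* two-word descriptors packed back to back */
	}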
+
+/*
+ * Our buffer type for working with videobuf2. Note that the vb2
+ * developers have decreed that struct vb2_buffer must be at the
+ * beginning of this structure.
+ */
+struct mcam_vb_buffer {
+ struct vb2_buffer vb_buf;
+ struct list_head queue;
+ struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
+ dma_addr_t dma_desc_pa; /* Descriptor physical address */
+ int dma_desc_nent; /* Number of mapped descriptors */
+};
+
+static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct mcam_vb_buffer, vb_buf);
+}
+
+/*
+ * Hand a completed buffer back to user space.
+ */
+static void mcam_buffer_done(struct mcam_camera *cam, int frame,
+ struct vb2_buffer *vbuf)
+{
+ vbuf->v4l2_buf.bytesused = cam->pix_format.sizeimage;
+ vbuf->v4l2_buf.sequence = cam->buf_seq[frame];
+ vb2_set_plane_payload(vbuf, 0, cam->pix_format.sizeimage);
+ vb2_buffer_done(vbuf, VB2_BUF_STATE_DONE);
+}
+
+
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err((cam)->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn((cam)->dev, fmt, ##arg);
+#define cam_dbg(cam, fmt, arg...) \
+ dev_dbg((cam)->dev, fmt, ##arg);
+
+
+/*
+ * Flag manipulation helpers
+ */
+static void mcam_reset_buffers(struct mcam_camera *cam)
+{
+ int i;
+
+ cam->next_buf = -1;
+ for (i = 0; i < cam->nbufs; i++)
+ clear_bit(i, &cam->flags);
+}
+
+static inline int mcam_needs_config(struct mcam_camera *cam)
+{
+ return test_bit(CF_CONFIG_NEEDED, &cam->flags);
+}
+
+static void mcam_set_config_needed(struct mcam_camera *cam, int needed)
+{
+ if (needed)
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ else
+ clear_bit(CF_CONFIG_NEEDED, &cam->flags);
+}
+
+/* ------------------------------------------------------------------- */
+/*
+ * Make the controller start grabbing images. Everything must
+ * be set up before doing this.
+ */
+static void mcam_ctlr_start(struct mcam_camera *cam)
+{
+ /* set_bit performs a read, so no other barrier should be
+ needed here */
+ mcam_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
+}
+
+static void mcam_ctlr_stop(struct mcam_camera *cam)
+{
+ mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
+}
+
+/* ------------------------------------------------------------------- */
+
+#ifdef MCAM_MODE_VMALLOC
+/*
+ * Code specific to the vmalloc buffer mode.
+ */
+
+/*
+ * Allocate in-kernel DMA buffers for vmalloc mode.
+ */
+static int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
+{
+ int i;
+
+ mcam_set_config_needed(cam, 1);
+ if (loadtime)
+ cam->dma_buf_size = dma_buf_size;
+ else
+ cam->dma_buf_size = cam->pix_format.sizeimage;
+ if (n_dma_bufs > 3)
+ n_dma_bufs = 3;
+
+ cam->nbufs = 0;
+ for (i = 0; i < n_dma_bufs; i++) {
+ cam->dma_bufs[i] = dma_alloc_coherent(cam->dev,
+ cam->dma_buf_size, cam->dma_handles + i,
+ GFP_KERNEL);
+ if (cam->dma_bufs[i] == NULL) {
+ cam_warn(cam, "Failed to allocate DMA buffer\n");
+ break;
+ }
+ (cam->nbufs)++;
+ }
+
+ switch (cam->nbufs) {
+ case 1:
+ dma_free_coherent(cam->dev, cam->dma_buf_size,
+ cam->dma_bufs[0], cam->dma_handles[0]);
+ cam->nbufs = 0;
+ case 0:
+ cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
+ return -ENOMEM;
+
+ case 2:
+ if (n_dma_bufs > 2)
+ cam_warn(cam, "Will limp along with only 2 buffers\n");
+ break;
+ }
+ return 0;
+}
+
+static void mcam_free_dma_bufs(struct mcam_camera *cam)
+{
+ int i;
+
+ for (i = 0; i < cam->nbufs; i++) {
+ dma_free_coherent(cam->dev, cam->dma_buf_size,
+ cam->dma_bufs[i], cam->dma_handles[i]);
+ cam->dma_bufs[i] = NULL;
+ }
+ cam->nbufs = 0;
+}
+
+
+/*
+ * Set up DMA buffers when operating in vmalloc mode
+ */
+static void mcam_ctlr_dma_vmalloc(struct mcam_camera *cam)
+{
+ /*
+ * Store the first two Y buffers (we aren't supporting
+ * planar formats for now, so no UV bufs). Then either
+ * set the third if it exists, or tell the controller
+ * to just use two.
+ */
+ mcam_reg_write(cam, REG_Y0BAR, cam->dma_handles[0]);
+ mcam_reg_write(cam, REG_Y1BAR, cam->dma_handles[1]);
+ if (cam->nbufs > 2) {
+ mcam_reg_write(cam, REG_Y2BAR, cam->dma_handles[2]);
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ } else
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ if (cam->chip_id == V4L2_IDENT_CAFE)
+ mcam_reg_write(cam, REG_UBAR, 0); /* 32 bits only */
+}
+
+/*
+ * Copy data out to user space in the vmalloc case
+ */
+static void mcam_frame_tasklet(unsigned long data)
+{
+ struct mcam_camera *cam = (struct mcam_camera *) data;
+ int i;
+ unsigned long flags;
+ struct mcam_vb_buffer *buf;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ for (i = 0; i < cam->nbufs; i++) {
+ int bufno = cam->next_buf;
+
+ if (cam->state != S_STREAMING || bufno < 0)
+ break; /* I/O got stopped */
+ if (++(cam->next_buf) >= cam->nbufs)
+ cam->next_buf = 0;
+ if (!test_bit(bufno, &cam->flags))
+ continue;
+ if (list_empty(&cam->buffers)) {
+ singles++;
+ break; /* Leave it valid, hope for better later */
+ }
+ delivered++;
+ clear_bit(bufno, &cam->flags);
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
+ queue);
+ list_del_init(&buf->queue);
+ /*
+ * Drop the lock during the big copy. This *should* be safe...
+ */
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ memcpy(vb2_plane_vaddr(&buf->vb_buf, 0), cam->dma_bufs[bufno],
+ cam->pix_format.sizeimage);
+ mcam_buffer_done(cam, bufno, &buf->vb_buf);
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ }
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+/*
+ * Make sure our allocated buffers are up to the task.
+ */
+static int mcam_check_dma_buffers(struct mcam_camera *cam)
+{
+ if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
+ mcam_free_dma_bufs(cam);
+ if (cam->nbufs == 0)
+ return mcam_alloc_dma_bufs(cam, 0);
+ return 0;
+}
+
+static void mcam_vmalloc_done(struct mcam_camera *cam, int frame)
+{
+ tasklet_schedule(&cam->s_tasklet);
+}
+
+#else /* MCAM_MODE_VMALLOC */
+
+static inline int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
+{
+ return 0;
+}
+
+static inline void mcam_free_dma_bufs(struct mcam_camera *cam)
+{
+ return;
+}
+
+static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
+{
+ return 0;
+}
+
+
+
+#endif /* MCAM_MODE_VMALLOC */
+
+
+#ifdef MCAM_MODE_DMA_CONTIG
+/* ---------------------------------------------------------------------- */
+/*
+ * DMA-contiguous code.
+ */
+/*
+ * Set up a contiguous buffer for the given frame. Here also is where
+ * the underrun strategy is set: if there is no buffer available, reuse
+ * the buffer from the other BAR and set the CF_SINGLE_BUFFER flag to
+ * keep the interrupt handler from giving that buffer back to user
+ * space. In this way, we always have a buffer to DMA to and don't
+ * have to try to play games stopping and restarting the controller.
+ */
+static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf;
+ /*
+ * If there are no available buffers, go into single mode
+ */
+ if (list_empty(&cam->buffers)) {
+ buf = cam->vb_bufs[frame ^ 0x1];
+ cam->vb_bufs[frame] = buf;
+ mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
+ vb2_dma_contig_plane_paddr(&buf->vb_buf, 0));
+ set_bit(CF_SINGLE_BUFFER, &cam->flags);
+ singles++;
+ return;
+ }
+ /*
+ * OK, we have a buffer we can use.
+ */
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
+ list_del_init(&buf->queue);
+ mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
+ vb2_dma_contig_plane_paddr(&buf->vb_buf, 0));
+ cam->vb_bufs[frame] = buf;
+ clear_bit(CF_SINGLE_BUFFER, &cam->flags);
+}
+
+/*
+ * Initial B_DMA_contig setup.
+ */
+static void mcam_ctlr_dma_contig(struct mcam_camera *cam)
+{
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ cam->nbufs = 2;
+ mcam_set_contig_buffer(cam, 0);
+ mcam_set_contig_buffer(cam, 1);
+}
+
+/*
+ * Frame completion handling.
+ */
+static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf = cam->vb_bufs[frame];
+
+ if (!test_bit(CF_SINGLE_BUFFER, &cam->flags)) {
+ delivered++;
+ mcam_buffer_done(cam, frame, &buf->vb_buf);
+ }
+ mcam_set_contig_buffer(cam, frame);
+}
+
+#endif /* MCAM_MODE_DMA_CONTIG */
+
+#ifdef MCAM_MODE_DMA_SG
+/* ---------------------------------------------------------------------- */
+/*
+ * Scatter/gather-specific code.
+ */
+
+/*
+ * Set up the next buffer for S/G I/O; caller should be sure that
+ * the controller is stopped and a buffer is available.
+ */
+static void mcam_sg_next_buffer(struct mcam_camera *cam)
+{
+ struct mcam_vb_buffer *buf;
+
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
+ list_del_init(&buf->queue);
+ mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
+ mcam_reg_write(cam, REG_DESC_LEN_Y,
+ buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
+ mcam_reg_write(cam, REG_DESC_LEN_U, 0);
+ mcam_reg_write(cam, REG_DESC_LEN_V, 0);
+ cam->vb_bufs[0] = buf;
+}
+
+/*
+ * Initial B_DMA_sg setup
+ */
+static void mcam_ctlr_dma_sg(struct mcam_camera *cam)
+{
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_3WORD);
+ mcam_sg_next_buffer(cam);
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ cam->nbufs = 3;
+}
+
+
+/*
+ * Frame completion with S/G is trickier. We can't muck with
+ * a descriptor chain on the fly, since the controller buffers it
+ * internally. So we have to actually stop and restart; Marvell
+ * says this is the way to do it.
+ *
+ * Of course, stopping is easier said than done; experience shows
+ * that the controller can start a frame *after* C0_ENABLE has been
+ * cleared. So when running in S/G mode, the controller is "stopped"
+ * on receipt of the start-of-frame interrupt. That means we can
+ * safely change the DMA descriptor array here and restart things
+ * (assuming there's another buffer waiting to go).
+ */
+static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf = cam->vb_bufs[0];
+
+ /*
+ * Very Bad Not Good Things happen if you don't clear
+ * C1_DESC_ENA before making any descriptor changes.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ /*
+ * If we have another buffer available, put it in and
+ * restart the engine.
+ */
+ if (!list_empty(&cam->buffers)) {
+ mcam_sg_next_buffer(cam);
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ mcam_ctlr_start(cam);
+ /*
+ * Otherwise set CF_SG_RESTART and the controller will
+ * be restarted once another buffer shows up.
+ */
+ } else {
+ set_bit(CF_SG_RESTART, &cam->flags);
+ singles++;
+ }
+ /*
+ * Now we can give the completed frame back to user space.
+ */
+ delivered++;
+ mcam_buffer_done(cam, frame, &buf->vb_buf);
+}
+
+
+/*
+ * Scatter/gather mode requires stopping the controller between
+ * frames so we can put in a new DMA descriptor array. If no new
+ * buffer exists at frame completion, the controller is left stopped;
+ * this function is charged with getting things going again.
+ */
+static void mcam_sg_restart(struct mcam_camera *cam)
+{
+ mcam_ctlr_dma_sg(cam);
+ mcam_ctlr_start(cam);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+}
+
+#else /* MCAM_MODE_DMA_SG */
+
+static inline void mcam_sg_restart(struct mcam_camera *cam)
+{
+ return;
+}
+
+#endif /* MCAM_MODE_DMA_SG */
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Buffer-mode-independent controller code.
+ */
+
+/*
+ * Image format setup
+ */
+static void mcam_ctlr_image(struct mcam_camera *cam)
+{
+ int imgsz;
+ struct v4l2_pix_format *fmt = &cam->pix_format;
+
+ imgsz = ((fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK) |
+ (fmt->bytesperline & IMGSZ_H_MASK);
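+ /*
+ * For the default VGA YUYV format this packs 480 lines into the
+ * vertical field and 640 * 2 = 1280 bytes/line into the horizontal
+ * field.
+ */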
+ mcam_reg_write(cam, REG_IMGSIZE, imgsz);
+ mcam_reg_write(cam, REG_IMGOFFSET, 0);
+ /* YPITCH just drops the last two bits */
+ mcam_reg_write_mask(cam, REG_IMGPITCH, fmt->bytesperline,
+ IMGP_YP_MASK);
+ /*
+ * Tell the controller about the image format we are using.
+ */
+ switch (cam->pix_format.pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV|C0_YUV_PACKED|C0_YUVE_YUYV,
+ C0_DF_MASK);
+ break;
+
+ case V4L2_PIX_FMT_RGB444:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB|C0_RGBF_444|C0_RGB4_XRGB,
+ C0_DF_MASK);
+ /* Alpha value? */
+ break;
+
+ case V4L2_PIX_FMT_RGB565:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB|C0_RGBF_565|C0_RGB5_BGGR,
+ C0_DF_MASK);
+ break;
+
+ default:
+ cam_err(cam, "Unknown format %x\n", cam->pix_format.pixelformat);
+ break;
+ }
+ /*
+ * Make sure it knows we want to use hsync/vsync.
+ */
+ mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC,
+ C0_SIFM_MASK);
+}
+
+
+/*
+ * Configure the controller for operation; caller holds the
+ * device mutex.
+ */
+static int mcam_ctlr_configure(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ cam->dma_setup(cam);
+ mcam_ctlr_image(cam);
+ mcam_set_config_needed(cam, 0);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return 0;
+}
+
+static void mcam_ctlr_irq_enable(struct mcam_camera *cam)
+{
+ /*
+ * Clear any pending interrupts, since we do not
+ * expect to have I/O active prior to enabling.
+ */
+ mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
+ mcam_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
+}
+
+static void mcam_ctlr_irq_disable(struct mcam_camera *cam)
+{
+ mcam_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
+}
+
+
+
+static void mcam_ctlr_init(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ /*
+ * Make sure it's not powered down.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
+ /*
+ * Turn off the enable bit. It sure should be off anyway,
+ * but it's good to be sure.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
+ /*
+ * Clock the sensor appropriately. Controller clock should
+ * be 48MHz, sensor "typical" value is half that.
+ */
+ mcam_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+/*
+ * Stop the controller, and don't return until we're really sure that no
+ * further DMA is going on.
+ */
+static void mcam_ctlr_stop_dma(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ /*
+ * Theory: stop the camera controller (whether it is operating
+ * or not). Delay briefly just in case we race with the SOF
+ * interrupt, then wait until no DMA is active.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+ mcam_ctlr_stop(cam);
+ cam->state = S_IDLE;
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ msleep(40);
+ if (test_bit(CF_DMA_ACTIVE, &cam->flags))
+ cam_err(cam, "Timeout waiting for DMA to end\n");
+ /* This would be bad news - what now? */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ mcam_ctlr_irq_disable(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/*
+ * Power up and down.
+ */
+static void mcam_ctlr_power_up(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ cam->plat_power_up(cam);
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ msleep(5); /* Just to be sure */
+}
+
+static void mcam_ctlr_power_down(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ /*
+ * School of hard knocks department: be sure we do any register
+ * twiddling on the controller *before* calling the platform
+ * power down routine.
+ */
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
+ cam->plat_power_down(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/* -------------------------------------------------------------------- */
+/*
+ * Communications with the sensor.
+ */
+
+static int __mcam_cam_reset(struct mcam_camera *cam)
+{
+ return sensor_call(cam, core, reset, 0);
+}
+
+/*
+ * We have found the sensor on the i2c bus. Let's try to have a
+ * conversation.
+ */
+static int mcam_cam_init(struct mcam_camera *cam)
+{
+ struct v4l2_dbg_chip_ident chip;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ if (cam->state != S_NOTREADY)
+ cam_warn(cam, "Cam init with device in funky state %d",
+ cam->state);
+ ret = __mcam_cam_reset(cam);
+ if (ret)
+ goto out;
+ chip.ident = V4L2_IDENT_NONE;
+ chip.match.type = V4L2_CHIP_MATCH_I2C_ADDR;
+ chip.match.addr = cam->sensor_addr;
+ ret = sensor_call(cam, core, g_chip_ident, &chip);
+ if (ret)
+ goto out;
+ cam->sensor_type = chip.ident;
+ if (cam->sensor_type != V4L2_IDENT_OV7670) {
+ cam_err(cam, "Unsupported sensor type 0x%x", cam->sensor_type);
+ ret = -EINVAL;
+ goto out;
+ }
+/* Get/set parameters? */
+ ret = 0;
+ cam->state = S_IDLE;
+out:
+ mcam_ctlr_power_down(cam);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+/*
+ * Configure the sensor to match the parameters we have. Caller should
+ * hold s_mutex
+ */
+static int mcam_cam_set_flip(struct mcam_camera *cam)
+{
+ struct v4l2_control ctrl;
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_VFLIP;
+ ctrl.value = flip;
+ return sensor_call(cam, core, s_ctrl, &ctrl);
+}
+
+
+static int mcam_cam_configure(struct mcam_camera *cam)
+{
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
+ ret = sensor_call(cam, core, init, 0);
+ if (ret == 0)
+ ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
+ /*
+ * OV7670 does weird things if flip is set *before* format...
+ */
+ ret += mcam_cam_set_flip(cam);
+ return ret;
+}
+
+/*
+ * Get everything ready, and start grabbing frames.
+ */
+static int mcam_read_setup(struct mcam_camera *cam)
+{
+ int ret;
+ unsigned long flags;
+
+ /*
+ * Configuration. If we still don't have DMA buffers,
+ * make one last, desperate attempt.
+ */
+ if (cam->buffer_mode == B_vmalloc && cam->nbufs == 0 &&
+ mcam_alloc_dma_bufs(cam, 0))
+ return -ENOMEM;
+
+ if (mcam_needs_config(cam)) {
+ mcam_cam_configure(cam);
+ ret = mcam_ctlr_configure(cam);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Turn it loose.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ mcam_reset_buffers(cam);
+ mcam_ctlr_irq_enable(cam);
+ cam->state = S_STREAMING;
+ mcam_ctlr_start(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+/*
+ * Videobuf2 interface code.
+ */
+
+static int mcam_vb_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
+ unsigned int *num_planes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+ int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2;
+
+ sizes[0] = cam->pix_format.sizeimage;
+ *num_planes = 1; /* Someday we'll have to support planar formats... */
+ if (*nbufs < minbufs)
+ *nbufs = minbufs;
+ if (cam->buffer_mode == B_DMA_contig)
+ alloc_ctxs[0] = cam->vb_alloc_ctx;
+ return 0;
+}
+
+
+static void mcam_vb_buf_queue(struct vb2_buffer *vb)
+{
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+ int start;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ start = (cam->state == S_BUFWAIT) && !list_empty(&cam->buffers);
+ list_add(&mvb->queue, &cam->buffers);
+ if (test_bit(CF_SG_RESTART, &cam->flags))
+ mcam_sg_restart(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ if (start)
+ mcam_read_setup(cam);
+}
+
+
+/*
+ * vb2 uses these to release the mutex when waiting in dqbuf. I'm
+ * not actually sure we need to do this (I'm not sure that vb2_dqbuf() needs
+ * to be called with the mutex held), but better safe than sorry.
+ */
+static void mcam_vb_wait_prepare(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+ mutex_unlock(&cam->s_mutex);
+}
+
+static void mcam_vb_wait_finish(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+ mutex_lock(&cam->s_mutex);
+}
+
+/*
+ * These need to be called with the mutex held from vb2
+ */
+static int mcam_vb_start_streaming(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+ if (cam->state != S_IDLE)
+ return -EINVAL;
+ cam->sequence = 0;
+ /*
+ * Videobuf2 sneakily hoards all the buffers and won't
+ * give them to us until *after* streaming starts. But
+ * we can't actually start streaming until we have a
+ * destination. So go into a wait state and hope they
+ * give us buffers soon.
+ */
+ if (cam->buffer_mode != B_vmalloc && list_empty(&cam->buffers)) {
+ cam->state = S_BUFWAIT;
+ return 0;
+ }
+ return mcam_read_setup(cam);
+}
+
+static int mcam_vb_stop_streaming(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+ unsigned long flags;
+
+ if (cam->state == S_BUFWAIT) {
+ /* They never gave us buffers */
+ cam->state = S_IDLE;
+ return 0;
+ }
+ if (cam->state != S_STREAMING)
+ return -EINVAL;
+ mcam_ctlr_stop_dma(cam);
+ /*
+ * VB2 reclaims the buffers, so we need to forget
+ * about them.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ INIT_LIST_HEAD(&cam->buffers);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return 0;
+}
+
+
+static const struct vb2_ops mcam_vb2_ops = {
+ .queue_setup = mcam_vb_queue_setup,
+ .buf_queue = mcam_vb_buf_queue,
+ .start_streaming = mcam_vb_start_streaming,
+ .stop_streaming = mcam_vb_stop_streaming,
+ .wait_prepare = mcam_vb_wait_prepare,
+ .wait_finish = mcam_vb_wait_finish,
+};
+
+
+#ifdef MCAM_MODE_DMA_SG
+/*
+ * Scatter/gather mode uses all of the above functions plus a
+ * few extras to deal with DMA mapping.
+ */
+static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
+{
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
+
+ mvb->dma_desc = dma_alloc_coherent(cam->dev,
+ ndesc * sizeof(struct mcam_dma_desc),
+ &mvb->dma_desc_pa, GFP_KERNEL);
+ if (mvb->dma_desc == NULL) {
+ cam_err(cam, "Unable to get DMA descriptor array\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+ struct mcam_dma_desc *desc = mvb->dma_desc;
+ struct scatterlist *sg;
+ int i;
+
+ mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages,
+ DMA_FROM_DEVICE);
+ if (mvb->dma_desc_nent <= 0)
+ return -EIO; /* Not sure what's right here */
+ for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) {
+ desc->dma_addr = sg_dma_address(sg);
+ desc->segment_len = sg_dma_len(sg);
+ desc++;
+ }
+ return 0;
+}
+
+static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+
+ dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE);
+ return 0;
+}
+
+static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
+
+ dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
+ mvb->dma_desc, mvb->dma_desc_pa);
+}
+
+
+static const struct vb2_ops mcam_vb2_sg_ops = {
+ .queue_setup = mcam_vb_queue_setup,
+ .buf_init = mcam_vb_sg_buf_init,
+ .buf_prepare = mcam_vb_sg_buf_prepare,
+ .buf_queue = mcam_vb_buf_queue,
+ .buf_finish = mcam_vb_sg_buf_finish,
+ .buf_cleanup = mcam_vb_sg_buf_cleanup,
+ .start_streaming = mcam_vb_start_streaming,
+ .stop_streaming = mcam_vb_stop_streaming,
+ .wait_prepare = mcam_vb_wait_prepare,
+ .wait_finish = mcam_vb_wait_finish,
+};
+
+#endif /* MCAM_MODE_DMA_SG */
+
+static int mcam_setup_vb2(struct mcam_camera *cam)
+{
+ struct vb2_queue *vq = &cam->vb_queue;
+
+ memset(vq, 0, sizeof(*vq));
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->drv_priv = cam;
+ INIT_LIST_HEAD(&cam->buffers);
+ switch (cam->buffer_mode) {
+ case B_DMA_contig:
+#ifdef MCAM_MODE_DMA_CONTIG
+ vq->ops = &mcam_vb2_ops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
+ vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ cam->dma_setup = mcam_ctlr_dma_contig;
+ cam->frame_complete = mcam_dma_contig_done;
+#endif
+ break;
+ case B_DMA_sg:
+#ifdef MCAM_MODE_DMA_SG
+ vq->ops = &mcam_vb2_sg_ops;
+ vq->mem_ops = &vb2_dma_sg_memops;
+ vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ cam->dma_setup = mcam_ctlr_dma_sg;
+ cam->frame_complete = mcam_dma_sg_done;
+#endif
+ break;
+ case B_vmalloc:
+#ifdef MCAM_MODE_VMALLOC
+ tasklet_init(&cam->s_tasklet, mcam_frame_tasklet,
+ (unsigned long) cam);
+ vq->ops = &mcam_vb2_ops;
+ vq->mem_ops = &vb2_vmalloc_memops;
+ vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
+ vq->io_modes = VB2_MMAP;
+ cam->dma_setup = mcam_ctlr_dma_vmalloc;
+ cam->frame_complete = mcam_vmalloc_done;
+#endif
+ break;
+ }
+ return vb2_queue_init(vq);
+}
+
+static void mcam_cleanup_vb2(struct mcam_camera *cam)
+{
+ vb2_queue_release(&cam->vb_queue);
+#ifdef MCAM_MODE_DMA_CONTIG
+ if (cam->buffer_mode == B_DMA_contig)
+ vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx);
+#endif
+}
+
+
+/* ---------------------------------------------------------------------- */
+/*
+ * The long list of V4L2 ioctl() operations.
+ */
+
+static int mcam_vidioc_streamon(struct file *filp, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_streamon(&cam->vb_queue, type);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_streamoff(struct file *filp, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_streamoff(&cam->vb_queue, type);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_reqbufs(struct file *filp, void *priv,
+ struct v4l2_requestbuffers *req)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_reqbufs(&cam->vb_queue, req);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_querybuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_querybuf(&cam->vb_queue, buf);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int mcam_vidioc_qbuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_qbuf(&cam->vb_queue, buf);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int mcam_vidioc_dqbuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+
+static int mcam_vidioc_queryctrl(struct file *filp, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, core, queryctrl, qc);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_g_ctrl(struct file *filp, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, core, g_ctrl, ctrl);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_s_ctrl(struct file *filp, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, core, s_ctrl, ctrl);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "marvell_ccic");
+ strcpy(cap->card, "marvell_ccic");
+ cap->version = 1;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+
+static int mcam_vidioc_enum_fmt_vid_cap(struct file *filp,
+ void *priv, struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index >= N_MCAM_FMTS)
+ return -EINVAL;
+ strlcpy(fmt->description, mcam_formats[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = mcam_formats[fmt->index].pixelformat;
+ return 0;
+}
+
+static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct mcam_camera *cam = priv;
+ struct mcam_format_struct *f;
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ f = mcam_find_format(pix->pixelformat);
+ pix->pixelformat = f->pixelformat;
+ v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
+ mutex_unlock(&cam->s_mutex);
+ v4l2_fill_pix_format(pix, &mbus_fmt);
+ pix->bytesperline = pix->width * f->bpp;
+ pix->sizeimage = pix->height * pix->bytesperline;
+ return ret;
+}
+
+static int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct mcam_camera *cam = priv;
+ struct mcam_format_struct *f;
+ int ret;
+
+ /*
+	 * Can't do anything if the device is not idle.
+	 * Also can't if there are streaming buffers in place.
+ */
+ if (cam->state != S_IDLE || cam->vb_queue.num_buffers > 0)
+ return -EBUSY;
+
+ f = mcam_find_format(fmt->fmt.pix.pixelformat);
+
+ /*
+ * See if the formatting works in principle.
+ */
+ ret = mcam_vidioc_try_fmt_vid_cap(filp, priv, fmt);
+ if (ret)
+ return ret;
+ /*
+ * Now we start to change things for real, so let's do it
+ * under lock.
+ */
+ mutex_lock(&cam->s_mutex);
+ cam->pix_format = fmt->fmt.pix;
+ cam->mbus_code = f->mbus_code;
+
+ /*
+ * Make sure we have appropriate DMA buffers.
+ */
+ if (cam->buffer_mode == B_vmalloc) {
+ ret = mcam_check_dma_buffers(cam);
+ if (ret)
+ goto out;
+ }
+ mcam_set_config_needed(cam, 1);
+ ret = 0;
+out:
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+/*
+ * Return our stored notion of how the camera is/should be configured.
+ * The V4L2 spec wants us to be smarter, and actually get this from
+ * the camera (and not mess with it at open time). Someday.
+ */
+static int mcam_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *f)
+{
+ struct mcam_camera *cam = priv;
+
+ f->fmt.pix = cam->pix_format;
+ return 0;
+}
+
+/*
+ * We only have one input - the sensor - so minimize the nonsense here.
+ */
+static int mcam_vidioc_enum_input(struct file *filp, void *priv,
+ struct v4l2_input *input)
+{
+ if (input->index != 0)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->std = V4L2_STD_ALL; /* Not sure what should go here */
+ strcpy(input->name, "Camera");
+ return 0;
+}
+
+static int mcam_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int mcam_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/* from vivi.c */
+static int mcam_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id *a)
+{
+ return 0;
+}
+
+/*
+ * G/S_PARM. Most of this is done by the sensor, but we are
+ * the level which controls the number of read buffers.
+ */
+static int mcam_vidioc_g_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parms)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, g_parm, parms);
+ mutex_unlock(&cam->s_mutex);
+ parms->parm.capture.readbuffers = n_dma_bufs;
+ return ret;
+}
+
+static int mcam_vidioc_s_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parms)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, s_parm, parms);
+ mutex_unlock(&cam->s_mutex);
+ parms->parm.capture.readbuffers = n_dma_bufs;
+ return ret;
+}
+
+static int mcam_vidioc_g_chip_ident(struct file *file, void *priv,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct mcam_camera *cam = priv;
+
+ chip->ident = V4L2_IDENT_NONE;
+ chip->revision = 0;
+ if (v4l2_chip_match_host(&chip->match)) {
+ chip->ident = cam->chip_id;
+ return 0;
+ }
+ return sensor_call(cam, core, g_chip_ident, chip);
+}
+
+static int mcam_vidioc_enum_framesizes(struct file *filp, void *priv,
+ struct v4l2_frmsizeenum *sizes)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, enum_framesizes, sizes);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int mcam_vidioc_enum_frameintervals(struct file *filp, void *priv,
+ struct v4l2_frmivalenum *interval)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, enum_frameintervals, interval);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int mcam_vidioc_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct mcam_camera *cam = priv;
+
+ if (v4l2_chip_match_host(&reg->match)) {
+ reg->val = mcam_reg_read(cam, reg->reg);
+ reg->size = 4;
+ return 0;
+ }
+ return sensor_call(cam, core, g_register, reg);
+}
+
+static int mcam_vidioc_s_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct mcam_camera *cam = priv;
+
+ if (v4l2_chip_match_host(&reg->match)) {
+ mcam_reg_write(cam, reg->reg, reg->val);
+ return 0;
+ }
+ return sensor_call(cam, core, s_register, reg);
+}
+#endif
+
+static const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = {
+ .vidioc_querycap = mcam_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = mcam_vidioc_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = mcam_vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = mcam_vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = mcam_vidioc_g_fmt_vid_cap,
+ .vidioc_enum_input = mcam_vidioc_enum_input,
+ .vidioc_g_input = mcam_vidioc_g_input,
+ .vidioc_s_input = mcam_vidioc_s_input,
+ .vidioc_s_std = mcam_vidioc_s_std,
+ .vidioc_reqbufs = mcam_vidioc_reqbufs,
+ .vidioc_querybuf = mcam_vidioc_querybuf,
+ .vidioc_qbuf = mcam_vidioc_qbuf,
+ .vidioc_dqbuf = mcam_vidioc_dqbuf,
+ .vidioc_streamon = mcam_vidioc_streamon,
+ .vidioc_streamoff = mcam_vidioc_streamoff,
+ .vidioc_queryctrl = mcam_vidioc_queryctrl,
+ .vidioc_g_ctrl = mcam_vidioc_g_ctrl,
+ .vidioc_s_ctrl = mcam_vidioc_s_ctrl,
+ .vidioc_g_parm = mcam_vidioc_g_parm,
+ .vidioc_s_parm = mcam_vidioc_s_parm,
+ .vidioc_enum_framesizes = mcam_vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = mcam_vidioc_enum_frameintervals,
+ .vidioc_g_chip_ident = mcam_vidioc_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = mcam_vidioc_g_register,
+ .vidioc_s_register = mcam_vidioc_s_register,
+#endif
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Our various file operations.
+ */
+static int mcam_v4l_open(struct file *filp)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ int ret = 0;
+
+ filp->private_data = cam;
+
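+	/* Reset the statistics reported at release time */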
+ frames = singles = delivered = 0;
+ mutex_lock(&cam->s_mutex);
+ if (cam->users == 0) {
+ ret = mcam_setup_vb2(cam);
+ if (ret)
+ goto out;
+ mcam_ctlr_power_up(cam);
+ __mcam_cam_reset(cam);
+ mcam_set_config_needed(cam, 1);
+ }
+ (cam->users)++;
+out:
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_v4l_release(struct file *filp)
+{
+ struct mcam_camera *cam = filp->private_data;
+
+ cam_err(cam, "Release, %d frames, %d singles, %d delivered\n", frames,
+ singles, delivered);
+ mutex_lock(&cam->s_mutex);
+ (cam->users)--;
+ if (filp == cam->owner) {
+ mcam_ctlr_stop_dma(cam);
+ cam->owner = NULL;
+ }
+ if (cam->users == 0) {
+ mcam_cleanup_vb2(cam);
+ mcam_ctlr_power_down(cam);
+ if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
+ mcam_free_dma_bufs(cam);
+ }
+ mutex_unlock(&cam->s_mutex);
+ return 0;
+}
+
+static ssize_t mcam_v4l_read(struct file *filp,
+ char __user *buffer, size_t len, loff_t *pos)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_read(&cam->vb_queue, buffer, len, pos,
+ filp->f_flags & O_NONBLOCK);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+
+static unsigned int mcam_v4l_poll(struct file *filp,
+ struct poll_table_struct *pt)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_poll(&cam->vb_queue, filp, pt);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_v4l_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_mmap(&cam->vb_queue, vma);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+
+static const struct v4l2_file_operations mcam_v4l_fops = {
+ .owner = THIS_MODULE,
+ .open = mcam_v4l_open,
+ .release = mcam_v4l_release,
+ .read = mcam_v4l_read,
+ .poll = mcam_v4l_poll,
+ .mmap = mcam_v4l_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+
+/*
+ * This template device holds all of those v4l2 methods; we
+ * clone it for specific real devices.
+ */
+static struct video_device mcam_v4l_template = {
+ .name = "mcam",
+ .tvnorms = V4L2_STD_NTSC_M,
+ .current_norm = V4L2_STD_NTSC_M, /* make mplayer happy */
+
+ .fops = &mcam_v4l_fops,
+ .ioctl_ops = &mcam_v4l_ioctl_ops,
+ .release = video_device_release_empty,
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Interrupt handler stuff
+ */
+static void mcam_frame_complete(struct mcam_camera *cam, int frame)
+{
+ /*
+ * Basic frame housekeeping.
+ */
+ set_bit(frame, &cam->flags);
+ clear_bit(CF_DMA_ACTIVE, &cam->flags);
+ cam->next_buf = frame;
+ cam->buf_seq[frame] = ++(cam->sequence);
+ frames++;
+ /*
+ * "This should never happen"
+ */
+ if (cam->state != S_STREAMING)
+ return;
+ /*
+ * Process the frame and set up the next one.
+ */
+ cam->frame_complete(cam, frame);
+}
+
+
+/*
+ * The interrupt handler; this needs to be called from the
+ * platform irq handler with the lock held.
+ */
+int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
+{
+ unsigned int frame, handled = 0;
+
+ mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
+ /*
+ * Handle any frame completions. There really should
+ * not be more than one of these, or we have fallen
+ * far behind.
+ *
+ * When running in S/G mode, the frame number lacks any
+ * real meaning - there's only one descriptor array - but
+ * the controller still picks a different one to signal
+ * each time.
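+	 *
+	 * IRQ_EOF0..IRQ_EOF2 are adjacent bits, so shifting IRQ_EOF0 by
+	 * the frame number picks out the matching completion bit below.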
+ */
+ for (frame = 0; frame < cam->nbufs; frame++)
+ if (irqs & (IRQ_EOF0 << frame)) {
+ mcam_frame_complete(cam, frame);
+ handled = 1;
+ }
+ /*
+ * If a frame starts, note that we have DMA active. This
+ * code assumes that we won't get multiple frame interrupts
+ * at once; may want to rethink that.
+ */
+ if (irqs & (IRQ_SOF0 | IRQ_SOF1 | IRQ_SOF2)) {
+ set_bit(CF_DMA_ACTIVE, &cam->flags);
+ handled = 1;
+ if (cam->buffer_mode == B_DMA_sg)
+ mcam_ctlr_stop(cam);
+ }
+ return handled;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Registration and such.
+ */
+static struct ov7670_config sensor_cfg = {
+ /*
+ * Exclude QCIF mode, because it only captures a tiny portion
+ * of the sensor FOV
+ */
+ .min_width = 320,
+ .min_height = 240,
+};
+
+
+int mccic_register(struct mcam_camera *cam)
+{
+ struct i2c_board_info ov7670_info = {
+ .type = "ov7670",
+ .addr = 0x42 >> 1,
+ .platform_data = &sensor_cfg,
+ };
+ int ret;
+
+ /*
+ * Validate the requested buffer mode.
+ */
+ if (buffer_mode >= 0)
+ cam->buffer_mode = buffer_mode;
+ if (cam->buffer_mode == B_DMA_sg &&
+ cam->chip_id == V4L2_IDENT_CAFE) {
+ printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, "
+ "attempting vmalloc mode instead\n");
+ cam->buffer_mode = B_vmalloc;
+ }
+ if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
+ printk(KERN_ERR "marvell-cam: buffer mode %d unsupported\n",
+ cam->buffer_mode);
+ return -EINVAL;
+ }
+ /*
+ * Register with V4L
+ */
+ ret = v4l2_device_register(cam->dev, &cam->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&cam->s_mutex);
+ cam->state = S_NOTREADY;
+ mcam_set_config_needed(cam, 1);
+ cam->pix_format = mcam_def_pix_format;
+ cam->mbus_code = mcam_def_mbus_code;
+ INIT_LIST_HEAD(&cam->buffers);
+ mcam_ctlr_init(cam);
+
+ /*
+ * Try to find the sensor.
+ */
+ sensor_cfg.clock_speed = cam->clock_speed;
+ sensor_cfg.use_smbus = cam->use_smbus;
+ cam->sensor_addr = ov7670_info.addr;
+ cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev,
+ cam->i2c_adapter, &ov7670_info, NULL);
+ if (cam->sensor == NULL) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+
+ ret = mcam_cam_init(cam);
+ if (ret)
+ goto out_unregister;
+ /*
+ * Get the v4l2 setup done.
+ */
+ mutex_lock(&cam->s_mutex);
+ cam->vdev = mcam_v4l_template;
+ cam->vdev.debug = 0;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto out;
+ video_set_drvdata(&cam->vdev, cam);
+
+ /*
+ * If so requested, try to get our DMA buffers now.
+ */
+ if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
+ if (mcam_alloc_dma_bufs(cam, 1))
+			cam_warn(cam, "Unable to alloc DMA buffers at load;"
+					" will try again later.\n");
+ }
+
+out:
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+out_unregister:
+ v4l2_device_unregister(&cam->v4l2_dev);
+ return ret;
+}
+
+
+void mccic_shutdown(struct mcam_camera *cam)
+{
+ /*
+ * If we have no users (and we really, really should have no
+ * users) the device will already be powered down. Trying to
+ * take it down again will wedge the machine, which is frowned
+ * upon.
+ */
+ if (cam->users > 0) {
+ cam_warn(cam, "Removing a device with users!\n");
+ mcam_ctlr_power_down(cam);
+ }
+ vb2_queue_release(&cam->vb_queue);
+ if (cam->buffer_mode == B_vmalloc)
+ mcam_free_dma_bufs(cam);
+ video_unregister_device(&cam->vdev);
+ v4l2_device_unregister(&cam->v4l2_dev);
+}
+
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
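+/*
+ * Shut the controller down for suspend, preserving cam->state so that
+ * mccic_resume() knows whether streaming needs to be restarted.
+ */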
+void mccic_suspend(struct mcam_camera *cam)
+{
+ enum mcam_state cstate = cam->state;
+
+ mcam_ctlr_stop_dma(cam);
+ mcam_ctlr_power_down(cam);
+ cam->state = cstate;
+}
+
+int mccic_resume(struct mcam_camera *cam)
+{
+ int ret = 0;
+
+ mutex_lock(&cam->s_mutex);
+ if (cam->users > 0) {
+ mcam_ctlr_power_up(cam);
+ __mcam_cam_reset(cam);
+ } else {
+ mcam_ctlr_power_down(cam);
+ }
+ mutex_unlock(&cam->s_mutex);
+
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ if (cam->state == S_STREAMING)
+ ret = mcam_read_setup(cam);
+ return ret;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/media/video/marvell-ccic/mcam-core.h b/drivers/media/video/marvell-ccic/mcam-core.h
new file mode 100644
index 0000000..917200e
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/mcam-core.h
@@ -0,0 +1,323 @@
+/*
+ * Marvell camera core structures.
+ *
+ * Copyright 2011 Jonathan Corbet corbet@lwn.net
+ */
+#ifndef _MCAM_CORE_H
+#define _MCAM_CORE_H
+
+#include <linux/list.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-core.h>
+
+/*
+ * Create our own symbols for the supported buffer modes, but, for now,
+ * base them entirely on which videobuf2 options have been selected.
+ */
+#if defined(CONFIG_VIDEOBUF2_VMALLOC) || defined(CONFIG_VIDEOBUF2_VMALLOC_MODULE)
+#define MCAM_MODE_VMALLOC 1
+#endif
+
+#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) || defined(CONFIG_VIDEOBUF2_DMA_CONTIG_MODULE)
+#define MCAM_MODE_DMA_CONTIG 1
+#endif
+
+#if defined(CONFIG_VIDEOBUF2_DMA_SG) || defined(CONFIG_VIDEOBUF2_DMA_SG_MODULE)
+#define MCAM_MODE_DMA_SG 1
+#endif
+
+#if !defined(MCAM_MODE_VMALLOC) && !defined(MCAM_MODE_DMA_CONTIG) && \
+ !defined(MCAM_MODE_DMA_SG)
+#error One of the videobuf buffer modes must be selected in the config
+#endif
+
+
+enum mcam_state {
+ S_NOTREADY, /* Not yet initialized */
+ S_IDLE, /* Just hanging around */
+ S_FLAKED, /* Some sort of problem */
+ S_STREAMING, /* Streaming data */
+ S_BUFWAIT /* streaming requested but no buffers yet */
+};
+#define MAX_DMA_BUFS 3
+
+/*
+ * Different platforms work best with different buffer modes, so we
+ * let the platform pick.
+ */
+enum mcam_buffer_mode {
+ B_vmalloc = 0,
+ B_DMA_contig = 1,
+ B_DMA_sg = 2
+};
+
+/*
+ * Is a given buffer mode supported by the current kernel configuration?
+ */
+static inline int mcam_buffer_mode_supported(enum mcam_buffer_mode mode)
+{
+ switch (mode) {
+#ifdef MCAM_MODE_VMALLOC
+ case B_vmalloc:
+#endif
+#ifdef MCAM_MODE_DMA_CONTIG
+ case B_DMA_contig:
+#endif
+#ifdef MCAM_MODE_DMA_SG
+ case B_DMA_sg:
+#endif
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+/*
+ * A description of one of our devices.
+ * Locking: controlled by s_mutex. Certain fields, however, require
+ * the dev_lock spinlock; they are marked as such by comments.
+ * dev_lock is also required for access to device registers.
+ */
+struct mcam_camera {
+ /*
+ * These fields should be set by the platform code prior to
+ * calling mcam_register().
+ */
+ struct i2c_adapter *i2c_adapter;
+ unsigned char __iomem *regs;
+ spinlock_t dev_lock;
+ struct device *dev; /* For messages, dma alloc */
+ unsigned int chip_id;
+ short int clock_speed; /* Sensor clock speed, default 30 */
+	short int use_smbus;	/* SMBUS or straight I2C? */
+ enum mcam_buffer_mode buffer_mode;
+ /*
+ * Callbacks from the core to the platform code.
+ */
+ void (*plat_power_up) (struct mcam_camera *cam);
+ void (*plat_power_down) (struct mcam_camera *cam);
+
+ /*
+ * Everything below here is private to the mcam core and
+ * should not be touched by the platform code.
+ */
+ struct v4l2_device v4l2_dev;
+ enum mcam_state state;
+ unsigned long flags; /* Buffer status, mainly (dev_lock) */
+ int users; /* How many open FDs */
+ struct file *owner; /* Who has data access (v4l2) */
+
+ /*
+ * Subsystem structures.
+ */
+ struct video_device vdev;
+ struct v4l2_subdev *sensor;
+ unsigned short sensor_addr;
+
+ /* Videobuf2 stuff */
+ struct vb2_queue vb_queue;
+ struct list_head buffers; /* Available frames */
+
+ unsigned int nbufs; /* How many are alloc'd */
+ int next_buf; /* Next to consume (dev_lock) */
+
+ /* DMA buffers - vmalloc mode */
+#ifdef MCAM_MODE_VMALLOC
+ unsigned int dma_buf_size; /* allocated size */
+ void *dma_bufs[MAX_DMA_BUFS]; /* Internal buffer addresses */
+ dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
+ struct tasklet_struct s_tasklet;
+#endif
+ unsigned int sequence; /* Frame sequence number */
+ unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual bufs */
+
+ /* DMA buffers - DMA modes */
+ struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
+ struct vb2_alloc_ctx *vb_alloc_ctx;
+
+ /* Mode-specific ops, set at open time */
+ void (*dma_setup)(struct mcam_camera *cam);
+ void (*frame_complete)(struct mcam_camera *cam, int frame);
+
+ /* Current operating parameters */
+ u32 sensor_type; /* Currently ov7670 only */
+ struct v4l2_pix_format pix_format;
+ enum v4l2_mbus_pixelcode mbus_code;
+
+ /* Locks */
+ struct mutex s_mutex; /* Access to this structure */
+};
+
+
+/*
+ * Register I/O functions. These are here because the platform code
+ * may legitimately need to mess with the register space.
+ */
+/*
+ * Device register I/O
+ */
+static inline void mcam_reg_write(struct mcam_camera *cam, unsigned int reg,
+ unsigned int val)
+{
+ iowrite32(val, cam->regs + reg);
+}
+
+static inline unsigned int mcam_reg_read(struct mcam_camera *cam,
+ unsigned int reg)
+{
+ return ioread32(cam->regs + reg);
+}
+
+
+static inline void mcam_reg_write_mask(struct mcam_camera *cam, unsigned int reg,
+ unsigned int val, unsigned int mask)
+{
+ unsigned int v = mcam_reg_read(cam, reg);
+
+ v = (v & ~mask) | (val & mask);
+ mcam_reg_write(cam, reg, v);
+}
+
+static inline void mcam_reg_clear_bit(struct mcam_camera *cam,
+ unsigned int reg, unsigned int val)
+{
+ mcam_reg_write_mask(cam, reg, 0, val);
+}
+
+static inline void mcam_reg_set_bit(struct mcam_camera *cam,
+ unsigned int reg, unsigned int val)
+{
+ mcam_reg_write_mask(cam, reg, val, val);
+}
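+
+/*
+ * For example, platform code could set the frame-interrupt bits in the
+ * IRQ mask register (purely illustrative):
+ *
+ *	mcam_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
+ */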
+
+/*
+ * Functions for use by platform code.
+ */
+int mccic_register(struct mcam_camera *cam);
+int mccic_irq(struct mcam_camera *cam, unsigned int irqs);
+void mccic_shutdown(struct mcam_camera *cam);
+#ifdef CONFIG_PM
+void mccic_suspend(struct mcam_camera *cam);
+int mccic_resume(struct mcam_camera *cam);
+#endif
+
+/*
+ * Register definitions for the m88alp01 camera interface. Offsets in bytes
+ * as given in the spec.
+ */
+#define REG_Y0BAR 0x00
+#define REG_Y1BAR 0x04
+#define REG_Y2BAR 0x08
+/* ... */
+
+#define REG_IMGPITCH 0x24 /* Image pitch register */
+#define IMGP_YP_SHFT 2 /* Y pitch params */
+#define IMGP_YP_MASK 0x00003ffc /* Y pitch field */
+#define IMGP_UVP_SHFT 18 /* UV pitch (planar) */
+#define IMGP_UVP_MASK 0x3ffc0000
+#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
+#define IRQ_EOF0 0x00000001 /* End of frame 0 */
+#define IRQ_EOF1 0x00000002 /* End of frame 1 */
+#define IRQ_EOF2 0x00000004 /* End of frame 2 */
+#define IRQ_SOF0 0x00000008 /* Start of frame 0 */
+#define IRQ_SOF1 0x00000010 /* Start of frame 1 */
+#define IRQ_SOF2 0x00000020 /* Start of frame 2 */
+#define IRQ_OVERFLOW 0x00000040 /* FIFO overflow */
+#define IRQ_TWSIW 0x00010000 /* TWSI (smbus) write */
+#define IRQ_TWSIR 0x00020000 /* TWSI read */
+#define IRQ_TWSIE 0x00040000 /* TWSI error */
+#define TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
+#define FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
+#define ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
+#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
+#define REG_IRQSTAT 0x30 /* IRQ status / clear */
+
+#define REG_IMGSIZE 0x34 /* Image size */
+#define IMGSZ_V_MASK 0x1fff0000
+#define IMGSZ_V_SHIFT 16
+#define IMGSZ_H_MASK 0x00003fff
+#define REG_IMGOFFSET	0x38	/* Image offset */
+
+#define REG_CTRL0 0x3c /* Control 0 */
+#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
+
+/* Mask for all the format bits */
+#define C0_DF_MASK 0x00fffffc /* Bits 2-23 */
+
+/* RGB ordering */
+#define C0_RGB4_RGBX 0x00000000
+#define C0_RGB4_XRGB 0x00000004
+#define C0_RGB4_BGRX 0x00000008
+#define C0_RGB4_XBGR 0x0000000c
+#define C0_RGB5_RGGB 0x00000000
+#define C0_RGB5_GRBG 0x00000004
+#define C0_RGB5_GBRG 0x00000008
+#define C0_RGB5_BGGR 0x0000000c
+
+/* Spec has two fields for DIN and DOUT, but they must match, so
+ combine them here. */
+#define C0_DF_YUV 0x00000000 /* Data is YUV */
+#define C0_DF_RGB 0x000000a0 /* ... RGB */
+#define C0_DF_BAYER 0x00000140 /* ... Bayer */
+/* 8-8-8 must be missing from the below - ask */
+#define C0_RGBF_565 0x00000000
+#define C0_RGBF_444 0x00000800
+#define C0_RGB_BGR 0x00001000 /* Blue comes first */
+#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
+#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed */
+#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar */
+/* Think that 420 packed must be 111 - ask */
+#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
+#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
+#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
+#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
+#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
+#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
+#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
+#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
+/* Bayer bits 18,19 if needed */
+#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
+#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
+#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
+#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
+#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
+#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
+#define CO_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
+
+/* Bits below C1_444ALPHA are not present in Cafe */
+#define REG_CTRL1 0x40 /* Control 1 */
+#define C1_CLKGATE 0x00000001 /* Sensor clock gate */
+#define C1_DESC_ENA 0x00000100 /* DMA descriptor enable */
+#define C1_DESC_3WORD 0x00000200 /* Three-word descriptors used */
+#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
+#define C1_ALPHA_SHFT 20
+#define C1_DMAB32 0x00000000 /* 32-byte DMA burst */
+#define C1_DMAB16 0x02000000 /* 16-byte DMA burst */
+#define C1_DMAB64 0x04000000 /* 64-byte DMA burst */
+#define C1_DMAB_MASK 0x06000000
+#define C1_TWOBUFS 0x08000000 /* Use only two DMA buffers */
+#define C1_PWRDWN 0x10000000 /* Power down */
+
+#define REG_CLKCTRL 0x88 /* Clock control */
+#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
+
+/* This appears to be a Cafe-only register */
+#define REG_UBAR 0xc4 /* Upper base address register */
+
+/* Armada 610 DMA descriptor registers */
+#define REG_DMA_DESC_Y 0x200
+#define REG_DMA_DESC_U 0x204
+#define REG_DMA_DESC_V 0x208
+#define REG_DESC_LEN_Y 0x20c /* Lengths are in bytes */
+#define REG_DESC_LEN_U 0x210
+#define REG_DESC_LEN_V 0x214
+
+/*
+ * Useful stuff that probably belongs somewhere global.
+ */
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+
+#endif /* _MCAM_CORE_H */
diff --git a/drivers/media/video/marvell-ccic/mmp-driver.c b/drivers/media/video/marvell-ccic/mmp-driver.c
new file mode 100644
index 0000000..d6b7645
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/mmp-driver.c
@@ -0,0 +1,340 @@
+/*
+ * Support for the camera device found on Marvell MMP processors; known
+ * to work with the Armada 610 as used in the OLPC 1.75 system.
+ *
+ * Copyright 2011 Jonathan Corbet <corbet@lwn.net>
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-gpio.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/mmp-camera.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+
+#include "mcam-core.h"
+
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_LICENSE("GPL");
+
+struct mmp_camera {
+ void *power_regs;
+ struct platform_device *pdev;
+ struct mcam_camera mcam;
+ struct list_head devlist;
+ int irq;
+};
+
+static inline struct mmp_camera *mcam_to_cam(struct mcam_camera *mcam)
+{
+ return container_of(mcam, struct mmp_camera, mcam);
+}
+
+/*
+ * A silly little infrastructure so we can keep track of our devices.
+ * Chances are that we will never have more than one of them, but
+ * the Armada 610 *does* have two controllers...
+ */
+
+static LIST_HEAD(mmpcam_devices);
+static struct mutex mmpcam_devices_lock;
+
+static void mmpcam_add_device(struct mmp_camera *cam)
+{
+ mutex_lock(&mmpcam_devices_lock);
+ list_add(&cam->devlist, &mmpcam_devices);
+ mutex_unlock(&mmpcam_devices_lock);
+}
+
+static void mmpcam_remove_device(struct mmp_camera *cam)
+{
+ mutex_lock(&mmpcam_devices_lock);
+ list_del(&cam->devlist);
+ mutex_unlock(&mmpcam_devices_lock);
+}
+
+/*
+ * Platform dev remove passes us a platform_device, and there's
+ * no handy unused drvdata to stash a backpointer in. So just
+ * dig it out of our list.
+ */
+static struct mmp_camera *mmpcam_find_device(struct platform_device *pdev)
+{
+ struct mmp_camera *cam;
+
+ mutex_lock(&mmpcam_devices_lock);
+ list_for_each_entry(cam, &mmpcam_devices, devlist) {
+ if (cam->pdev == pdev) {
+ mutex_unlock(&mmpcam_devices_lock);
+ return cam;
+ }
+ }
+ mutex_unlock(&mmpcam_devices_lock);
+ return NULL;
+}
+
+
+
+
+/*
+ * Power-related registers; this almost certainly belongs
+ * somewhere else.
+ *
+ * ARMADA 610 register manual, sec 7.2.1, p1842.
+ */
+#define CPU_SUBSYS_PMU_BASE 0xd4282800
+#define REG_CCIC_DCGCR 0x28 /* CCIC dyn clock gate ctrl reg */
+#define REG_CCIC_CRCR 0x50 /* CCIC clk reset ctrl reg */
+
+/*
+ * Power control.
+ */
+static void mmpcam_power_up(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata;
+/*
+ * Turn on power and clocks to the controller.
+ */
+ iowrite32(0x3f, cam->power_regs + REG_CCIC_DCGCR);
+ iowrite32(0x3805b, cam->power_regs + REG_CCIC_CRCR);
+ mdelay(1);
+/*
+ * Provide power to the sensor.
+ */
+ mcam_reg_write(mcam, REG_CLKCTRL, 0x60000002);
+ pdata = cam->pdev->dev.platform_data;
+ gpio_set_value(pdata->sensor_power_gpio, 1);
+ mdelay(5);
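+	/* 0x10000000 is the C1_PWRDWN bit in REG_CTRL1 (see mcam-core.h) */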
+ mcam_reg_clear_bit(mcam, REG_CTRL1, 0x10000000);
+ gpio_set_value(pdata->sensor_reset_gpio, 0); /* reset is active low */
+ mdelay(5);
+ gpio_set_value(pdata->sensor_reset_gpio, 1); /* reset is active low */
+ mdelay(5);
+}
+
+static void mmpcam_power_down(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata;
+/*
+ * Turn off clocks and set reset lines
+ */
+ iowrite32(0, cam->power_regs + REG_CCIC_DCGCR);
+ iowrite32(0, cam->power_regs + REG_CCIC_CRCR);
+/*
+ * Shut down the sensor.
+ */
+ pdata = cam->pdev->dev.platform_data;
+ gpio_set_value(pdata->sensor_power_gpio, 0);
+ gpio_set_value(pdata->sensor_reset_gpio, 0);
+}
+
+
+static irqreturn_t mmpcam_irq(int irq, void *data)
+{
+ struct mcam_camera *mcam = data;
+ unsigned int irqs, handled;
+
+ spin_lock(&mcam->dev_lock);
+ irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+ handled = mccic_irq(mcam, irqs);
+ spin_unlock(&mcam->dev_lock);
+ return IRQ_RETVAL(handled);
+}
+
+
+static int mmpcam_probe(struct platform_device *pdev)
+{
+ struct mmp_camera *cam;
+ struct mcam_camera *mcam;
+ struct resource *res;
+ struct mmp_camera_platform_data *pdata;
+ int ret;
+
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ if (cam == NULL)
+ return -ENOMEM;
+ cam->pdev = pdev;
+ INIT_LIST_HEAD(&cam->devlist);
+
+ mcam = &cam->mcam;
+ mcam->platform = MHP_Armada610;
+ mcam->plat_power_up = mmpcam_power_up;
+ mcam->plat_power_down = mmpcam_power_down;
+ mcam->dev = &pdev->dev;
+ mcam->use_smbus = 0;
+ mcam->chip_id = V4L2_IDENT_ARMADA610;
+ mcam->buffer_mode = B_DMA_sg;
+ spin_lock_init(&mcam->dev_lock);
+ /*
+ * Get our I/O memory.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no iomem resource!\n");
+ ret = -ENODEV;
+ goto out_free;
+ }
+ mcam->regs = ioremap(res->start, resource_size(res));
+ if (mcam->regs == NULL) {
+ dev_err(&pdev->dev, "MMIO ioremap fail\n");
+ ret = -ENODEV;
+ goto out_free;
+ }
+ /*
+ * Power/clock memory is elsewhere; get it too. Perhaps this
+ * should really be managed outside of this driver?
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no power resource!\n");
+ ret = -ENODEV;
+ goto out_unmap1;
+ }
+ cam->power_regs = ioremap(res->start, resource_size(res));
+ if (cam->power_regs == NULL) {
+ dev_err(&pdev->dev, "power MMIO ioremap fail\n");
+ ret = -ENODEV;
+ goto out_unmap1;
+ }
+ /*
+ * Find the i2c adapter. This assumes, of course, that the
+ * i2c bus is already up and functioning.
+ */
+ pdata = pdev->dev.platform_data;
+ mcam->i2c_adapter = platform_get_drvdata(pdata->i2c_device);
+ if (mcam->i2c_adapter == NULL) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "No i2c adapter\n");
+ goto out_unmap2;
+ }
+ /*
+ * Sensor GPIO pins.
+ */
+ ret = gpio_request(pdata->sensor_power_gpio, "cam-power");
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get sensor power gpio %d",
+ pdata->sensor_power_gpio);
+ goto out_unmap2;
+ }
+ gpio_direction_output(pdata->sensor_power_gpio, 0);
+ ret = gpio_request(pdata->sensor_reset_gpio, "cam-reset");
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get sensor reset gpio %d",
+ pdata->sensor_reset_gpio);
+ goto out_gpio;
+ }
+ gpio_direction_output(pdata->sensor_reset_gpio, 0);
+ /*
+ * Power the device up and hand it off to the core.
+ */
+ mmpcam_power_up(mcam);
+ ret = mccic_register(mcam);
+ if (ret)
+ goto out_gpio2;
+ /*
+ * Finally, set up our IRQ now that the core is ready to
+ * deal with it.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+ cam->irq = res->start;
+ ret = request_irq(cam->irq, mmpcam_irq, IRQF_SHARED,
+ "mmp-camera", mcam);
+ if (ret == 0) {
+ mmpcam_add_device(cam);
+ return 0;
+ }
+
+out_unregister:
+ mccic_shutdown(mcam);
+out_gpio2:
+ mmpcam_power_down(mcam);
+ gpio_free(pdata->sensor_reset_gpio);
+out_gpio:
+ gpio_free(pdata->sensor_power_gpio);
+out_unmap2:
+ iounmap(cam->power_regs);
+out_unmap1:
+ iounmap(mcam->regs);
+out_free:
+ kfree(cam);
+ return ret;
+}
+
+
+static int mmpcam_remove(struct mmp_camera *cam)
+{
+ struct mcam_camera *mcam = &cam->mcam;
+ struct mmp_camera_platform_data *pdata;
+
+ mmpcam_remove_device(cam);
+ free_irq(cam->irq, mcam);
+ mccic_shutdown(mcam);
+ mmpcam_power_down(mcam);
+ pdata = cam->pdev->dev.platform_data;
+ gpio_free(pdata->sensor_reset_gpio);
+ gpio_free(pdata->sensor_power_gpio);
+ iounmap(cam->power_regs);
+ iounmap(mcam->regs);
+ kfree(cam);
+ return 0;
+}
+
+static int mmpcam_platform_remove(struct platform_device *pdev)
+{
+ struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+ if (cam == NULL)
+ return -ENODEV;
+ return mmpcam_remove(cam);
+}
+
+
+static struct platform_driver mmpcam_driver = {
+ .probe = mmpcam_probe,
+ .remove = mmpcam_platform_remove,
+ .driver = {
+ .name = "mmp-camera",
+ .owner = THIS_MODULE
+ }
+};
+
+
+static int __init mmpcam_init_module(void)
+{
+ mutex_init(&mmpcam_devices_lock);
+ return platform_driver_register(&mmpcam_driver);
+}
+
+static void __exit mmpcam_exit_module(void)
+{
+ platform_driver_unregister(&mmpcam_driver);
+ /*
+ * platform_driver_unregister() should have emptied the list
+ */
+ if (!list_empty(&mmpcam_devices))
+ printk(KERN_ERR "mmp_camera leaving devices behind\n");
+}
+
+module_init(mmpcam_init_module);
+module_exit(mmpcam_exit_module);
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index b03d74e..166bf93 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
-#include <linux/version.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -35,7 +34,7 @@
MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
-
+MODULE_VERSION("0.1.1");
#define MIN_W 32
#define MIN_H 32
@@ -380,7 +379,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->version = KERNEL_VERSION(0, 1, 0);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
| V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index e2bbd8c..4da9cca 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -603,13 +603,9 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
unsigned long flags;
int ret;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/* Enable the chip */
data = reg_write(client, MT9M001_CHIP_ENABLE, 1);
@@ -675,8 +671,8 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
{
struct soc_camera_link *icl = to_soc_camera_link(icd);
- dev_dbg(&icd->dev, "Video removed: %p, %p\n",
- icd->dev.parent, icd->vdev);
+ dev_dbg(icd->pdev, "Video removed: %p, %p\n",
+ icd->parent, icd->vdev);
if (icl->free_bus)
icl->free_bus(icl);
}
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index ebebed9..a357aa8 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -63,6 +63,12 @@
#define MT9M111_RESET_RESTART_FRAME (1 << 1)
#define MT9M111_RESET_RESET_MODE (1 << 0)
+#define MT9M111_RM_FULL_POWER_RD (0 << 10)
+#define MT9M111_RM_LOW_POWER_RD (1 << 10)
+#define MT9M111_RM_COL_SKIP_4X (1 << 5)
+#define MT9M111_RM_ROW_SKIP_4X (1 << 4)
+#define MT9M111_RM_COL_SKIP_2X (1 << 3)
+#define MT9M111_RM_ROW_SKIP_2X (1 << 2)
#define MT9M111_RMB_MIRROR_COLS (1 << 1)
#define MT9M111_RMB_MIRROR_ROWS (1 << 0)
#define MT9M111_CTXT_CTRL_RESTART (1 << 15)
@@ -95,7 +101,8 @@
#define MT9M111_OPMODE_AUTOEXPO_EN (1 << 14)
#define MT9M111_OPMODE_AUTOWHITEBAL_EN (1 << 1)
-
+#define MT9M111_OUTFMT_FLIP_BAYER_COL (1 << 9)
+#define MT9M111_OUTFMT_FLIP_BAYER_ROW (1 << 8)
#define MT9M111_OUTFMT_PROCESSED_BAYER (1 << 14)
#define MT9M111_OUTFMT_BYPASS_IFP (1 << 10)
#define MT9M111_OUTFMT_INV_PIX_CLOCK (1 << 9)
@@ -110,9 +117,8 @@
#define MT9M111_OUTFMT_TST_RAMP_FRAME (3 << 4)
#define MT9M111_OUTFMT_SHIFT_3_UP (1 << 3)
#define MT9M111_OUTFMT_AVG_CHROMA (1 << 2)
-#define MT9M111_OUTFMT_SWAP_YCbCr_C_Y (1 << 1)
-#define MT9M111_OUTFMT_SWAP_RGB_EVEN (1 << 1)
-#define MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr (1 << 0)
+#define MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN (1 << 1)
+#define MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B (1 << 0)
/*
* Camera control register addresses (0x200..0x2ff not implemented)
@@ -122,6 +128,8 @@
#define reg_write(reg, val) mt9m111_reg_write(client, MT9M111_##reg, (val))
#define reg_set(reg, val) mt9m111_reg_set(client, MT9M111_##reg, (val))
#define reg_clear(reg, val) mt9m111_reg_clear(client, MT9M111_##reg, (val))
+#define reg_mask(reg, val, mask) mt9m111_reg_mask(client, MT9M111_##reg, \
+ (val), (mask))
#define MT9M111_MIN_DARK_ROWS 8
#define MT9M111_MIN_DARK_COLS 26
@@ -153,7 +161,11 @@ static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
{V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
{V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG},
{V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
{V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_BGR565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_BGR565_2X8_BE, V4L2_COLORSPACE_SRGB},
{V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
{V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
};
@@ -169,6 +181,8 @@ struct mt9m111 {
* from v4l2-chip-ident.h */
enum mt9m111_context context;
struct v4l2_rect rect;
+ struct mutex power_lock; /* lock to protect power_count */
+ int power_count;
const struct mt9m111_datafmt *fmt;
unsigned int gain;
unsigned char autoexposure;
@@ -176,10 +190,6 @@ struct mt9m111 {
unsigned int powered:1;
unsigned int hflip:1;
unsigned int vflip:1;
- unsigned int swap_rgb_even_odd:1;
- unsigned int swap_rgb_red_blue:1;
- unsigned int swap_yuv_y_chromas:1;
- unsigned int swap_yuv_cb_cr:1;
unsigned int autowhitebalance:1;
};
@@ -248,12 +258,26 @@ static int mt9m111_reg_clear(struct i2c_client *client, const u16 reg,
int ret;
ret = mt9m111_reg_read(client, reg);
- return mt9m111_reg_write(client, reg, ret & ~data);
+ if (ret >= 0)
+ ret = mt9m111_reg_write(client, reg, ret & ~data);
+ return ret;
}
-static int mt9m111_set_context(struct i2c_client *client,
+static int mt9m111_reg_mask(struct i2c_client *client, const u16 reg,
+ const u16 data, const u16 mask)
+{
+ int ret;
+
+ ret = mt9m111_reg_read(client, reg);
+ if (ret >= 0)
+ ret = mt9m111_reg_write(client, reg, (ret & ~mask) | data);
+ return ret;
+}
+
+static int mt9m111_set_context(struct mt9m111 *mt9m111,
enum mt9m111_context ctxt)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B
| MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B
| MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B
@@ -267,10 +291,10 @@ static int mt9m111_set_context(struct i2c_client *client,
return reg_write(CONTEXT_CONTROL, valA);
}
-static int mt9m111_setup_rect(struct i2c_client *client,
+static int mt9m111_setup_rect(struct mt9m111 *mt9m111,
struct v4l2_rect *rect)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret, is_raw_format;
int width = rect->width;
int height = rect->height;
@@ -312,81 +336,9 @@ static int mt9m111_setup_rect(struct i2c_client *client,
return ret;
}
-static int mt9m111_setup_pixfmt(struct i2c_client *client, u16 outfmt)
+static int mt9m111_enable(struct mt9m111 *mt9m111)
{
- int ret;
- u16 mask = MT9M111_OUTFMT_PROCESSED_BAYER | MT9M111_OUTFMT_RGB |
- MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_SWAP_RGB_EVEN |
- MT9M111_OUTFMT_RGB565 | MT9M111_OUTFMT_RGB555 |
- MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr |
- MT9M111_OUTFMT_SWAP_YCbCr_C_Y;
-
- ret = reg_read(OUTPUT_FORMAT_CTRL2_A);
- if (ret >= 0)
- ret = reg_write(OUTPUT_FORMAT_CTRL2_A, (ret & ~mask) | outfmt);
- if (!ret)
- ret = reg_read(OUTPUT_FORMAT_CTRL2_B);
- if (ret >= 0)
- ret = reg_write(OUTPUT_FORMAT_CTRL2_B, (ret & ~mask) | outfmt);
-
- return ret;
-}
-
-static int mt9m111_setfmt_bayer8(struct i2c_client *client)
-{
- return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER |
- MT9M111_OUTFMT_RGB);
-}
-
-static int mt9m111_setfmt_bayer10(struct i2c_client *client)
-{
- return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_BYPASS_IFP);
-}
-
-static int mt9m111_setfmt_rgb565(struct i2c_client *client)
-{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
- int val = 0;
-
- if (mt9m111->swap_rgb_red_blue)
- val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr;
- if (mt9m111->swap_rgb_even_odd)
- val |= MT9M111_OUTFMT_SWAP_RGB_EVEN;
- val |= MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565;
-
- return mt9m111_setup_pixfmt(client, val);
-}
-
-static int mt9m111_setfmt_rgb555(struct i2c_client *client)
-{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
- int val = 0;
-
- if (mt9m111->swap_rgb_red_blue)
- val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr;
- if (mt9m111->swap_rgb_even_odd)
- val |= MT9M111_OUTFMT_SWAP_RGB_EVEN;
- val |= MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555;
-
- return mt9m111_setup_pixfmt(client, val);
-}
-
-static int mt9m111_setfmt_yuv(struct i2c_client *client)
-{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
- int val = 0;
-
- if (mt9m111->swap_yuv_cb_cr)
- val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr;
- if (mt9m111->swap_yuv_y_chromas)
- val |= MT9M111_OUTFMT_SWAP_YCbCr_C_Y;
-
- return mt9m111_setup_pixfmt(client, val);
-}
-
-static int mt9m111_enable(struct i2c_client *client)
-{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
ret = reg_set(RESET, MT9M111_RESET_CHIP_ENABLE);
@@ -395,8 +347,9 @@ static int mt9m111_enable(struct i2c_client *client)
return ret;
}
-static int mt9m111_reset(struct i2c_client *client)
+static int mt9m111_reset(struct mt9m111 *mt9m111)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
ret = reg_set(RESET, MT9M111_RESET_RESET_MODE);
@@ -424,11 +377,9 @@ static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f)
return 0;
}
-static int mt9m111_make_rect(struct i2c_client *client,
+static int mt9m111_make_rect(struct mt9m111 *mt9m111,
struct v4l2_rect *rect)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
-
if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
/* Bayer format - even size lengths */
@@ -444,14 +395,14 @@ static int mt9m111_make_rect(struct i2c_client *client,
soc_camera_limit_side(&rect->top, &rect->height,
MT9M111_MIN_DARK_ROWS, 2, MT9M111_MAX_HEIGHT);
- return mt9m111_setup_rect(client, rect);
+ return mt9m111_setup_rect(mt9m111, rect);
}
static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct v4l2_rect rect = a->c;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
int ret;
dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n",
@@ -460,7 +411,7 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- ret = mt9m111_make_rect(client, &rect);
+ ret = mt9m111_make_rect(mt9m111, &rect);
if (!ret)
mt9m111->rect = rect;
return ret;
@@ -468,8 +419,7 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
a->c = mt9m111->rect;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -496,8 +446,7 @@ static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9m111_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
mf->width = mt9m111->rect.width;
mf->height = mt9m111->rect.height;
@@ -508,51 +457,73 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int mt9m111_set_pixfmt(struct i2c_client *client,
+static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
enum v4l2_mbus_pixelcode code)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
+ u16 data_outfmt2, mask_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER |
+ MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_RGB |
+ MT9M111_OUTFMT_RGB565 | MT9M111_OUTFMT_RGB555 |
+ MT9M111_OUTFMT_RGB444x | MT9M111_OUTFMT_RGBx444 |
+ MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
+ MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
int ret;
switch (code) {
case V4L2_MBUS_FMT_SBGGR8_1X8:
- ret = mt9m111_setfmt_bayer8(client);
+ data_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER |
+ MT9M111_OUTFMT_RGB;
break;
case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
- ret = mt9m111_setfmt_bayer10(client);
+ data_outfmt2 = MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_RGB;
break;
case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
- ret = mt9m111_setfmt_rgb555(client);
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555 |
+ MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
+ break;
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555;
break;
case V4L2_MBUS_FMT_RGB565_2X8_LE:
- ret = mt9m111_setfmt_rgb565(client);
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
+ MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
+ break;
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565;
+ break;
+ case V4L2_MBUS_FMT_BGR565_2X8_BE:
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
+ MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
+ break;
+ case V4L2_MBUS_FMT_BGR565_2X8_LE:
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
+ MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
+ MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
break;
case V4L2_MBUS_FMT_UYVY8_2X8:
- mt9m111->swap_yuv_y_chromas = 0;
- mt9m111->swap_yuv_cb_cr = 0;
- ret = mt9m111_setfmt_yuv(client);
+ data_outfmt2 = 0;
break;
case V4L2_MBUS_FMT_VYUY8_2X8:
- mt9m111->swap_yuv_y_chromas = 0;
- mt9m111->swap_yuv_cb_cr = 1;
- ret = mt9m111_setfmt_yuv(client);
+ data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
break;
case V4L2_MBUS_FMT_YUYV8_2X8:
- mt9m111->swap_yuv_y_chromas = 1;
- mt9m111->swap_yuv_cb_cr = 0;
- ret = mt9m111_setfmt_yuv(client);
+ data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
break;
case V4L2_MBUS_FMT_YVYU8_2X8:
- mt9m111->swap_yuv_y_chromas = 1;
- mt9m111->swap_yuv_cb_cr = 1;
- ret = mt9m111_setfmt_yuv(client);
+ data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
+ MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
break;
default:
- dev_err(&client->dev, "Pixel format not handled : %x\n",
- code);
- ret = -EINVAL;
+ dev_err(&client->dev, "Pixel format not handled: %x\n", code);
+ return -EINVAL;
}
+ ret = reg_mask(OUTPUT_FORMAT_CTRL2_A, data_outfmt2,
+ mask_outfmt2);
+ if (!ret)
+ ret = reg_mask(OUTPUT_FORMAT_CTRL2_B, data_outfmt2,
+ mask_outfmt2);
+
return ret;
}
@@ -561,7 +532,7 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
const struct mt9m111_datafmt *fmt;
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
struct v4l2_rect rect = {
.left = mt9m111->rect.left,
.top = mt9m111->rect.top,
@@ -579,9 +550,9 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
"%s code=%x left=%d, top=%d, width=%d, height=%d\n", __func__,
mf->code, rect.left, rect.top, rect.width, rect.height);
- ret = mt9m111_make_rect(client, &rect);
+ ret = mt9m111_make_rect(mt9m111, &rect);
if (!ret)
- ret = mt9m111_set_pixfmt(client, mf->code);
+ ret = mt9m111_set_pixfmt(mt9m111, mf->code);
if (!ret) {
mt9m111->rect = rect;
mt9m111->fmt = fmt;
@@ -594,8 +565,7 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
static int mt9m111_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
const struct mt9m111_datafmt *fmt;
bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE;
@@ -635,7 +605,7 @@ static int mt9m111_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
return -EINVAL;
@@ -726,21 +696,16 @@ static const struct v4l2_queryctrl mt9m111_controls[] = {
}
};
-static int mt9m111_resume(struct soc_camera_device *icd);
-static int mt9m111_suspend(struct soc_camera_device *icd, pm_message_t state);
-
static struct soc_camera_ops mt9m111_ops = {
- .suspend = mt9m111_suspend,
- .resume = mt9m111_resume,
.query_bus_param = mt9m111_query_bus_param,
.set_bus_param = mt9m111_set_bus_param,
.controls = mt9m111_controls,
.num_controls = ARRAY_SIZE(mt9m111_controls),
};
-static int mt9m111_set_flip(struct i2c_client *client, int flip, int mask)
+static int mt9m111_set_flip(struct mt9m111 *mt9m111, int flip, int mask)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
if (mt9m111->context == HIGHPOWER) {
@@ -758,8 +723,9 @@ static int mt9m111_set_flip(struct i2c_client *client, int flip, int mask)
return ret;
}
-static int mt9m111_get_global_gain(struct i2c_client *client)
+static int mt9m111_get_global_gain(struct mt9m111 *mt9m111)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int data;
data = reg_read(GLOBAL_GAIN);
@@ -769,9 +735,9 @@ static int mt9m111_get_global_gain(struct i2c_client *client)
return data;
}
-static int mt9m111_set_global_gain(struct i2c_client *client, int gain)
+static int mt9m111_set_global_gain(struct mt9m111 *mt9m111, int gain)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
u16 val;
if (gain > 63 * 2 * 2)
@@ -788,9 +754,9 @@ static int mt9m111_set_global_gain(struct i2c_client *client, int gain)
return reg_write(GLOBAL_GAIN, val);
}
-static int mt9m111_set_autoexposure(struct i2c_client *client, int on)
+static int mt9m111_set_autoexposure(struct mt9m111 *mt9m111, int on)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
if (on)
@@ -804,9 +770,9 @@ static int mt9m111_set_autoexposure(struct i2c_client *client, int on)
return ret;
}
-static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on)
+static int mt9m111_set_autowhitebalance(struct mt9m111 *mt9m111, int on)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
if (on)
@@ -823,7 +789,7 @@ static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on)
static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
int data;
switch (ctrl->id) {
@@ -848,7 +814,7 @@ static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
ctrl->value = !!(data & MT9M111_RMB_MIRROR_COLS);
break;
case V4L2_CID_GAIN:
- data = mt9m111_get_global_gain(client);
+ data = mt9m111_get_global_gain(mt9m111);
if (data < 0)
return data;
ctrl->value = data;
@@ -865,8 +831,7 @@ static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
const struct v4l2_queryctrl *qctrl;
int ret;
@@ -877,22 +842,22 @@ static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
switch (ctrl->id) {
case V4L2_CID_VFLIP:
mt9m111->vflip = ctrl->value;
- ret = mt9m111_set_flip(client, ctrl->value,
+ ret = mt9m111_set_flip(mt9m111, ctrl->value,
MT9M111_RMB_MIRROR_ROWS);
break;
case V4L2_CID_HFLIP:
mt9m111->hflip = ctrl->value;
- ret = mt9m111_set_flip(client, ctrl->value,
+ ret = mt9m111_set_flip(mt9m111, ctrl->value,
MT9M111_RMB_MIRROR_COLS);
break;
case V4L2_CID_GAIN:
- ret = mt9m111_set_global_gain(client, ctrl->value);
+ ret = mt9m111_set_global_gain(mt9m111, ctrl->value);
break;
case V4L2_CID_EXPOSURE_AUTO:
- ret = mt9m111_set_autoexposure(client, ctrl->value);
+ ret = mt9m111_set_autoexposure(mt9m111, ctrl->value);
break;
case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = mt9m111_set_autowhitebalance(client, ctrl->value);
+ ret = mt9m111_set_autowhitebalance(mt9m111, ctrl->value);
break;
default:
ret = -EINVAL;
@@ -901,60 +866,52 @@ static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return ret;
}
-static int mt9m111_suspend(struct soc_camera_device *icd, pm_message_t state)
+static int mt9m111_suspend(struct mt9m111 *mt9m111)
{
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
- struct mt9m111 *mt9m111 = to_mt9m111(client);
-
- mt9m111->gain = mt9m111_get_global_gain(client);
+ mt9m111->gain = mt9m111_get_global_gain(mt9m111);
return 0;
}
-static int mt9m111_restore_state(struct i2c_client *client)
+static void mt9m111_restore_state(struct mt9m111 *mt9m111)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
-
- mt9m111_set_context(client, mt9m111->context);
- mt9m111_set_pixfmt(client, mt9m111->fmt->code);
- mt9m111_setup_rect(client, &mt9m111->rect);
- mt9m111_set_flip(client, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS);
- mt9m111_set_flip(client, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS);
- mt9m111_set_global_gain(client, mt9m111->gain);
- mt9m111_set_autoexposure(client, mt9m111->autoexposure);
- mt9m111_set_autowhitebalance(client, mt9m111->autowhitebalance);
- return 0;
+ mt9m111_set_context(mt9m111, mt9m111->context);
+ mt9m111_set_pixfmt(mt9m111, mt9m111->fmt->code);
+ mt9m111_setup_rect(mt9m111, &mt9m111->rect);
+ mt9m111_set_flip(mt9m111, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS);
+ mt9m111_set_flip(mt9m111, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS);
+ mt9m111_set_global_gain(mt9m111, mt9m111->gain);
+ mt9m111_set_autoexposure(mt9m111, mt9m111->autoexposure);
+ mt9m111_set_autowhitebalance(mt9m111, mt9m111->autowhitebalance);
}
-static int mt9m111_resume(struct soc_camera_device *icd)
+static int mt9m111_resume(struct mt9m111 *mt9m111)
{
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
- struct mt9m111 *mt9m111 = to_mt9m111(client);
int ret = 0;
if (mt9m111->powered) {
- ret = mt9m111_enable(client);
+ ret = mt9m111_enable(mt9m111);
if (!ret)
- ret = mt9m111_reset(client);
+ ret = mt9m111_reset(mt9m111);
if (!ret)
- ret = mt9m111_restore_state(client);
+ mt9m111_restore_state(mt9m111);
}
return ret;
}
-static int mt9m111_init(struct i2c_client *client)
+static int mt9m111_init(struct mt9m111 *mt9m111)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
mt9m111->context = HIGHPOWER;
- ret = mt9m111_enable(client);
+ ret = mt9m111_enable(mt9m111);
if (!ret)
- ret = mt9m111_reset(client);
+ ret = mt9m111_reset(mt9m111);
if (!ret)
- ret = mt9m111_set_context(client, mt9m111->context);
+ ret = mt9m111_set_context(mt9m111, mt9m111->context);
if (!ret)
- ret = mt9m111_set_autoexposure(client, mt9m111->autoexposure);
+ ret = mt9m111_set_autoexposure(mt9m111, mt9m111->autoexposure);
if (ret)
dev_err(&client->dev, "mt9m111 init failed: %d\n", ret);
return ret;
@@ -971,20 +928,13 @@ static int mt9m111_video_probe(struct soc_camera_device *icd,
s32 data;
int ret;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
mt9m111->autoexposure = 1;
mt9m111->autowhitebalance = 1;
- mt9m111->swap_rgb_even_odd = 1;
- mt9m111->swap_rgb_red_blue = 1;
-
data = reg_read(CHIP_VERSION);
switch (data) {
@@ -1005,16 +955,51 @@ static int mt9m111_video_probe(struct soc_camera_device *icd,
goto ei2c;
}
- ret = mt9m111_init(client);
+ ret = mt9m111_init(mt9m111);
ei2c:
return ret;
}
+static int mt9m111_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&mt9m111->power_lock);
+
+ /*
+ * If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (mt9m111->power_count == !on) {
+ if (on) {
+ ret = mt9m111_resume(mt9m111);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to resume the sensor: %d\n", ret);
+ goto out;
+ }
+ } else {
+ mt9m111_suspend(mt9m111);
+ }
+ }
+
+ /* Update the power count. */
+ mt9m111->power_count += on ? 1 : -1;
+ WARN_ON(mt9m111->power_count < 0);
+
+out:
+ mutex_unlock(&mt9m111->power_lock);
+ return ret;
+}
+
static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
.g_ctrl = mt9m111_g_ctrl,
.s_ctrl = mt9m111_s_ctrl,
.g_chip_ident = mt9m111_g_chip_ident,
+ .s_power = mt9m111_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9m111_g_register,
.s_register = mt9m111_s_register,
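The mt9m111_s_power() handler added above reference-counts power requests; the easy-to-misread part is the "power_count == !on" test, which is true only when the count is about to cross zero. A minimal stand-alone sketch of just that transition logic, with hypothetical names and no driver dependencies:

/* Illustration only: the power_count == !on test fires on 0 -> 1 and
 * 1 -> 0 transitions, so resume/suspend run once per real state change. */
#include <stdio.h>

static int power_count;

static void s_power(int on)
{
        if (power_count == !on)
                printf("%s\n", on ? "resume" : "suspend");
        power_count += on ? 1 : -1;
}

int main(void)
{
        s_power(1);     /* prints "resume"  (0 -> 1) */
        s_power(1);     /* silent           (1 -> 2) */
        s_power(0);     /* silent           (2 -> 1) */
        s_power(0);     /* prints "suspend" (1 -> 0) */
        return 0;
}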
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index 7ce279c..30547cc 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -700,8 +700,7 @@ static int mt9t031_runtime_suspend(struct device *dev)
static int mt9t031_runtime_resume(struct device *dev)
{
struct video_device *vdev = to_video_device(dev);
- struct soc_camera_device *icd = container_of(vdev->parent,
- struct soc_camera_device, dev);
+ struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index bffa9ee..d2e0a50 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -1057,13 +1057,9 @@ static int mt9t112_camera_probe(struct soc_camera_device *icd,
const char *devname;
int chipid;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* check and show chip ID
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index 4904d25..893a8b8 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -54,11 +54,20 @@ static struct v4l2_queryctrl mt9v011_qctrl[] = {
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gain",
.minimum = 0,
- .maximum = (1 << 10) - 1,
+ .maximum = (1 << 12) - 1 - 0x0020,
.step = 1,
.default_value = 0x0020,
.flags = 0,
}, {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 2047,
+ .step = 1,
+ .default_value = 0x01fc,
+ .flags = 0,
+ }, {
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Red Balance",
@@ -105,7 +114,8 @@ struct mt9v011 {
unsigned hflip:1;
unsigned vflip:1;
- u16 global_gain, red_bal, blue_bal;
+ u16 global_gain, exposure;
+ s16 red_bal, blue_bal;
};
static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd)
@@ -180,24 +190,68 @@ static const struct i2c_reg_value mt9v011_init_default[] = {
{ R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */
};
+
+static u16 calc_mt9v011_gain(s16 lineargain)
+{
+
+ u16 digitalgain = 0;
+ u16 analogmult = 0;
+ u16 analoginit = 0;
+
+ if (lineargain < 0)
+ lineargain = 0;
+
+ /* recommended minimum */
+ lineargain += 0x0020;
+
+ if (lineargain > 2047)
+ lineargain = 2047;
+
+ if (lineargain > 1023) {
+ digitalgain = 3;
+ analogmult = 3;
+ analoginit = lineargain / 16;
+ } else if (lineargain > 511) {
+ digitalgain = 1;
+ analogmult = 3;
+ analoginit = lineargain / 8;
+ } else if (lineargain > 255) {
+ analogmult = 3;
+ analoginit = lineargain / 4;
+ } else if (lineargain > 127) {
+ analogmult = 1;
+ analoginit = lineargain / 2;
+ } else
+ analoginit = lineargain;
+
+ return analoginit + (analogmult << 7) + (digitalgain << 9);
+
+}
+
static void set_balance(struct v4l2_subdev *sd)
{
struct mt9v011 *core = to_mt9v011(sd);
- u16 green1_gain, green2_gain, blue_gain, red_gain;
+ u16 green_gain, blue_gain, red_gain;
+ u16 exposure;
+ s16 bal;
- green1_gain = core->global_gain;
- green2_gain = core->global_gain;
+ exposure = core->exposure;
- blue_gain = core->global_gain +
- core->global_gain * core->blue_bal / (1 << 9);
+ green_gain = calc_mt9v011_gain(core->global_gain);
- red_gain = core->global_gain +
- core->global_gain * core->blue_bal / (1 << 9);
+ bal = core->global_gain;
+ bal += (core->blue_bal * core->global_gain / (1 << 7));
+ blue_gain = calc_mt9v011_gain(bal);
- mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green1_gain);
- mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green1_gain);
+ bal = core->global_gain;
+ bal += (core->red_bal * core->global_gain / (1 << 7));
+ red_gain = calc_mt9v011_gain(bal);
+
+ mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green_gain);
+ mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green_gain);
mt9v011_write(sd, R2C_MT9V011_BLUE_GAIN, blue_gain);
mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain);
+ mt9v011_write(sd, R09_MT9V011_SHUTTER_WIDTH, exposure);
}
static void calc_fps(struct v4l2_subdev *sd, u32 *numerator, u32 *denominator)
@@ -286,7 +340,7 @@ static void set_res(struct v4l2_subdev *sd)
* be missing.
*/
- hstart = 14 + (640 - core->width) / 2;
+ hstart = 20 + (640 - core->width) / 2;
mt9v011_write(sd, R02_MT9V011_COLSTART, hstart);
mt9v011_write(sd, R04_MT9V011_WIDTH, core->width);
mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width);
@@ -338,6 +392,9 @@ static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
case V4L2_CID_GAIN:
ctrl->value = core->global_gain;
return 0;
+ case V4L2_CID_EXPOSURE:
+ ctrl->value = core->exposure;
+ return 0;
case V4L2_CID_RED_BALANCE:
ctrl->value = core->red_bal;
return 0;
@@ -392,6 +449,9 @@ static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
case V4L2_CID_GAIN:
core->global_gain = ctrl->value;
break;
+ case V4L2_CID_EXPOSURE:
+ core->exposure = ctrl->value;
+ break;
case V4L2_CID_RED_BALANCE:
core->red_bal = ctrl->value;
break;
@@ -598,6 +658,7 @@ static int mt9v011_probe(struct i2c_client *c,
}
core->global_gain = 0x0024;
+ core->exposure = 0x01fc;
core->width = 640;
core->height = 480;
core->xtal = 27000000; /* Hz */
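The mt9v011 hunks above add an exposure control and replace the old linear balance math: calc_mt9v011_gain() packs a linear request into the gain register (analog initial gain in bits 0-6, analog multiplier in bits 7-8, digital gain in bits 9-10, per its return expression), and set_balance() now treats red_bal/blue_bal as signed offsets of global_gain in 1/128 steps, e.g. global_gain = 36 with blue_bal = 64 requests 36 + 64 * 36 / 128 = 54 before encoding. A stand-alone restatement of the helper for checking a few values by hand (thresholds copied from the patch, not driver code):

/* Mirror of calc_mt9v011_gain() above, illustration only. */
#include <stdio.h>

static unsigned calc_gain(int linear)
{
        unsigned dig = 0, mult = 0, init;

        if (linear < 0)
                linear = 0;
        linear += 0x0020;               /* recommended minimum */
        if (linear > 2047)
                linear = 2047;

        if (linear > 1023) {
                dig = 3; mult = 3; init = linear / 16;
        } else if (linear > 511) {
                dig = 1; mult = 3; init = linear / 8;
        } else if (linear > 255) {
                mult = 3; init = linear / 4;
        } else if (linear > 127) {
                mult = 1; init = linear / 2;
        } else {
                init = linear;
        }
        return init | (mult << 7) | (dig << 9);
}

int main(void)
{
        /* minimum, the old 10-bit maximum, and the new maximum request */
        printf("0x%04x 0x%04x 0x%04x\n",
               calc_gain(0), calc_gain(1023), calc_gain(2047));
        return 0;
}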
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index fc76ed1..51b0fcc 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -728,9 +728,9 @@ static int mt9v022_video_probe(struct soc_camera_device *icd,
int ret;
unsigned long flags;
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/* Read out the chip version register */
data = reg_read(client, MT9V022_CHIP_VERSION);
@@ -809,8 +809,8 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
{
struct soc_camera_link *icl = to_soc_camera_link(icd);
- dev_dbg(&icd->dev, "Video removed: %p, %p\n",
- icd->dev.parent, icd->vdev);
+ dev_dbg(icd->pdev, "Video removed: %p, %p\n",
+ icd->parent, icd->vdev);
if (icl->free_bus)
icl->free_bus(icl);
}
diff --git a/drivers/media/video/mt9v032.c b/drivers/media/video/mt9v032.c
index 1319c2c..c64e1dc 100644
--- a/drivers/media/video/mt9v032.c
+++ b/drivers/media/video/mt9v032.c
@@ -31,14 +31,14 @@
#define MT9V032_CHIP_VERSION 0x00
#define MT9V032_CHIP_ID_REV1 0x1311
#define MT9V032_CHIP_ID_REV3 0x1313
-#define MT9V032_ROW_START 0x01
-#define MT9V032_ROW_START_MIN 4
-#define MT9V032_ROW_START_DEF 10
-#define MT9V032_ROW_START_MAX 482
-#define MT9V032_COLUMN_START 0x02
+#define MT9V032_COLUMN_START 0x01
#define MT9V032_COLUMN_START_MIN 1
-#define MT9V032_COLUMN_START_DEF 2
+#define MT9V032_COLUMN_START_DEF 1
#define MT9V032_COLUMN_START_MAX 752
+#define MT9V032_ROW_START 0x02
+#define MT9V032_ROW_START_MIN 4
+#define MT9V032_ROW_START_DEF 5
+#define MT9V032_ROW_START_MAX 482
#define MT9V032_WINDOW_HEIGHT 0x03
#define MT9V032_WINDOW_HEIGHT_MIN 1
#define MT9V032_WINDOW_HEIGHT_DEF 480
@@ -420,13 +420,13 @@ static int mt9v032_set_crop(struct v4l2_subdev *subdev,
struct v4l2_rect *__crop;
struct v4l2_rect rect;
- /* Clamp the crop rectangle boundaries and align them to a multiple of 2
- * pixels.
+ /* Clamp the crop rectangle boundaries and align them to a non multiple
+ * of 2 pixels to ensure a GRBG Bayer pattern.
*/
- rect.left = clamp(ALIGN(crop->rect.left, 2),
+ rect.left = clamp(ALIGN(crop->rect.left + 1, 2) - 1,
MT9V032_COLUMN_START_MIN,
MT9V032_COLUMN_START_MAX);
- rect.top = clamp(ALIGN(crop->rect.top, 2),
+ rect.top = clamp(ALIGN(crop->rect.top + 1, 2) - 1,
MT9V032_ROW_START_MIN,
MT9V032_ROW_START_MAX);
rect.width = clamp(ALIGN(crop->rect.width, 2),
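In the mt9v032 hunks the crop origin is now rounded up to an odd coordinate rather than an even one: ALIGN(x + 1, 2) - 1 is "smallest odd value >= x", which, per the rewritten comment, keeps the window on a GRBG Bayer phase; the swapped COLUMN_START/ROW_START addresses and the new odd defaults look consistent with that. A quick check of the expression, assuming the kernel's ALIGN() rounds up to a multiple of the alignment:

/* Illustration only: ALIGN(x + 1, 2) - 1 rounds x up to the next odd value. */
#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        for (int x = 0; x <= 5; x++)
                printf("%d -> %d\n", x, ALIGN(x + 1, 2) - 1);
        /* 0->1, 1->1, 2->3, 3->3, 4->5, 5->5 */
        return 0;
}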
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 63f8a0c..087db12 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -31,7 +31,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/soc_camera.h>
@@ -73,7 +72,7 @@
#define CSISR_SOF_INT (1 << 16)
#define CSISR_DRDY (1 << 0)
-#define VERSION_CODE KERNEL_VERSION(0, 0, 1)
+#define DRIVER_VERSION "0.0.2"
#define DRIVER_NAME "mx1-camera"
#define CSI_IRQ_MASK (CSISR_SFF_OR_INT | CSISR_RFF_OR_INT | \
@@ -142,7 +141,7 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
*count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
- dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
+ dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
return 0;
}
@@ -154,7 +153,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx1_buffer *buf)
BUG_ON(in_interrupt());
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
/*
@@ -179,7 +178,7 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
if (bytes_per_line < 0)
return bytes_per_line;
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
/* Added list head initialization on alloc */
@@ -232,7 +231,7 @@ out:
static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
{
struct videobuf_buffer *vbuf = &pcdev->active->vb;
- struct device *dev = pcdev->icd->dev.parent;
+ struct device *dev = pcdev->icd->parent;
int ret;
if (unlikely(!pcdev->active)) {
@@ -256,11 +255,11 @@ static void mx1_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
list_add_tail(&vb->queue, &pcdev->capture);
@@ -287,7 +286,7 @@ static void mx1_videobuf_release(struct videobuf_queue *vq,
struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
#ifdef DEBUG
struct soc_camera_device *icd = vq->priv_data;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -343,7 +342,7 @@ static void mx1_camera_wakeup(struct mx1_camera_dev *pcdev,
static void mx1_camera_dma_irq(int channel, void *data)
{
struct mx1_camera_dev *pcdev = data;
- struct device *dev = pcdev->icd->dev.parent;
+ struct device *dev = pcdev->icd->parent;
struct mx1_buffer *buf;
struct videobuf_buffer *vb;
unsigned long flags;
@@ -378,10 +377,10 @@ static struct videobuf_queue_ops mx1_videobuf_ops = {
static void mx1_camera_init_videobuf(struct videobuf_queue *q,
struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
- videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->dev.parent,
+ videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->parent,
&pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
sizeof(struct mx1_buffer), icd, &icd->video_lock);
@@ -401,7 +400,7 @@ static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
*/
div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
- dev_dbg(pcdev->icd->dev.parent,
+ dev_dbg(pcdev->icd->parent,
"System clock %lukHz, target freq %dkHz, divisor %lu\n",
lcdclk / 1000, mclk / 1000, div);
@@ -412,7 +411,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
{
unsigned int csicr1 = CSICR1_EN;
- dev_dbg(pcdev->icd->dev.parent, "Activate device\n");
+ dev_dbg(pcdev->icd->parent, "Activate device\n");
clk_enable(pcdev->clk);
@@ -428,7 +427,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
{
- dev_dbg(pcdev->icd->dev.parent, "Deactivate device\n");
+ dev_dbg(pcdev->icd->parent, "Deactivate device\n");
/* Disable all CSI interface */
__raw_writel(0x00, pcdev->base + CSICR1);
@@ -442,13 +441,13 @@ static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
*/
static int mx1_camera_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
if (pcdev->icd)
return -EBUSY;
- dev_info(icd->dev.parent, "MX1 Camera driver attached to camera %d\n",
+ dev_info(icd->parent, "MX1 Camera driver attached to camera %d\n",
icd->devnum);
mx1_camera_activate(pcdev);
@@ -460,7 +459,7 @@ static int mx1_camera_add_device(struct soc_camera_device *icd)
static void mx1_camera_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
unsigned int csicr1;
@@ -473,7 +472,7 @@ static void mx1_camera_remove_device(struct soc_camera_device *icd)
/* Stop DMA engine */
imx_dma_disable(pcdev->dma_chan);
- dev_info(icd->dev.parent, "MX1 Camera driver detached from camera %d\n",
+ dev_info(icd->parent, "MX1 Camera driver detached from camera %d\n",
icd->devnum);
mx1_camera_deactivate(pcdev);
@@ -491,7 +490,7 @@ static int mx1_camera_set_crop(struct soc_camera_device *icd,
static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
unsigned long camera_flags, common_flags;
unsigned int csicr1;
@@ -562,14 +561,14 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n",
+ dev_warn(icd->parent, "Format %x not found\n",
pix->pixelformat);
return -EINVAL;
}
buswidth = xlate->host_fmt->bits_per_sample;
if (buswidth > 8) {
- dev_warn(icd->dev.parent,
+ dev_warn(icd->parent,
"bits-per-sample %d for format %x unsupported\n",
buswidth, pix->pixelformat);
return -EINVAL;
@@ -609,7 +608,7 @@ static int mx1_camera_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n",
+ dev_warn(icd->parent, "Format %x not found\n",
pix->pixelformat);
return -EINVAL;
}
@@ -676,7 +675,6 @@ static int mx1_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "i.MX1/i.MXL Camera", sizeof(cap->card));
- cap->version = VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
@@ -883,4 +881,5 @@ module_exit(mx1_camera_exit);
MODULE_DESCRIPTION("i.MX1/i.MXL SoC Camera Host driver");
MODULE_AUTHOR("Paulius Zaleckas <paulius.zaleckas@teltonika.lt>");
MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 4eab1c6..ec2410c 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -23,7 +23,6 @@
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
@@ -47,7 +46,7 @@
#include <asm/dma.h>
#define MX2_CAM_DRV_NAME "mx2-camera"
-#define MX2_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5)
+#define MX2_CAM_VERSION "0.0.6"
#define MX2_CAM_DRIVER_DESCRIPTION "i.MX2x_Camera"
/* reset values */
@@ -278,7 +277,7 @@ static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
*/
static int mx2_camera_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
int ret;
u32 csicr1;
@@ -303,7 +302,7 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
pcdev->icd = icd;
- dev_info(icd->dev.parent, "Camera driver attached to camera %d\n",
+ dev_info(icd->parent, "Camera driver attached to camera %d\n",
icd->devnum);
return 0;
@@ -311,12 +310,12 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
static void mx2_camera_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
BUG_ON(icd != pcdev->icd);
- dev_info(icd->dev.parent, "Camera driver detached from camera %d\n",
+ dev_info(icd->parent, "Camera driver detached from camera %d\n",
icd->devnum);
mx2_camera_deactivate(pcdev);
@@ -437,7 +436,7 @@ static int mx2_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
- dev_dbg(&icd->dev, "count=%d, size=%d\n", *count, *size);
+ dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
if (bytes_per_line < 0)
return bytes_per_line;
@@ -457,7 +456,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx2_buffer *buf)
struct soc_camera_device *icd = vq->priv_data;
struct videobuf_buffer *vb = &buf->vb;
- dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
/*
@@ -467,7 +466,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx2_buffer *buf)
videobuf_waiton(vq, vb, 0, 0);
videobuf_dma_contig_free(vq, vb);
- dev_dbg(&icd->dev, "%s freed\n", __func__);
+ dev_dbg(icd->parent, "%s freed\n", __func__);
vb->state = VIDEOBUF_NEEDS_INIT;
}
@@ -481,7 +480,7 @@ static int mx2_videobuf_prepare(struct videobuf_queue *vq,
icd->current_fmt->host_fmt);
int ret = 0;
- dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
if (bytes_per_line < 0)
@@ -533,12 +532,12 @@ static void mx2_videobuf_queue(struct videobuf_queue *vq,
{
struct soc_camera_device *icd = vq->priv_data;
struct soc_camera_host *ici =
- to_soc_camera_host(icd->dev.parent);
+ to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
unsigned long flags;
- dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
spin_lock_irqsave(&pcdev->lock, flags);
@@ -611,27 +610,27 @@ static void mx2_videobuf_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
unsigned long flags;
#ifdef DEBUG
- dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
switch (vb->state) {
case VIDEOBUF_ACTIVE:
- dev_info(&icd->dev, "%s (active)\n", __func__);
+ dev_info(icd->parent, "%s (active)\n", __func__);
break;
case VIDEOBUF_QUEUED:
- dev_info(&icd->dev, "%s (queued)\n", __func__);
+ dev_info(icd->parent, "%s (queued)\n", __func__);
break;
case VIDEOBUF_PREPARED:
- dev_info(&icd->dev, "%s (prepared)\n", __func__);
+ dev_info(icd->parent, "%s (prepared)\n", __func__);
break;
default:
- dev_info(&icd->dev, "%s (unknown) %d\n", __func__,
+ dev_info(icd->parent, "%s (unknown) %d\n", __func__,
vb->state);
break;
}
@@ -678,7 +677,7 @@ static struct videobuf_queue_ops mx2_videobuf_ops = {
static void mx2_camera_init_videobuf(struct videobuf_queue *q,
struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
videobuf_queue_dma_contig_init(q, &mx2_videobuf_ops, pcdev->dev,
@@ -719,7 +718,7 @@ static void mx27_camera_emma_buf_init(struct soc_camera_device *icd,
int bytesperline)
{
struct soc_camera_host *ici =
- to_soc_camera_host(icd->dev.parent);
+ to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
writel(pcdev->discard_buffer_dma,
@@ -772,7 +771,7 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd,
__u32 pixfmt)
{
struct soc_camera_host *ici =
- to_soc_camera_host(icd->dev.parent);
+ to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
unsigned long camera_flags, common_flags;
int ret = 0;
@@ -891,7 +890,7 @@ static int mx2_camera_set_crop(struct soc_camera_device *icd,
if (ret < 0)
return ret;
- dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n",
+ dev_dbg(icd->parent, "Sensor cropped %dx%d\n",
mf.width, mf.height);
icd->user_width = mf.width;
@@ -911,7 +910,7 @@ static int mx2_camera_set_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n",
+ dev_warn(icd->parent, "Format %x not found\n",
pix->pixelformat);
return -EINVAL;
}
@@ -951,7 +950,7 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (pixfmt && !xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -974,11 +973,16 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
if (pix->bytesperline < 0)
return pix->bytesperline;
pix->sizeimage = pix->height * pix->bytesperline;
- if (pix->sizeimage > (4 * 0x3ffff)) { /* CSIRXCNT limit */
- dev_warn(icd->dev.parent,
- "Image size (%u) above limit\n",
- pix->sizeimage);
- return -EINVAL;
+ /* Check against the CSIRXCNT limit */
+ if (pix->sizeimage > 4 * 0x3ffff) {
+ /* Adjust geometry, preserve aspect ratio */
+ unsigned int new_height = int_sqrt(4 * 0x3ffff *
+ pix->height / pix->bytesperline);
+ pix->width = new_height * pix->width / pix->height;
+ pix->height = new_height;
+ pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
+ xlate->host_fmt);
+ BUG_ON(pix->bytesperline < 0);
}
}
@@ -996,7 +1000,7 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
if (mf.field == V4L2_FIELD_ANY)
mf.field = V4L2_FIELD_NONE;
if (mf.field != V4L2_FIELD_NONE) {
- dev_err(icd->dev.parent, "Field type %d unsupported.\n",
+ dev_err(icd->parent, "Field type %d unsupported.\n",
mf.field);
return -EINVAL;
}
@@ -1014,7 +1018,6 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
- cap->version = MX2_CAM_VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
@@ -1523,3 +1526,4 @@ module_exit(mx2_camera_exit);
MODULE_DESCRIPTION("i.MX27/i.MX25 SoC Camera Host driver");
MODULE_AUTHOR("Sascha Hauer <sha@pengutronix.de>");
MODULE_LICENSE("GPL");
+MODULE_VERSION(MX2_CAM_VERSION);
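mx2_camera_try_fmt() above no longer rejects frames whose sizeimage exceeds the CSIRXCNT limit of 4 * 0x3ffff bytes; it scales the requested geometry down while preserving the aspect ratio via new_height = int_sqrt(limit * height / bytesperline). A rough worked example of that formula with hypothetical numbers (plain C, with a naive integer square root standing in for the kernel's int_sqrt()):

/* Illustration only: 1600x1200 at 2 bytes/pixel exceeds 4 * 0x3ffff
 * bytes and is shrunk with the aspect ratio kept, as in the patch. */
#include <stdio.h>

static unsigned isqrt(unsigned long v)          /* stand-in for int_sqrt() */
{
        unsigned r = 0;

        while ((unsigned long)(r + 1) * (r + 1) <= v)
                r++;
        return r;
}

int main(void)
{
        unsigned long limit = 4 * 0x3ffff;      /* 1048572 bytes */
        unsigned width = 1600, height = 1200, bpl = width * 2;

        if ((unsigned long)height * bpl > limit) {
                unsigned new_height = isqrt(limit * height / bpl);

                width  = new_height * width / height;
                height = new_height;
                bpl    = width * 2;
        }
        printf("%ux%u, %u bytes/line, %lu bytes\n",
               width, height, bpl, (unsigned long)height * bpl);
        /* prints 836x627, 1672 bytes/line, 1048344 bytes */
        return 0;
}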
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index c7680eb..c045b47 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -195,7 +194,7 @@ static int mx3_videobuf_setup(struct vb2_queue *vq,
unsigned long sizes[], void *alloc_ctxs[])
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
@@ -224,7 +223,7 @@ static int mx3_videobuf_setup(struct vb2_queue *vq,
static int mx3_videobuf_prepare(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
struct scatterlist *sg;
@@ -242,7 +241,7 @@ static int mx3_videobuf_prepare(struct vb2_buffer *vb)
new_size = bytes_per_line * icd->user_height;
if (vb2_plane_size(vb, 0) < new_size) {
- dev_err(icd->dev.parent, "Buffer too small (%lu < %zu)\n",
+ dev_err(icd->parent, "Buffer too small (%lu < %zu)\n",
vb2_plane_size(vb, 0), new_size);
return -ENOBUFS;
}
@@ -284,7 +283,7 @@ static enum pixel_fmt fourcc_to_ipu_pix(__u32 fourcc)
static void mx3_videobuf_queue(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct mx3_camera_buffer *buf = to_mx3_vb(vb);
struct dma_async_tx_descriptor *txd = buf->txd;
@@ -337,7 +336,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
spin_unlock_irq(&mx3_cam->lock);
cookie = txd->tx_submit(txd);
- dev_dbg(icd->dev.parent, "Submitted cookie %d DMA 0x%08x\n",
+ dev_dbg(icd->parent, "Submitted cookie %d DMA 0x%08x\n",
cookie, sg_dma_address(&buf->sg));
if (cookie >= 0)
@@ -358,13 +357,13 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
static void mx3_videobuf_release(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct mx3_camera_buffer *buf = to_mx3_vb(vb);
struct dma_async_tx_descriptor *txd = buf->txd;
unsigned long flags;
- dev_dbg(icd->dev.parent,
+ dev_dbg(icd->parent,
"Release%s DMA 0x%08x, queue %sempty\n",
mx3_cam->active == buf ? " active" : "", sg_dma_address(&buf->sg),
list_empty(&buf->queue) ? "" : "not ");
@@ -403,7 +402,7 @@ static int mx3_videobuf_init(struct vb2_buffer *vb)
static int mx3_stop_streaming(struct vb2_queue *q)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(q);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
struct dma_chan *chan;
@@ -499,7 +498,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
clk_enable(mx3_cam->clk);
rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
- dev_dbg(icd->dev.parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
+ dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
if (rate)
clk_set_rate(mx3_cam->clk, rate);
}
@@ -507,7 +506,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
/* Called with .video_lock held */
static int mx3_camera_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
if (mx3_cam->icd)
@@ -517,7 +516,7 @@ static int mx3_camera_add_device(struct soc_camera_device *icd)
mx3_cam->icd = icd;
- dev_info(icd->dev.parent, "MX3 Camera driver attached to camera %d\n",
+ dev_info(icd->parent, "MX3 Camera driver attached to camera %d\n",
icd->devnum);
return 0;
@@ -526,7 +525,7 @@ static int mx3_camera_add_device(struct soc_camera_device *icd)
/* Called with .video_lock held */
static void mx3_camera_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct idmac_channel **ichan = &mx3_cam->idmac_channel[0];
@@ -541,7 +540,7 @@ static void mx3_camera_remove_device(struct soc_camera_device *icd)
mx3_cam->icd = NULL;
- dev_info(icd->dev.parent, "MX3 Camera driver detached from camera %d\n",
+ dev_info(icd->parent, "MX3 Camera driver detached from camera %d\n",
icd->devnum);
}
@@ -608,12 +607,12 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam,
static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
const unsigned int depth)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
unsigned long bus_flags, camera_flags;
int ret = test_platform_param(mx3_cam, depth, &bus_flags);
- dev_dbg(icd->dev.parent, "request bus width %d bit: %d\n", depth, ret);
+ dev_dbg(icd->parent, "request bus width %d bit: %d\n", depth, ret);
if (ret < 0)
return ret;
@@ -622,7 +621,7 @@ static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
ret = soc_camera_bus_param_compatible(camera_flags, bus_flags);
if (ret < 0)
- dev_warn(icd->dev.parent,
+ dev_warn(icd->parent,
"Flags incompatible: camera %lx, host %lx\n",
camera_flags, bus_flags);
@@ -676,7 +675,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
struct soc_camera_format_xlate *xlate)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
int formats = 0, ret;
enum v4l2_mbus_pixelcode code;
const struct soc_mbus_pixelfmt *fmt;
@@ -688,7 +687,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
fmt = soc_mbus_get_fmtdesc(code);
if (!fmt) {
- dev_warn(icd->dev.parent,
+ dev_warn(icd->parent,
"Unsupported format code #%u: %d\n", idx, code);
return 0;
}
@@ -816,7 +815,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
struct v4l2_crop *a)
{
struct v4l2_rect *rect = &a->c;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct v4l2_mbus_framefmt mf;
@@ -849,7 +848,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
configure_geometry(mx3_cam, mf.width, mf.height,
icd->current_fmt->host_fmt);
- dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n",
+ dev_dbg(icd->parent, "Sensor cropped %dx%d\n",
mf.width, mf.height);
icd->user_width = mf.width;
@@ -861,7 +860,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
static int mx3_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
@@ -871,13 +870,13 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n",
+ dev_warn(icd->parent, "Format %x not found\n",
pix->pixelformat);
return -EINVAL;
}
stride_align(&pix->width);
- dev_dbg(icd->dev.parent, "Set format %dx%d\n", pix->width, pix->height);
+ dev_dbg(icd->parent, "Set format %dx%d\n", pix->width, pix->height);
/*
* Might have to perform a complete interface initialisation like in
@@ -913,13 +912,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
pix->colorspace = mf.colorspace;
icd->current_fmt = xlate;
- pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- if (pix->bytesperline < 0)
- return pix->bytesperline;
- pix->sizeimage = pix->height * pix->bytesperline;
-
- dev_dbg(icd->dev.parent, "Sensor set %dx%d\n", pix->width, pix->height);
+ dev_dbg(icd->parent, "Sensor set %dx%d\n", pix->width, pix->height);
return ret;
}
@@ -936,7 +929,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (pixfmt && !xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -946,12 +939,6 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
if (pix->width > 4096)
pix->width = 4096;
- pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- if (pix->bytesperline < 0)
- return pix->bytesperline;
- pix->sizeimage = pix->height * pix->bytesperline;
-
/* limit to sensor capabilities */
mf.width = pix->width;
mf.height = pix->height;
@@ -974,7 +961,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
case V4L2_FIELD_NONE:
break;
default:
- dev_err(icd->dev.parent, "Field type %d unsupported.\n",
+ dev_err(icd->parent, "Field type %d unsupported.\n",
mf.field);
ret = -EINVAL;
}
@@ -1000,7 +987,6 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
- cap->version = KERNEL_VERSION(0, 2, 2);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
@@ -1008,7 +994,7 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
unsigned long bus_flags, camera_flags, common_flags;
u32 dw, sens_conf;
@@ -1016,7 +1002,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
int buswidth;
int ret;
const struct soc_camera_format_xlate *xlate;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code);
if (!fmt)
@@ -1325,4 +1311,5 @@ module_exit(mx3_camera_exit);
MODULE_DESCRIPTION("i.MX3x SoC Camera Host driver");
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.2.3");
MODULE_ALIAS("platform:" MX3_CAM_DRV_NAME);
diff --git a/drivers/media/video/omap/Kconfig b/drivers/media/video/omap/Kconfig
index e63233fd..390ab09 100644
--- a/drivers/media/video/omap/Kconfig
+++ b/drivers/media/video/omap/Kconfig
@@ -1,11 +1,14 @@
+config VIDEO_OMAP2_VOUT_VRFB
+ bool
+
config VIDEO_OMAP2_VOUT
tristate "OMAP2/OMAP3 V4L2-Display driver"
depends on ARCH_OMAP2 || ARCH_OMAP3
select VIDEOBUF_GEN
select VIDEOBUF_DMA_CONTIG
select OMAP2_DSS
- select OMAP2_VRAM
- select OMAP2_VRFB
+ select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
+ select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
default n
---help---
V4L2 Display driver support for OMAP2/3 based boards.
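The Kconfig change above drops the OMAP2_VRAM select, makes OMAP2_VRFB conditional, and introduces a hidden VIDEO_OMAP2_VOUT_VRFB bool. Judging from the Makefile hunk that follows and the VRFB helpers removed from omap_vout.c further down, that symbol appears to exist only so the rotation support split out into omap_vout_vrfb.o is compiled into the omap-vout module when VRFB is actually available.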
diff --git a/drivers/media/video/omap/Makefile b/drivers/media/video/omap/Makefile
index b287880..fc410b4 100644
--- a/drivers/media/video/omap/Makefile
+++ b/drivers/media/video/omap/Makefile
@@ -4,4 +4,5 @@
# OMAP2/3 Display driver
omap-vout-y := omap_vout.o omap_voutlib.o
+omap-vout-$(CONFIG_VIDEO_OMAP2_VOUT_VRFB) += omap_vout_vrfb.o
obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout.o
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index a647894..b5ef362 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -35,28 +35,26 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
#include <linux/irq.h>
#include <linux/videodev2.h>
-#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <plat/dma.h>
-#include <plat/vram.h>
#include <plat/vrfb.h>
#include <video/omapdss.h>
#include "omap_voutlib.h"
#include "omap_voutdef.h"
+#include "omap_vout_vrfb.h"
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
MODULE_LICENSE("GPL");
-
/* Driver Configuration macros */
#define VOUT_NAME "omap_vout"
@@ -65,31 +63,6 @@ enum omap_vout_channels {
OMAP_VIDEO2,
};
-enum dma_channel_state {
- DMA_CHAN_NOT_ALLOTED,
- DMA_CHAN_ALLOTED,
-};
-
-#define QQVGA_WIDTH 160
-#define QQVGA_HEIGHT 120
-
-/* Max Resolution supported by the driver */
-#define VID_MAX_WIDTH 1280 /* Largest width */
-#define VID_MAX_HEIGHT 720 /* Largest height */
-
-/* Mimimum requirement is 2x2 for DSS */
-#define VID_MIN_WIDTH 2
-#define VID_MIN_HEIGHT 2
-
-/* 2048 x 2048 is max res supported by OMAP display controller */
-#define MAX_PIXELS_PER_LINE 2048
-
-#define VRFB_TX_TIMEOUT 1000
-#define VRFB_NUM_BUFS 4
-
-/* Max buffer size tobe allocated during init */
-#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
-
static struct videobuf_queue_ops video_vbq_ops;
/* Variables configurable through module params*/
static u32 video1_numbuffers = 3;
@@ -172,84 +145,6 @@ static const struct v4l2_fmtdesc omap_formats[] = {
#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
/*
- * Allocate buffers
- */
-static unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
-{
- u32 order, size;
- unsigned long virt_addr, addr;
-
- size = PAGE_ALIGN(buf_size);
- order = get_order(size);
- virt_addr = __get_free_pages(GFP_KERNEL | GFP_DMA, order);
- addr = virt_addr;
-
- if (virt_addr) {
- while (size > 0) {
- SetPageReserved(virt_to_page(addr));
- addr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- }
- *phys_addr = (u32) virt_to_phys((void *) virt_addr);
- return virt_addr;
-}
-
-/*
- * Free buffers
- */
-static void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
-{
- u32 order, size;
- unsigned long addr = virtaddr;
-
- size = PAGE_ALIGN(buf_size);
- order = get_order(size);
-
- while (size > 0) {
- ClearPageReserved(virt_to_page(addr));
- addr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- free_pages((unsigned long) virtaddr, order);
-}
-
-/*
- * Function for allocating video buffers
- */
-static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
- unsigned int *count, int startindex)
-{
- int i, j;
-
- for (i = 0; i < *count; i++) {
- if (!vout->smsshado_virt_addr[i]) {
- vout->smsshado_virt_addr[i] =
- omap_vout_alloc_buffer(vout->smsshado_size,
- &vout->smsshado_phy_addr[i]);
- }
- if (!vout->smsshado_virt_addr[i] && startindex != -1) {
- if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
- break;
- }
- if (!vout->smsshado_virt_addr[i]) {
- for (j = 0; j < i; j++) {
- omap_vout_free_buffer(
- vout->smsshado_virt_addr[j],
- vout->smsshado_size);
- vout->smsshado_virt_addr[j] = 0;
- vout->smsshado_phy_addr[j] = 0;
- }
- *count = 0;
- return -ENOMEM;
- }
- memset((void *) vout->smsshado_virt_addr[i], 0,
- vout->smsshado_size);
- }
- return 0;
-}
-
-/*
* Try format
*/
static int omap_vout_try_format(struct v4l2_pix_format *pix)
@@ -342,73 +237,9 @@ static u32 omap_vout_uservirt_to_phys(u32 virtp)
}
/*
- * Wakes up the application once the DMA transfer to VRFB space is completed.
- */
-static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
-{
- struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
-
- t->tx_status = 1;
- wake_up_interruptible(&t->wait);
-}
-
-/*
- * Release the VRFB context once the module exits
- */
-static void omap_vout_release_vrfb(struct omap_vout_device *vout)
-{
- int i;
-
- for (i = 0; i < VRFB_NUM_BUFS; i++)
- omap_vrfb_release_ctx(&vout->vrfb_context[i]);
-
- if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
- vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
- omap_free_dma(vout->vrfb_dma_tx.dma_ch);
- }
-}
-
-/*
- * Return true if rotation is 90 or 270
- */
-static inline int rotate_90_or_270(const struct omap_vout_device *vout)
-{
- return (vout->rotation == dss_rotation_90_degree ||
- vout->rotation == dss_rotation_270_degree);
-}
-
-/*
- * Return true if rotation is enabled
- */
-static inline int rotation_enabled(const struct omap_vout_device *vout)
-{
- return vout->rotation || vout->mirror;
-}
-
-/*
- * Reverse the rotation degree if mirroring is enabled
- */
-static inline int calc_rotation(const struct omap_vout_device *vout)
-{
- if (!vout->mirror)
- return vout->rotation;
-
- switch (vout->rotation) {
- case dss_rotation_90_degree:
- return dss_rotation_270_degree;
- case dss_rotation_270_degree:
- return dss_rotation_90_degree;
- case dss_rotation_180_degree:
- return dss_rotation_0_degree;
- default:
- return dss_rotation_180_degree;
- }
-}
-
-/*
* Free the V4L2 buffers
*/
-static void omap_vout_free_buffers(struct omap_vout_device *vout)
+void omap_vout_free_buffers(struct omap_vout_device *vout)
{
int i, numbuffers;
@@ -425,52 +256,6 @@ static void omap_vout_free_buffers(struct omap_vout_device *vout)
}
/*
- * Free VRFB buffers
- */
-static void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
-{
- int j;
-
- for (j = 0; j < VRFB_NUM_BUFS; j++) {
- omap_vout_free_buffer(vout->smsshado_virt_addr[j],
- vout->smsshado_size);
- vout->smsshado_virt_addr[j] = 0;
- vout->smsshado_phy_addr[j] = 0;
- }
-}
-
-/*
- * Allocate the buffers for the VRFB space. Data is copied from V4L2
- * buffers to the VRFB buffers using the DMA engine.
- */
-static int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
- unsigned int *count, unsigned int startindex)
-{
- int i;
- bool yuv_mode;
-
- /* Allocate the VRFB buffers only if the buffers are not
- * allocated during init time.
- */
- if ((rotation_enabled(vout)) && !vout->vrfb_static_allocation)
- if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
- return -ENOMEM;
-
- if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
- vout->dss_mode == OMAP_DSS_COLOR_UYVY)
- yuv_mode = true;
- else
- yuv_mode = false;
-
- for (i = 0; i < *count; i++)
- omap_vrfb_setup(&vout->vrfb_context[i],
- vout->smsshado_phy_addr[i], vout->pix.width,
- vout->pix.height, vout->bpp, yuv_mode);
-
- return 0;
-}
-
-/*
* Convert V4L2 rotation to DSS rotation
* V4L2 understand 0, 90, 180, 270.
* Convert to 0, 1, 2 and 3 respectively for DSS
@@ -499,124 +284,38 @@ static int v4l2_rot_to_dss_rot(int v4l2_rotation,
return ret;
}
-/*
- * Calculate the buffer offsets from which the streaming should
- * start. This offset calculation is mainly required because of
- * the VRFB 32 pixels alignment with rotation.
- */
static int omap_vout_calculate_offset(struct omap_vout_device *vout)
{
- struct omap_overlay *ovl;
- enum dss_rotation rotation;
struct omapvideo_info *ovid;
- bool mirroring = vout->mirror;
- struct omap_dss_device *cur_display;
struct v4l2_rect *crop = &vout->crop;
struct v4l2_pix_format *pix = &vout->pix;
int *cropped_offset = &vout->cropped_offset;
- int vr_ps = 1, ps = 2, temp_ps = 2;
- int offset = 0, ctop = 0, cleft = 0, line_length = 0;
+ int ps = 2, line_length = 0;
ovid = &vout->vid_info;
- ovl = ovid->overlays[0];
- /* get the display device attached to the overlay */
- if (!ovl->manager || !ovl->manager->device)
- return -1;
- cur_display = ovl->manager->device;
- rotation = calc_rotation(vout);
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ omap_vout_calculate_vrfb_offset(vout);
+ } else {
+ vout->line_length = line_length = pix->width;
- if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
- V4L2_PIX_FMT_UYVY == pix->pixelformat) {
- if (rotation_enabled(vout)) {
- /*
- * ps - Actual pixel size for YUYV/UYVY for
- * VRFB/Mirroring is 4 bytes
- * vr_ps - Virtually pixel size for YUYV/UYVY is
- * 2 bytes
- */
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat)
+ ps = 2;
+ else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat)
ps = 4;
- vr_ps = 2;
- } else {
- ps = 2; /* otherwise the pixel size is 2 byte */
- }
- } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
- ps = 4;
- } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
- ps = 3;
- }
- vout->ps = ps;
- vout->vr_ps = vr_ps;
-
- if (rotation_enabled(vout)) {
- line_length = MAX_PIXELS_PER_LINE;
- ctop = (pix->height - crop->height) - crop->top;
- cleft = (pix->width - crop->width) - crop->left;
- } else {
- line_length = pix->width;
- }
- vout->line_length = line_length;
- switch (rotation) {
- case dss_rotation_90_degree:
- offset = vout->vrfb_context[0].yoffset *
- vout->vrfb_context[0].bytespp;
- temp_ps = ps / vr_ps;
- if (mirroring == 0) {
- *cropped_offset = offset + line_length *
- temp_ps * cleft + crop->top * temp_ps;
- } else {
- *cropped_offset = offset + line_length * temp_ps *
- cleft + crop->top * temp_ps + (line_length *
- ((crop->width / (vr_ps)) - 1) * ps);
- }
- break;
- case dss_rotation_180_degree:
- offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
- vout->vrfb_context[0].bytespp) +
- (vout->vrfb_context[0].xoffset *
- vout->vrfb_context[0].bytespp));
- if (mirroring == 0) {
- *cropped_offset = offset + (line_length * ps * ctop) +
- (cleft / vr_ps) * ps;
+ else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat)
+ ps = 3;
- } else {
- *cropped_offset = offset + (line_length * ps * ctop) +
- (cleft / vr_ps) * ps + (line_length *
- (crop->height - 1) * ps);
- }
- break;
- case dss_rotation_270_degree:
- offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
- vout->vrfb_context[0].bytespp;
- temp_ps = ps / vr_ps;
- if (mirroring == 0) {
- *cropped_offset = offset + line_length *
- temp_ps * crop->left + ctop * ps;
- } else {
- *cropped_offset = offset + line_length *
- temp_ps * crop->left + ctop * ps +
- (line_length * ((crop->width / vr_ps) - 1) *
- ps);
- }
- break;
- case dss_rotation_0_degree:
- if (mirroring == 0) {
- *cropped_offset = (line_length * ps) *
- crop->top + (crop->left / vr_ps) * ps;
- } else {
- *cropped_offset = (line_length * ps) *
- crop->top + (crop->left / vr_ps) * ps +
- (line_length * (crop->height - 1) * ps);
- }
- break;
- default:
- *cropped_offset = (line_length * ps * crop->top) /
- vr_ps + (crop->left * ps) / vr_ps +
- ((crop->width / vr_ps) - 1) * ps;
- break;
+ vout->ps = ps;
+
+ *cropped_offset = (line_length * ps) *
+ crop->top + crop->left * ps;
}
+
v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
- __func__, *cropped_offset);
+ __func__, vout->cropped_offset);
+
return 0;
}
@@ -664,7 +363,7 @@ static int video_mode_to_dss_mode(struct omap_vout_device *vout)
/*
* Setup the overlay
*/
-int omapvid_setup_overlay(struct omap_vout_device *vout,
+static int omapvid_setup_overlay(struct omap_vout_device *vout,
struct omap_overlay *ovl, int posx, int posy, int outw,
int outh, u32 addr)
{
@@ -687,7 +386,7 @@ int omapvid_setup_overlay(struct omap_vout_device *vout,
/* Setup the input plane parameters according to
* rotation value selected.
*/
- if (rotate_90_or_270(vout)) {
+ if (is_rotation_90_or_270(vout)) {
cropheight = vout->crop.width;
cropwidth = vout->crop.height;
pixheight = vout->pix.width;
@@ -711,7 +410,7 @@ int omapvid_setup_overlay(struct omap_vout_device *vout,
info.out_width = outw;
info.out_height = outh;
info.global_alpha = vout->win.global_alpha;
- if (!rotation_enabled(vout)) {
+ if (!is_rotation_enabled(vout)) {
info.rotation = 0;
info.rotation_type = OMAP_DSS_ROT_DMA;
info.screen_width = pixwidth;
@@ -744,7 +443,7 @@ setup_ovl_err:
/*
* Initialize the overlay structure
*/
-int omapvid_init(struct omap_vout_device *vout, u32 addr)
+static int omapvid_init(struct omap_vout_device *vout, u32 addr)
{
int ret = 0, i;
struct v4l2_window *win;
@@ -809,7 +508,7 @@ omapvid_init_err:
/*
* Apply the changes set the go bit of DSS
*/
-int omapvid_apply_changes(struct omap_vout_device *vout)
+static int omapvid_apply_changes(struct omap_vout_device *vout)
{
int i;
struct omap_overlay *ovl;
@@ -825,7 +524,7 @@ int omapvid_apply_changes(struct omap_vout_device *vout)
return 0;
}
-void omap_vout_isr(void *arg, unsigned int irqstatus)
+static void omap_vout_isr(void *arg, unsigned int irqstatus)
{
int ret;
u32 addr, fid;
@@ -848,10 +547,20 @@ void omap_vout_isr(void *arg, unsigned int irqstatus)
spin_lock(&vout->vbq_lock);
do_gettimeofday(&timevalue);
- if (cur_display->type == OMAP_DISPLAY_TYPE_DPI) {
- if (!(irqstatus & DISPC_IRQ_VSYNC))
- goto vout_isr_err;
+ if (cur_display->type != OMAP_DISPLAY_TYPE_VENC) {
+ switch (cur_display->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ if (!(irqstatus & (DISPC_IRQ_VSYNC | DISPC_IRQ_VSYNC2)))
+ goto vout_isr_err;
+ break;
+ case OMAP_DISPLAY_TYPE_HDMI:
+ if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN))
+ goto vout_isr_err;
+ break;
+ default:
+ goto vout_isr_err;
+ }
if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
vout->cur_frm->ts = timevalue;
vout->cur_frm->state = VIDEOBUF_DONE;
@@ -875,7 +584,7 @@ void omap_vout_isr(void *arg, unsigned int irqstatus)
ret = omapvid_init(vout, addr);
if (ret)
printk(KERN_ERR VOUT_NAME
- "failed to set overlay info\n");
+ "failed to set overlay info\n");
/* Enable the pipeline and set the Go bit */
ret = omapvid_apply_changes(vout);
if (ret)
@@ -954,6 +663,7 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
int startindex = 0, i, j;
u32 phy_addr = 0, virt_addr = 0;
struct omap_vout_device *vout = q->priv_data;
+ struct omapvideo_info *ovid = &vout->vid_info;
if (!vout)
return -EINVAL;
@@ -966,13 +676,10 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
*count = startindex;
- if ((rotation_enabled(vout)) && *count > VRFB_NUM_BUFS)
- *count = VRFB_NUM_BUFS;
-
- /* If rotation is enabled, allocate memory for VRFB space also */
- if (rotation_enabled(vout))
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
return -ENOMEM;
+ }
if (V4L2_MEMORY_MMAP != vout->memory)
return 0;
@@ -996,8 +703,11 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
&phy_addr);
if (!virt_addr) {
- if (!rotation_enabled(vout))
+ if (ovid->rotation_type == VOUT_ROT_NONE) {
break;
+ } else {
+ if (!is_rotation_enabled(vout))
+ break;
/* Free the VRFB buffers if no space for V4L2 buffers */
for (j = i; j < *count; j++) {
omap_vout_free_buffer(
@@ -1005,6 +715,7 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
vout->smsshado_size);
vout->smsshado_virt_addr[j] = 0;
vout->smsshado_phy_addr[j] = 0;
+ }
}
}
vout->buf_virt_addr[i] = virt_addr;
@@ -1017,9 +728,9 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
/*
* Free the V4L2 buffers additionally allocated than default
- * number of buffers and free all the VRFB buffers
+ * number of buffers
*/
-static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
+static void omap_vout_free_extra_buffers(struct omap_vout_device *vout)
{
int num_buffers = 0, i;
@@ -1034,20 +745,6 @@ static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
vout->buf_virt_addr[i] = 0;
vout->buf_phy_addr[i] = 0;
}
- /* Free the VRFB buffers only if they are allocated
- * during reqbufs. Don't free if init time allocated
- */
- if (!vout->vrfb_static_allocation) {
- for (i = 0; i < VRFB_NUM_BUFS; i++) {
- if (vout->smsshado_virt_addr[i]) {
- omap_vout_free_buffer(
- vout->smsshado_virt_addr[i],
- vout->smsshado_size);
- vout->smsshado_virt_addr[i] = 0;
- vout->smsshado_phy_addr[i] = 0;
- }
- }
- }
vout->buffer_allocated = num_buffers;
}
@@ -1059,16 +756,11 @@ static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
* buffer into VRFB memory space before giving it to the DSS.
*/
static int omap_vout_buffer_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
{
- dma_addr_t dmabuf;
- struct vid_vrfb_dma *tx;
- enum dss_rotation rotation;
struct omap_vout_device *vout = q->priv_data;
- u32 dest_frame_index = 0, src_element_index = 0;
- u32 dest_element_index = 0, src_frame_index = 0;
- u32 elem_count = 0, frame_count = 0, pixsize = 2;
+ struct omapvideo_info *ovid = &vout->vid_info;
if (VIDEOBUF_NEEDS_INIT == vb->state) {
vb->width = vout->pix.width;
@@ -1087,66 +779,24 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
vout->queued_buf_addr[vb->i] = (u8 *)
omap_vout_uservirt_to_phys(vb->baddr);
} else {
- vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
- }
+ u32 addr, dma_addr;
+ unsigned long size;
- if (!rotation_enabled(vout))
- return 0;
+ addr = (unsigned long) vout->buf_virt_addr[vb->i];
+ size = (unsigned long) vb->size;
- dmabuf = vout->buf_phy_addr[vb->i];
- /* If rotation is enabled, copy input buffer into VRFB
- * memory space using DMA. We are copying input buffer
- * into VRFB memory space of desired angle and DSS will
- * read image VRFB memory for 0 degree angle
- */
- pixsize = vout->bpp * vout->vrfb_bpp;
- /*
- * DMA transfer in double index mode
- */
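+		/* Map the buffer for DMA; this cleans the CPU caches so the
+		 * DSS/VRFB DMA sees up-to-date data. The mapping is undone
+		 * in vidioc_dqbuf().
+		 */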
+ dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev, (void *) addr,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
+ v4l2_err(&vout->vid_dev->v4l2_dev, "dma_map_single failed\n");
- /* Frame index */
- dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
- (vout->pix.width * vout->bpp)) + 1;
-
- /* Source and destination parameters */
- src_element_index = 0;
- src_frame_index = 0;
- dest_element_index = 1;
- /* Number of elements per frame */
- elem_count = vout->pix.width * vout->bpp;
- frame_count = vout->pix.height;
- tx = &vout->vrfb_dma_tx;
- tx->tx_status = 0;
- omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
- tx->dev_id, 0x0);
- /* src_port required only for OMAP1 */
- omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dmabuf, src_element_index, src_frame_index);
- /*set dma source burst mode for VRFB */
- omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
- rotation = calc_rotation(vout);
-
- /* dest_port required only for OMAP1 */
- omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
- vout->vrfb_context[vb->i].paddr[0], dest_element_index,
- dest_frame_index);
- /*set dma dest burst mode for VRFB */
- omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
- omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
-
- omap_start_dma(tx->dma_ch);
- interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
-
- if (tx->tx_status == 0) {
- omap_stop_dma(tx->dma_ch);
- return -EINVAL;
+ vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
}
- /* Store buffers physical address into an array. Addresses
- * from this array will be used to configure DSS */
- vout->queued_buf_addr[vb->i] = (u8 *)
- vout->vrfb_context[vb->i].paddr[rotation];
- return 0;
+
+ if (ovid->rotation_type == VOUT_ROT_VRFB)
+ return omap_vout_prepare_vrfb(vout, vb);
+ else
+ return 0;
}
/*
@@ -1298,7 +948,15 @@ static int omap_vout_release(struct file *file)
"Unable to apply changes\n");
/* Free all buffers */
- omap_vout_free_allbuffers(vout);
+ omap_vout_free_extra_buffers(vout);
+
+	/* Free the VRFB buffers only if they were allocated
+	 * during reqbufs; don't free buffers allocated at init time
+	 */
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ if (!vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+ }
videobuf_mmap_free(q);
/* Even if apply changes fails we should continue
@@ -1307,7 +965,7 @@ static int omap_vout_release(struct file *file)
u32 mask = 0;
mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
- DISPC_IRQ_EVSYNC_ODD;
+ DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_VSYNC2;
omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
vout->streaming = 0;
@@ -1383,10 +1041,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
int index = fmt->index;
- enum v4l2_buf_type type = fmt->type;
- fmt->index = index;
- fmt->type = type;
if (index >= NUM_OUTPUT_FORMATS)
return -EINVAL;
@@ -1457,7 +1112,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
/* We dont support RGB24-packed mode if vrfb rotation
* is enabled*/
- if ((rotation_enabled(vout)) &&
+ if ((is_rotation_enabled(vout)) &&
f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
ret = -EINVAL;
goto s_fmt_vid_out_exit;
@@ -1465,7 +1120,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
/* get the framebuffer parameters */
- if (rotate_90_or_270(vout)) {
+ if (is_rotation_90_or_270(vout)) {
vout->fbuf.fmt.height = timing->x_res;
vout->fbuf.fmt.width = timing->y_res;
} else {
@@ -1555,10 +1210,7 @@ static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
int index = fmt->index;
- enum v4l2_buf_type type = fmt->type;
- fmt->index = index;
- fmt->type = type;
if (index >= NUM_OUTPUT_FORMATS)
return -EINVAL;
@@ -1645,7 +1297,7 @@ static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
/* get the display device attached to the overlay */
timing = &ovl->manager->device->panel.timings;
- if (rotate_90_or_270(vout)) {
+ if (is_rotation_90_or_270(vout)) {
vout->fbuf.fmt.height = timing->x_res;
vout->fbuf.fmt.width = timing->y_res;
} else {
@@ -1725,9 +1377,17 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
switch (a->id) {
case V4L2_CID_ROTATE:
{
+ struct omapvideo_info *ovid;
int rotation = a->value;
+ ovid = &vout->vid_info;
+
mutex_lock(&vout->lock);
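+		/* Rotation is only supported when VRFB rotation is available */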
+ if (rotation && ovid->rotation_type == VOUT_ROT_NONE) {
+ mutex_unlock(&vout->lock);
+ ret = -ERANGE;
+ break;
+ }
if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
mutex_unlock(&vout->lock);
@@ -1783,6 +1443,11 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
ovl = ovid->overlays[0];
mutex_lock(&vout->lock);
+ if (mirror && ovid->rotation_type == VOUT_ROT_NONE) {
+ mutex_unlock(&vout->lock);
+ ret = -ERANGE;
+ break;
+ }
if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
mutex_unlock(&vout->lock);
@@ -1893,7 +1558,7 @@ static int vidioc_qbuf(struct file *file, void *fh,
}
}
- if ((rotation_enabled(vout)) &&
+ if ((is_rotation_enabled(vout)) &&
vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
v4l2_warn(&vout->vid_dev->v4l2_dev,
"DMA Channel not allocated for Rotation\n");
@@ -1908,15 +1573,28 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
struct omap_vout_device *vout = fh;
struct videobuf_queue *q = &vout->vbq;
+ int ret;
+ u32 addr;
+ unsigned long size;
+ struct videobuf_buffer *vb;
+
if (!vout->streaming)
return -EINVAL;
if (file->f_flags & O_NONBLOCK)
/* Call videobuf_dqbuf for non blocking mode */
- return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
+ ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
else
/* Call videobuf_dqbuf for blocking mode */
- return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+ ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+
+	if (!ret) {
+		/* b->index is valid only after a successful dequeue */
+		vb = q->bufs[b->index];
+		/* Undo the DMA mapping created in buffer_prepare */
+		addr = (unsigned long) vout->buf_phy_addr[vb->i];
+		size = (unsigned long) vb->size;
+		dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
+				size, DMA_TO_DEVICE);
+	}
+ return ret;
}
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
@@ -1965,7 +1643,8 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
+ vout->cropped_offset;
- mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_VSYNC2;
omap_dispc_register_isr(omap_vout_isr, vout, mask);
@@ -2015,7 +1694,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
return -EINVAL;
vout->streaming = 0;
- mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_VSYNC2;
omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
@@ -2228,7 +1908,8 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
vout->mirror = 0;
vout->control[2].id = V4L2_CID_HFLIP;
vout->control[2].value = 0;
- vout->vrfb_bpp = 2;
+ if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
+ vout->vrfb_bpp = 2;
control[1].id = V4L2_CID_BG_COLOR;
control[1].value = 0;
@@ -2260,17 +1941,15 @@ static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
int vid_num)
{
u32 numbuffers;
- int ret = 0, i, j;
- int image_width, image_height;
- struct video_device *vfd;
+ int ret = 0, i;
+ struct omapvideo_info *ovid;
struct omap_vout_device *vout;
- int static_vrfb_allocation = 0, vrfb_num_bufs = VRFB_NUM_BUFS;
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
struct omap2video_device *vid_dev =
container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
vout = vid_dev->vouts[vid_num];
- vfd = vout->vfd;
+ ovid = &vout->vid_info;
numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
@@ -2287,66 +1966,16 @@ static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
}
}
- for (i = 0; i < VRFB_NUM_BUFS; i++) {
- if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
- dev_info(&pdev->dev, ": VRFB allocation failed\n");
- for (j = 0; j < i; j++)
- omap_vrfb_release_ctx(&vout->vrfb_context[j]);
- ret = -ENOMEM;
- goto free_buffers;
- }
- }
vout->cropped_offset = 0;
- /* Calculate VRFB memory size */
- /* allocate for worst case size */
- image_width = VID_MAX_WIDTH / TILE_SIZE;
- if (VID_MAX_WIDTH % TILE_SIZE)
- image_width++;
-
- image_width = image_width * TILE_SIZE;
- image_height = VID_MAX_HEIGHT / TILE_SIZE;
-
- if (VID_MAX_HEIGHT % TILE_SIZE)
- image_height++;
-
- image_height = image_height * TILE_SIZE;
- vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
-
- /*
- * Request and Initialize DMA, for DMA based VRFB transfer
- */
- vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
- vout->vrfb_dma_tx.dma_ch = -1;
- vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
- ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
- omap_vout_vrfb_dma_tx_callback,
- (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
- if (ret < 0) {
- vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
- dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
- " video%d\n", vfd->minor);
- }
- init_waitqueue_head(&vout->vrfb_dma_tx.wait);
-
- /* Allocate VRFB buffers if selected through bootargs */
- static_vrfb_allocation = (vid_num == 0) ?
- vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
-
- /* statically allocated the VRFB buffer is done through
- commands line aruments */
- if (static_vrfb_allocation) {
- if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
- ret = -ENOMEM;
- goto release_vrfb_ctx;
- }
- vout->vrfb_static_allocation = 1;
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ int static_vrfb_allocation = (vid_num == 0) ?
+ vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
+ ret = omap_vout_setup_vrfb_bufs(pdev, vid_num,
+ static_vrfb_allocation);
}
- return 0;
-release_vrfb_ctx:
- for (j = 0; j < VRFB_NUM_BUFS; j++)
- omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+ return ret;
free_buffers:
for (i = 0; i < numbuffers; i++) {
@@ -2389,6 +2018,10 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
vout->vid_info.num_overlays = 1;
vout->vid_info.id = k + 1;
+ /* Set VRFB as rotation_type for omap2 and omap3 */
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ vout->vid_info.rotation_type = VOUT_ROT_VRFB;
+
/* Setup the default configuration for the video devices
*/
if (omap_vout_setup_video_data(vout) != 0) {
@@ -2422,7 +2055,8 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
goto success;
error2:
- omap_vout_release_vrfb(vout);
+ if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
+ omap_vout_release_vrfb(vout);
omap_vout_free_buffers(vout);
error1:
video_device_release(vfd);
@@ -2443,11 +2077,13 @@ success:
static void omap_vout_cleanup_device(struct omap_vout_device *vout)
{
struct video_device *vfd;
+ struct omapvideo_info *ovid;
if (!vout)
return;
vfd = vout->vfd;
+ ovid = &vout->vid_info;
if (vfd) {
if (!video_is_registered(vfd)) {
/*
@@ -2463,14 +2099,15 @@ static void omap_vout_cleanup_device(struct omap_vout_device *vout)
video_unregister_device(vfd);
}
}
-
- omap_vout_release_vrfb(vout);
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ omap_vout_release_vrfb(vout);
+		/* Free the VRFB buffers if they were
+		 * allocated at init time
+		 */
+ if (vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+ }
omap_vout_free_buffers(vout);
- /* Free the VRFB buffer if allocated
- * init time
- */
- if (vout->vrfb_static_allocation)
- omap_vout_free_vrfb_buffers(vout);
kfree(vout);
}
diff --git a/drivers/media/video/omap/omap_vout_vrfb.c b/drivers/media/video/omap/omap_vout_vrfb.c
new file mode 100644
index 0000000..ebebcac4
--- /dev/null
+++ b/drivers/media/video/omap/omap_vout_vrfb.c
@@ -0,0 +1,390 @@
+/*
+ * omap_vout_vrfb.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-device.h>
+
+#include <plat/dma.h>
+#include <plat/vrfb.h>
+
+#include "omap_voutdef.h"
+#include "omap_voutlib.h"
+
+/*
+ * Function for allocating video buffers
+ */
+static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
+ unsigned int *count, int startindex)
+{
+ int i, j;
+
+ for (i = 0; i < *count; i++) {
+ if (!vout->smsshado_virt_addr[i]) {
+ vout->smsshado_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->smsshado_size,
+ &vout->smsshado_phy_addr[i]);
+ }
+ if (!vout->smsshado_virt_addr[i] && startindex != -1) {
+ if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
+ break;
+ }
+ if (!vout->smsshado_virt_addr[i]) {
+ for (j = 0; j < i; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ *count = 0;
+ return -ENOMEM;
+ }
+ memset((void *) vout->smsshado_virt_addr[i], 0,
+ vout->smsshado_size);
+ }
+ return 0;
+}
+
+/*
+ * Wakes up the application once the DMA transfer to VRFB space is completed.
+ */
+static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+ struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
+
+ t->tx_status = 1;
+ wake_up_interruptible(&t->wait);
+}
+
+/*
+ * Free VRFB buffers
+ */
+void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
+{
+ int j;
+
+ for (j = 0; j < VRFB_NUM_BUFS; j++) {
+ omap_vout_free_buffer(vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+}
+
+int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+ u32 static_vrfb_allocation)
+{
+ int ret = 0, i, j;
+ struct omap_vout_device *vout;
+ struct video_device *vfd;
+ int image_width, image_height;
+ int vrfb_num_bufs = VRFB_NUM_BUFS;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev =
+ container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+ vout = vid_dev->vouts[vid_num];
+ vfd = vout->vfd;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++) {
+ if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
+ dev_info(&pdev->dev, ": VRFB allocation failed\n");
+ for (j = 0; j < i; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+
+	/* Calculate the VRFB memory size; allocate for the worst case */
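+	/* Round the maximum resolution up to a multiple of TILE_SIZE */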
+ image_width = VID_MAX_WIDTH / TILE_SIZE;
+ if (VID_MAX_WIDTH % TILE_SIZE)
+ image_width++;
+
+ image_width = image_width * TILE_SIZE;
+ image_height = VID_MAX_HEIGHT / TILE_SIZE;
+
+ if (VID_MAX_HEIGHT % TILE_SIZE)
+ image_height++;
+
+ image_height = image_height * TILE_SIZE;
+ vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
+
+ /*
+ * Request and Initialize DMA, for DMA based VRFB transfer
+ */
+ vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
+ vout->vrfb_dma_tx.dma_ch = -1;
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
+ ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
+ omap_vout_vrfb_dma_tx_callback,
+ (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
+ if (ret < 0) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
+ " video%d\n", vfd->minor);
+ }
+ init_waitqueue_head(&vout->vrfb_dma_tx.wait);
+
+	/* Static allocation of the VRFB buffers is requested through
+	   command line arguments */
+ if (static_vrfb_allocation) {
+ if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
+ ret = -ENOMEM;
+ goto release_vrfb_ctx;
+ }
+ vout->vrfb_static_allocation = 1;
+ }
+ return 0;
+
+release_vrfb_ctx:
+ for (j = 0; j < VRFB_NUM_BUFS; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+free_buffers:
+ omap_vout_free_buffers(vout);
+
+ return ret;
+}
+
+/*
+ * Release the VRFB context once the module exits
+ */
+void omap_vout_release_vrfb(struct omap_vout_device *vout)
+{
+ int i;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[i]);
+
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ omap_free_dma(vout->vrfb_dma_tx.dma_ch);
+ }
+}
+
+/*
+ * Allocate the buffers for the VRFB space. Data is copied from V4L2
+ * buffers to the VRFB buffers using the DMA engine.
+ */
+int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex)
+{
+ int i;
+ bool yuv_mode;
+
+ if (!is_rotation_enabled(vout))
+ return 0;
+
+ /* If rotation is enabled, allocate memory for VRFB space also */
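+	/* At most VRFB_NUM_BUFS VRFB contexts exist, so clamp the buffer count */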
+ *count = *count > VRFB_NUM_BUFS ? VRFB_NUM_BUFS : *count;
+
+	/* Allocate the VRFB buffers only if they were not
+	 * already allocated at init time.
+	 */
+ if (!vout->vrfb_static_allocation)
+ if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
+ return -ENOMEM;
+
+ if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
+ vout->dss_mode == OMAP_DSS_COLOR_UYVY)
+ yuv_mode = true;
+ else
+ yuv_mode = false;
+
+ for (i = 0; i < *count; i++)
+ omap_vrfb_setup(&vout->vrfb_context[i],
+ vout->smsshado_phy_addr[i], vout->pix.width,
+ vout->pix.height, vout->bpp, yuv_mode);
+
+ return 0;
+}
+
+int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ struct videobuf_buffer *vb)
+{
+ dma_addr_t dmabuf;
+ struct vid_vrfb_dma *tx;
+ enum dss_rotation rotation;
+ u32 dest_frame_index = 0, src_element_index = 0;
+ u32 dest_element_index = 0, src_frame_index = 0;
+ u32 elem_count = 0, frame_count = 0, pixsize = 2;
+
+ if (!is_rotation_enabled(vout))
+ return 0;
+
+ dmabuf = vout->buf_phy_addr[vb->i];
+ /* If rotation is enabled, copy input buffer into VRFB
+ * memory space using DMA. We are copying input buffer
+ * into VRFB memory space of desired angle and DSS will
+ * read image VRFB memory for 0 degree angle
+ */
+ pixsize = vout->bpp * vout->vrfb_bpp;
+ /*
+ * DMA transfer in double index mode
+ */
+
+ /* Frame index */
+ dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
+ (vout->pix.width * vout->bpp)) + 1;
+
+ /* Source and destination parameters */
+ src_element_index = 0;
+ src_frame_index = 0;
+ dest_element_index = 1;
+ /* Number of elements per frame */
+ elem_count = vout->pix.width * vout->bpp;
+ frame_count = vout->pix.height;
+ tx = &vout->vrfb_dma_tx;
+ tx->tx_status = 0;
+ omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
+ tx->dev_id, 0x0);
+ /* src_port required only for OMAP1 */
+ omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dmabuf, src_element_index, src_frame_index);
+	/* set DMA source burst mode for VRFB */
+ omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ rotation = calc_rotation(vout);
+
+ /* dest_port required only for OMAP1 */
+ omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
+ vout->vrfb_context[vb->i].paddr[0], dest_element_index,
+ dest_frame_index);
+	/* set DMA dest burst mode for VRFB */
+ omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+
+ omap_start_dma(tx->dma_ch);
+ interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
+
+ if (tx->tx_status == 0) {
+ omap_stop_dma(tx->dma_ch);
+ return -EINVAL;
+ }
+ /* Store buffers physical address into an array. Addresses
+ * from this array will be used to configure DSS */
+ vout->queued_buf_addr[vb->i] = (u8 *)
+ vout->vrfb_context[vb->i].paddr[rotation];
+ return 0;
+}
+
+/*
+ * Calculate the buffer offsets from which the streaming should
+ * start. This offset calculation is mainly required because of
+ * the 32-pixel alignment that VRFB requires when rotation is used.
+ */
+void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
+{
+ enum dss_rotation rotation;
+ bool mirroring = vout->mirror;
+ struct v4l2_rect *crop = &vout->crop;
+ struct v4l2_pix_format *pix = &vout->pix;
+ int *cropped_offset = &vout->cropped_offset;
+ int vr_ps = 1, ps = 2, temp_ps = 2;
+ int offset = 0, ctop = 0, cleft = 0, line_length = 0;
+
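+	/* calc_rotation() folds mirroring into the effective rotation */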
+ rotation = calc_rotation(vout);
+
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat) {
+ if (is_rotation_enabled(vout)) {
+ /*
+ * ps - Actual pixel size for YUYV/UYVY for
+ * VRFB/Mirroring is 4 bytes
+			 * vr_ps - Virtual pixel size for YUYV/UYVY is
+ * 2 bytes
+ */
+ ps = 4;
+ vr_ps = 2;
+ } else {
+ ps = 2; /* otherwise the pixel size is 2 byte */
+ }
+ } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
+ ps = 4;
+ } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
+ ps = 3;
+ }
+ vout->ps = ps;
+ vout->vr_ps = vr_ps;
+
+ if (is_rotation_enabled(vout)) {
+ line_length = MAX_PIXELS_PER_LINE;
+ ctop = (pix->height - crop->height) - crop->top;
+ cleft = (pix->width - crop->width) - crop->left;
+ } else {
+ line_length = pix->width;
+ }
+ vout->line_length = line_length;
+ switch (rotation) {
+ case dss_rotation_90_degree:
+ offset = vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * cleft + crop->top * temp_ps;
+ } else {
+ *cropped_offset = offset + line_length * temp_ps *
+ cleft + crop->top * temp_ps + (line_length *
+ ((crop->width / (vr_ps)) - 1) * ps);
+ }
+ break;
+ case dss_rotation_180_degree:
+ offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp) +
+ (vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp));
+ if (mirroring == 0) {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps;
+
+ } else {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps + (line_length *
+ (crop->height - 1) * ps);
+ }
+ break;
+ case dss_rotation_270_degree:
+ offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps;
+ } else {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps +
+ (line_length * ((crop->width / vr_ps) - 1) *
+ ps);
+ }
+ break;
+ case dss_rotation_0_degree:
+ if (mirroring == 0) {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps;
+ } else {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps +
+ (line_length * (crop->height - 1) * ps);
+ }
+ break;
+ default:
+ *cropped_offset = (line_length * ps * crop->top) /
+ vr_ps + (crop->left * ps) / vr_ps +
+ ((crop->width / vr_ps) - 1) * ps;
+ break;
+ }
+}
diff --git a/drivers/media/video/omap/omap_vout_vrfb.h b/drivers/media/video/omap/omap_vout_vrfb.h
new file mode 100644
index 0000000..ffde741
--- /dev/null
+++ b/drivers/media/video/omap/omap_vout_vrfb.h
@@ -0,0 +1,40 @@
+/*
+ * omap_vout_vrfb.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUT_VRFB_H
+#define OMAP_VOUT_VRFB_H
+
+#ifdef CONFIG_VIDEO_OMAP2_VOUT_VRFB
+void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout);
+int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+ u32 static_vrfb_allocation);
+void omap_vout_release_vrfb(struct omap_vout_device *vout);
+int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex);
+int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ struct videobuf_buffer *vb);
+void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout);
+#else
+static inline void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) { }
+static inline int omap_vout_setup_vrfb_bufs(struct platform_device *pdev,
+		int vid_num, u32 static_vrfb_allocation)
+	{ return 0; }
+static inline void omap_vout_release_vrfb(struct omap_vout_device *vout) { }
+static inline int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+		unsigned int *count, unsigned int startindex)
+	{ return 0; }
+static inline int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+		struct videobuf_buffer *vb)
+	{ return 0; }
+static inline void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout) { }
+#endif
+
+#endif
diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h
index 659497b..d793501 100644
--- a/drivers/media/video/omap/omap_voutdef.h
+++ b/drivers/media/video/omap/omap_voutdef.h
@@ -12,6 +12,7 @@
#define OMAP_VOUTDEF_H
#include <video/omapdss.h>
+#include <plat/vrfb.h>
#define YUYV_BPP 2
#define RGB565_BPP 2
@@ -27,6 +28,31 @@
#define MAX_DISPLAYS 3
#define MAX_MANAGERS 3
+#define QQVGA_WIDTH 160
+#define QQVGA_HEIGHT 120
+
+/* Max Resolution supported by the driver */
+#define VID_MAX_WIDTH 1280 /* Largest width */
+#define VID_MAX_HEIGHT 720 /* Largest height */
+
+/* Minimum requirement is 2x2 for DSS */
+#define VID_MIN_WIDTH 2
+#define VID_MIN_HEIGHT 2
+
+/* 2048 x 2048 is max res supported by OMAP display controller */
+#define MAX_PIXELS_PER_LINE 2048
+
+#define VRFB_TX_TIMEOUT 1000
+#define VRFB_NUM_BUFS 4
+
+/* Max buffer size to be allocated during init */
+#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
+
+enum dma_channel_state {
+ DMA_CHAN_NOT_ALLOTED,
+ DMA_CHAN_ALLOTED,
+};
+
/* Enum for Rotation
* DSS understands rotation in 0, 1, 2, 3 context
* while V4L2 driver understands it as 0, 90, 180, 270
@@ -37,6 +63,18 @@ enum dss_rotation {
dss_rotation_180_degree = 2,
dss_rotation_270_degree = 3,
};
+
+/* Enum for choosing the rotation type for vout.
+ * DSS2 does not offer a "no rotation" option,
+ * while the V4L2 driver cannot support rotation
+ * when VRFB support is not built into the kernel.
+ */
+enum vout_rotaion_type {
+ VOUT_ROT_NONE = 0,
+ VOUT_ROT_VRFB = 1,
+};
+
/*
* This structure is used to store the DMA transfer parameters
* for VRFB hidden buffer
@@ -53,6 +91,7 @@ struct omapvideo_info {
int id;
int num_overlays;
struct omap_overlay *overlays[MAX_OVLS];
+ enum vout_rotaion_type rotation_type;
};
struct omap2video_device {
@@ -144,4 +183,43 @@ struct omap_vout_device {
int io_allowed;
};
+
+/*
+ * Return true if rotation is 90 or 270
+ */
+static inline int is_rotation_90_or_270(const struct omap_vout_device *vout)
+{
+ return (vout->rotation == dss_rotation_90_degree ||
+ vout->rotation == dss_rotation_270_degree);
+}
+
+/*
+ * Return true if rotation is enabled
+ */
+static inline int is_rotation_enabled(const struct omap_vout_device *vout)
+{
+ return vout->rotation || vout->mirror;
+}
+
+/*
+ * Reverse the rotation degree if mirroring is enabled
+ */
+static inline int calc_rotation(const struct omap_vout_device *vout)
+{
+ if (!vout->mirror)
+ return vout->rotation;
+
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ return dss_rotation_270_degree;
+ case dss_rotation_270_degree:
+ return dss_rotation_90_degree;
+ case dss_rotation_180_degree:
+ return dss_rotation_0_degree;
+ default:
+ return dss_rotation_180_degree;
+ }
+}
+
+void omap_vout_free_buffers(struct omap_vout_device *vout);
#endif /* ifndef OMAP_VOUTDEF_H */
diff --git a/drivers/media/video/omap/omap_voutlib.c b/drivers/media/video/omap/omap_voutlib.c
index 8ae7481..115408b 100644
--- a/drivers/media/video/omap/omap_voutlib.c
+++ b/drivers/media/video/omap/omap_voutlib.c
@@ -24,8 +24,12 @@
#include <linux/types.h>
#include <linux/videodev2.h>
+#include <linux/dma-mapping.h>
+
#include <plat/cpu.h>
+#include "omap_voutlib.h"
+
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("OMAP Video library");
MODULE_LICENSE("GPL");
@@ -291,3 +295,45 @@ void omap_vout_new_format(struct v4l2_pix_format *pix,
}
EXPORT_SYMBOL_GPL(omap_vout_new_format);
+/*
+ * Allocate buffers
+ */
+unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
+{
+ u32 order, size;
+ unsigned long virt_addr, addr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+ virt_addr = __get_free_pages(GFP_KERNEL, order);
+ addr = virt_addr;
+
+ if (virt_addr) {
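+		/* Reserve each page so the buffer can later be mmapped to user space */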
+ while (size > 0) {
+ SetPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+ *phys_addr = (u32) virt_to_phys((void *) virt_addr);
+ return virt_addr;
+}
+
+/*
+ * Free buffers
+ */
+void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
+{
+ u32 order, size;
+ unsigned long addr = virtaddr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+
+ while (size > 0) {
+ ClearPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ free_pages((unsigned long) virtaddr, order);
+}
diff --git a/drivers/media/video/omap/omap_voutlib.h b/drivers/media/video/omap/omap_voutlib.h
index a60b16e..e51750a 100644
--- a/drivers/media/video/omap/omap_voutlib.h
+++ b/drivers/media/video/omap/omap_voutlib.h
@@ -12,23 +12,25 @@
#ifndef OMAP_VOUTLIB_H
#define OMAP_VOUTLIB_H
-extern void omap_vout_default_crop(struct v4l2_pix_format *pix,
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop);
-extern int omap_vout_new_crop(struct v4l2_pix_format *pix,
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
struct v4l2_rect *crop, struct v4l2_window *win,
struct v4l2_framebuffer *fbuf,
const struct v4l2_rect *new_crop);
-extern int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
struct v4l2_window *new_win);
-extern int omap_vout_new_window(struct v4l2_rect *crop,
+int omap_vout_new_window(struct v4l2_rect *crop,
struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
struct v4l2_window *new_win);
-extern void omap_vout_new_format(struct v4l2_pix_format *pix,
+void omap_vout_new_format(struct v4l2_pix_format *pix,
struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
struct v4l2_window *win);
+unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr);
+void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size);
#endif /* #ifndef OMAP_VOUTLIB_H */
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index e7cfc85..8a947e6 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <media/omap1_camera.h>
#include <media/soc_camera.h>
@@ -38,7 +37,7 @@
#define DRIVER_NAME "omap1-camera"
-#define VERSION_CODE KERNEL_VERSION(0, 0, 1)
+#define DRIVER_VERSION "0.0.2"
/*
@@ -208,7 +207,7 @@ static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
struct soc_camera_device *icd = vq->priv_data;
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
if (bytes_per_line < 0)
@@ -222,7 +221,7 @@ static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
*count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
- dev_dbg(icd->dev.parent,
+ dev_dbg(icd->parent,
"%s: count=%d, size=%d\n", __func__, *count, *size);
return 0;
@@ -241,7 +240,7 @@ static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf,
videobuf_dma_contig_free(vq, vb);
} else {
struct soc_camera_device *icd = vq->priv_data;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
videobuf_dma_unmap(dev, dma);
@@ -258,7 +257,7 @@ static int omap1_videobuf_prepare(struct videobuf_queue *vq,
struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb);
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
int ret;
@@ -490,7 +489,7 @@ static void omap1_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
struct omap1_cam_buf *buf;
u32 mode;
@@ -519,7 +518,7 @@ static void omap1_videobuf_queue(struct videobuf_queue *vq,
pcdev->active = buf;
pcdev->ready = NULL;
- dev_dbg(icd->dev.parent,
+ dev_dbg(icd->parent,
"%s: capture not active, setup FIFO, start DMA\n", __func__);
mode = CAM_READ_CACHE(pcdev, MODE) & ~THRESHOLD_MASK;
mode |= THRESHOLD_LEVEL(pcdev->vb_mode) << THRESHOLD_SHIFT;
@@ -543,8 +542,8 @@ static void omap1_videobuf_release(struct videobuf_queue *vq,
struct omap1_cam_buf *buf =
container_of(vb, struct omap1_cam_buf, vb);
struct soc_camera_device *icd = vq->priv_data;
- struct device *dev = icd->dev.parent;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct omap1_cam_dev *pcdev = ici->priv;
switch (vb->state) {
@@ -573,7 +572,7 @@ static void videobuf_done(struct omap1_cam_dev *pcdev,
{
struct omap1_cam_buf *buf = pcdev->active;
struct videobuf_buffer *vb;
- struct device *dev = pcdev->icd->dev.parent;
+ struct device *dev = pcdev->icd->parent;
if (WARN_ON(!buf)) {
suspend_capture(pcdev);
@@ -799,7 +798,7 @@ out:
static irqreturn_t cam_isr(int irq, void *data)
{
struct omap1_cam_dev *pcdev = data;
- struct device *dev = pcdev->icd->dev.parent;
+ struct device *dev = pcdev->icd->parent;
struct omap1_cam_buf *buf = pcdev->active;
u32 it_status;
unsigned long flags;
@@ -909,7 +908,7 @@ static void sensor_reset(struct omap1_cam_dev *pcdev, bool reset)
*/
static int omap1_cam_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
u32 ctrlclock;
@@ -952,14 +951,14 @@ static int omap1_cam_add_device(struct soc_camera_device *icd)
pcdev->icd = icd;
- dev_dbg(icd->dev.parent, "OMAP1 Camera driver attached to camera %d\n",
+ dev_dbg(icd->parent, "OMAP1 Camera driver attached to camera %d\n",
icd->devnum);
return 0;
}
static void omap1_cam_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
u32 ctrlclock;
@@ -985,7 +984,7 @@ static void omap1_cam_remove_device(struct soc_camera_device *icd)
pcdev->icd = NULL;
- dev_dbg(icd->dev.parent,
+ dev_dbg(icd->parent,
"OMAP1 Camera driver detached from camera %d\n", icd->devnum);
}
@@ -1070,7 +1069,7 @@ static int omap1_cam_get_formats(struct soc_camera_device *icd,
unsigned int idx, struct soc_camera_format_xlate *xlate)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
int formats = 0, ret;
enum v4l2_mbus_pixelcode code;
const struct soc_mbus_pixelfmt *fmt;
@@ -1222,9 +1221,9 @@ static int omap1_cam_set_crop(struct soc_camera_device *icd,
struct v4l2_rect *rect = &crop->c;
const struct soc_camera_format_xlate *xlate = icd->current_fmt;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct omap1_cam_dev *pcdev = ici->priv;
- struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf;
int ret;
@@ -1270,8 +1269,8 @@ static int omap1_cam_set_fmt(struct soc_camera_device *icd,
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
- struct device *dev = icd->dev.parent;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct omap1_cam_dev *pcdev = ici->priv;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_mbus_framefmt mf;
@@ -1326,7 +1325,7 @@ static int omap1_cam_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %#x not found\n",
+ dev_warn(icd->parent, "Format %#x not found\n",
pix->pixelformat);
return -EINVAL;
}
@@ -1362,7 +1361,7 @@ static int omap1_cam_mmap_mapper(struct videobuf_queue *q,
struct vm_area_struct *vma)
{
struct soc_camera_device *icd = q->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
int ret;
@@ -1377,17 +1376,17 @@ static int omap1_cam_mmap_mapper(struct videobuf_queue *q,
static void omap1_cam_init_videobuf(struct videobuf_queue *q,
struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
if (!sg_mode)
videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops,
- icd->dev.parent, &pcdev->lock,
+ icd->parent, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
else
videobuf_queue_sg_init(q, &omap1_videobuf_ops,
- icd->dev.parent, &pcdev->lock,
+ icd->parent, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
@@ -1431,7 +1430,6 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
- cap->version = VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
@@ -1440,9 +1438,9 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
static int omap1_cam_set_bus_param(struct soc_camera_device *icd,
__u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct omap1_cam_dev *pcdev = ici->priv;
- struct device *dev = icd->dev.parent;
const struct soc_camera_format_xlate *xlate;
const struct soc_mbus_pixelfmt *fmt;
unsigned long camera_flags, common_flags;
@@ -1718,4 +1716,5 @@ MODULE_PARM_DESC(sg_mode, "videobuf mode, 0: dma-contig (default), 1: dma-sg");
MODULE_DESCRIPTION("OMAP1 Camera Interface driver");
MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index 69b60ba..eb97bff 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -31,7 +31,6 @@
#include <linux/interrupt.h>
#include <linux/videodev2.h>
#include <linux/pci.h> /* needed for videobufs */
-#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -43,7 +42,7 @@
#include "omap24xxcam.h"
-#define OMAP24XXCAM_VERSION KERNEL_VERSION(0, 0, 0)
+#define OMAP24XXCAM_VERSION "0.0.1"
#define RESET_TIMEOUT_NS 10000
@@ -309,11 +308,11 @@ static int omap24xxcam_vbq_alloc_mmap_buffer(struct videobuf_buffer *vb)
order--;
/* try to allocate as many contiguous pages as possible */
- page = alloc_pages(GFP_KERNEL | GFP_DMA, order);
+ page = alloc_pages(GFP_KERNEL, order);
/* if allocation fails, try to allocate smaller amount */
while (page == NULL) {
order--;
- page = alloc_pages(GFP_KERNEL | GFP_DMA, order);
+ page = alloc_pages(GFP_KERNEL, order);
if (page == NULL && !order) {
err = -ENOMEM;
goto out;
@@ -993,7 +992,6 @@ static int vidioc_querycap(struct file *file, void *fh,
strlcpy(cap->driver, CAM_NAME, sizeof(cap->driver));
strlcpy(cap->card, cam->vfd->name, sizeof(cap->card));
- cap->version = OMAP24XXCAM_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
@@ -1888,6 +1886,7 @@ static void __exit omap24xxcam_cleanup(void)
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(OMAP24XXCAM_VERSION);
module_param(video_nr, int, 0);
MODULE_PARM_DESC(video_nr,
"Minor number for video device (-1 ==> auto assign)");
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 94b6ed8..5cea2bb 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -2234,3 +2234,4 @@ module_exit(isp_cleanup);
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("TI OMAP3 ISP driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(ISP_VIDEO_DRIVER_VERSION);
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index 2620c40..529e582 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -139,6 +139,10 @@ struct isp_reg {
* 3 - CAMEXT[13:6] -> CAM[7:0]
* @clk_pol: Pixel clock polarity
* 0 - Non Inverted, 1 - Inverted
+ * @hs_pol: Horizontal synchronization polarity
+ * 0 - Active high, 1 - Active low
+ * @vs_pol: Vertical synchronization polarity
+ * 0 - Active high, 1 - Active low
* @bridge: CCDC Bridge input control
* ISPCTRL_PAR_BRIDGE_DISABLE - Disable
* ISPCTRL_PAR_BRIDGE_LENDIAN - Little endian
@@ -147,6 +151,8 @@ struct isp_reg {
struct isp_parallel_platform_data {
unsigned int data_lane_shift:2;
unsigned int clk_pol:1;
+ unsigned int hs_pol:1;
+ unsigned int vs_pol:1;
unsigned int bridge:4;
};
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 39d501b..9d3459d 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -1148,6 +1148,8 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
omap3isp_configure_bridge(isp, ccdc->input, pdata, shift);
ccdc->syncif.datsz = depth_out;
+ ccdc->syncif.hdpol = pdata ? pdata->hs_pol : 0;
+ ccdc->syncif.vdpol = pdata ? pdata->vs_pol : 0;
ccdc_config_sync_if(ccdc, &ccdc->syncif);
/* CCDC_PAD_SINK */
@@ -1691,7 +1693,7 @@ static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
if (sub->type != V4L2_EVENT_OMAP3ISP_HS_VS)
return -EINVAL;
- return v4l2_event_subscribe(fh, sub);
+ return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS);
}
static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
@@ -2162,7 +2164,6 @@ static int ccdc_init_entities(struct isp_ccdc_device *ccdc)
sd->grp_id = 1 << 16; /* group ID for isp subdevs */
v4l2_set_subdevdata(sd, ccdc);
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- sd->nevents = OMAP3ISP_CCDC_NEVENTS;
pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
@@ -2257,8 +2258,6 @@ int omap3isp_ccdc_init(struct isp_device *isp)
ccdc->syncif.fldout = 0;
ccdc->syncif.fldpol = 0;
ccdc->syncif.fldstat = 0;
- ccdc->syncif.hdpol = 0;
- ccdc->syncif.vdpol = 0;
ccdc->clamp.oblen = 0;
ccdc->clamp.dcsubval = 0;
diff --git a/drivers/media/video/omap3isp/ispccp2.c b/drivers/media/video/omap3isp/ispccp2.c
index 0e16cab..ec9e395 100644
--- a/drivers/media/video/omap3isp/ispccp2.c
+++ b/drivers/media/video/omap3isp/ispccp2.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
#include "isp.h"
#include "ispreg.h"
@@ -163,6 +164,9 @@ static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
int i;
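+	/* Power the CSIb IO complex (vdds_csib) before enabling the LCx channels */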
+ if (enable && ccp2->vdds_csib)
+ regulator_enable(ccp2->vdds_csib);
+
/* Enable/Disable all the LCx channels */
for (i = 0; i < CCP2_LCx_CHANS_NUM; i++)
isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(i),
@@ -186,6 +190,9 @@ static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
ISPCCP2_LC01_IRQENABLE,
ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ);
}
+
+ if (!enable && ccp2->vdds_csib)
+ regulator_disable(ccp2->vdds_csib);
}
/*
@@ -1137,6 +1144,9 @@ error:
*/
void omap3isp_ccp2_cleanup(struct isp_device *isp)
{
+ struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
+
+ regulator_put(ccp2->vdds_csib);
}
/*
@@ -1151,14 +1161,27 @@ int omap3isp_ccp2_init(struct isp_device *isp)
init_waitqueue_head(&ccp2->wait);
- /* On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with
+ /*
+ * On the OMAP34xx the CSI1 receiver is operated in the CSIb IO
+	 * complex, which is powered by the vdds_csib power rail, hence
+	 * the regulator request.
+ *
+ * On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with
* the CSI2c or CSI2a receivers. The PHY then needs to be explicitly
* configured.
*
* TODO: Don't hardcode the usage of PHY1 (shared with CSI2c).
*/
- if (isp->revision == ISP_REVISION_15_0)
+ if (isp->revision == ISP_REVISION_2_0) {
+ ccp2->vdds_csib = regulator_get(isp->dev, "vdds_csib");
+ if (IS_ERR(ccp2->vdds_csib)) {
+ dev_dbg(isp->dev,
+ "Could not get regulator vdds_csib\n");
+ ccp2->vdds_csib = NULL;
+ }
+ } else if (isp->revision == ISP_REVISION_15_0) {
ccp2->phy = &isp->isp_csiphy1;
+ }
ret = ccp2_init_entities(ccp2);
if (ret < 0)
diff --git a/drivers/media/video/omap3isp/ispccp2.h b/drivers/media/video/omap3isp/ispccp2.h
index 5505a86..6674e9d 100644
--- a/drivers/media/video/omap3isp/ispccp2.h
+++ b/drivers/media/video/omap3isp/ispccp2.h
@@ -81,6 +81,7 @@ struct isp_ccp2_device {
struct isp_interface_mem_config mem_cfg;
struct isp_video video_in;
struct isp_csiphy *phy;
+ struct regulator *vdds_csib;
unsigned int error;
enum isp_pipeline_stream_state state;
wait_queue_head_t wait;
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index b44cb68..8080659 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -1032,7 +1032,6 @@ static int isp_stat_init_entities(struct ispstat *stat, const char *name,
snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
subdev->grp_id = 1 << 16; /* group ID for isp subdevs */
subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- subdev->nevents = STAT_NEVENTS;
v4l2_set_subdevdata(subdev, stat);
stat->pad.flags = MEDIA_PAD_FL_SINK;
@@ -1050,7 +1049,7 @@ int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
if (sub->type != stat->event_type)
return -EINVAL;
- return v4l2_event_subscribe(fh, sub);
+ return v4l2_event_subscribe(fh, sub, STAT_NEVENTS);
}
int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index 9cd8f1a..fd965ad 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -695,7 +695,6 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
strlcpy(cap->card, video->video.name, sizeof(cap->card));
strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
- cap->version = ISP_VIDEO_DRIVER_VERSION;
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/omap3isp/ispvideo.h b/drivers/media/video/omap3isp/ispvideo.h
index 911bea6..53160aa 100644
--- a/drivers/media/video/omap3isp/ispvideo.h
+++ b/drivers/media/video/omap3isp/ispvideo.h
@@ -27,7 +27,6 @@
#define OMAP3_ISP_VIDEO_H
#include <linux/v4l2-mediabus.h>
-#include <linux/version.h>
#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
@@ -35,7 +34,7 @@
#include "ispqueue.h"
#define ISP_VIDEO_DRIVER_NAME "ispvideo"
-#define ISP_VIDEO_DRIVER_VERSION KERNEL_VERSION(0, 0, 1)
+#define ISP_VIDEO_DRIVER_VERSION "0.0.2"
struct isp_device;
struct isp_video;
diff --git a/drivers/media/video/ov2640.c b/drivers/media/video/ov2640.c
index 0cea0cf..9ce2fa0 100644
--- a/drivers/media/video/ov2640.c
+++ b/drivers/media/video/ov2640.c
@@ -1031,16 +1031,9 @@ static int ov2640_video_probe(struct soc_camera_device *icd,
const char *devname;
int ret;
- /*
- * we must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface) {
- dev_err(&client->dev, "Parent missing or invalid!\n");
- ret = -ENODEV;
- goto err;
- }
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* check and show product ID and manufacturer ID
diff --git a/drivers/media/video/ov5642.c b/drivers/media/video/ov5642.c
new file mode 100644
index 0000000..349a4ad
--- /dev/null
+++ b/drivers/media/video/ov5642.c
@@ -0,0 +1,1012 @@
+/*
+ * Driver for OV5642 CMOS Image Sensor from Omnivision
+ *
+ * Copyright (C) 2011, Bastian Hecht <hechtb@gmail.com>
+ *
+ * Based on Sony IMX074 Camera Driver
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * Based on Omnivision OV7670 Camera Driver
+ * Copyright (C) 2006-7 Jonathan Corbet <corbet@lwn.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/module.h>
+
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-subdev.h>
+
+/* OV5642 registers */
+#define REG_CHIP_ID_HIGH 0x300a
+#define REG_CHIP_ID_LOW 0x300b
+
+#define REG_WINDOW_START_X_HIGH 0x3800
+#define REG_WINDOW_START_X_LOW 0x3801
+#define REG_WINDOW_START_Y_HIGH 0x3802
+#define REG_WINDOW_START_Y_LOW 0x3803
+#define REG_WINDOW_WIDTH_HIGH 0x3804
+#define REG_WINDOW_WIDTH_LOW 0x3805
+#define REG_WINDOW_HEIGHT_HIGH 0x3806
+#define REG_WINDOW_HEIGHT_LOW 0x3807
+#define REG_OUT_WIDTH_HIGH 0x3808
+#define REG_OUT_WIDTH_LOW 0x3809
+#define REG_OUT_HEIGHT_HIGH 0x380a
+#define REG_OUT_HEIGHT_LOW 0x380b
+#define REG_OUT_TOTAL_WIDTH_HIGH 0x380c
+#define REG_OUT_TOTAL_WIDTH_LOW 0x380d
+#define REG_OUT_TOTAL_HEIGHT_HIGH 0x380e
+#define REG_OUT_TOTAL_HEIGHT_LOW 0x380f
+
+/*
+ * Define the standard resolution.
+ * Currently works only for up to 720 lines,
+ * e.g. 320x240, 640x480, 800x600, 1280x720, 2048x720
+ */
+
+#define OV5642_WIDTH 1280
+#define OV5642_HEIGHT 720
+#define OV5642_TOTAL_WIDTH 3200
+#define OV5642_TOTAL_HEIGHT 2000
+#define OV5642_SENSOR_SIZE_X 2592
+#define OV5642_SENSOR_SIZE_Y 1944
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+static struct regval_list ov5642_default_regs_init[] = {
+ { 0x3103, 0x93 },
+ { 0x3008, 0x82 },
+ { 0x3017, 0x7f },
+ { 0x3018, 0xfc },
+ { 0x3810, 0xc2 },
+ { 0x3615, 0xf0 },
+ { 0x3000, 0x0 },
+ { 0x3001, 0x0 },
+ { 0x3002, 0x0 },
+ { 0x3003, 0x0 },
+ { 0x3004, 0xff },
+ { 0x3030, 0x2b },
+ { 0x3011, 0x8 },
+ { 0x3010, 0x10 },
+ { 0x3604, 0x60 },
+ { 0x3622, 0x60 },
+ { 0x3621, 0x9 },
+ { 0x3709, 0x0 },
+ { 0x4000, 0x21 },
+ { 0x401d, 0x22 },
+ { 0x3600, 0x54 },
+ { 0x3605, 0x4 },
+ { 0x3606, 0x3f },
+ { 0x3c01, 0x80 },
+ { 0x300d, 0x22 },
+ { 0x3623, 0x22 },
+ { 0x5000, 0x4f },
+ { 0x5020, 0x4 },
+ { 0x5181, 0x79 },
+ { 0x5182, 0x0 },
+ { 0x5185, 0x22 },
+ { 0x5197, 0x1 },
+ { 0x5500, 0xa },
+ { 0x5504, 0x0 },
+ { 0x5505, 0x7f },
+ { 0x5080, 0x8 },
+ { 0x300e, 0x18 },
+ { 0x4610, 0x0 },
+ { 0x471d, 0x5 },
+ { 0x4708, 0x6 },
+ { 0x370c, 0xa0 },
+ { 0x5687, 0x94 },
+ { 0x501f, 0x0 },
+ { 0x5000, 0x4f },
+ { 0x5001, 0xcf },
+ { 0x4300, 0x30 },
+ { 0x4300, 0x30 },
+ { 0x460b, 0x35 },
+ { 0x471d, 0x0 },
+ { 0x3002, 0xc },
+ { 0x3002, 0x0 },
+ { 0x4713, 0x3 },
+ { 0x471c, 0x50 },
+ { 0x4721, 0x2 },
+ { 0x4402, 0x90 },
+ { 0x460c, 0x22 },
+ { 0x3815, 0x44 },
+ { 0x3503, 0x7 },
+ { 0x3501, 0x73 },
+ { 0x3502, 0x80 },
+ { 0x350b, 0x0 },
+ { 0x3818, 0xc8 },
+ { 0x3824, 0x11 },
+ { 0x3a00, 0x78 },
+ { 0x3a1a, 0x4 },
+ { 0x3a13, 0x30 },
+ { 0x3a18, 0x0 },
+ { 0x3a19, 0x7c },
+ { 0x3a08, 0x12 },
+ { 0x3a09, 0xc0 },
+ { 0x3a0a, 0xf },
+ { 0x3a0b, 0xa0 },
+ { 0x350c, 0x7 },
+ { 0x350d, 0xd0 },
+ { 0x3a0d, 0x8 },
+ { 0x3a0e, 0x6 },
+ { 0x3500, 0x0 },
+ { 0x3501, 0x0 },
+ { 0x3502, 0x0 },
+ { 0x350a, 0x0 },
+ { 0x350b, 0x0 },
+ { 0x3503, 0x0 },
+ { 0x3a0f, 0x3c },
+ { 0x3a10, 0x32 },
+ { 0x3a1b, 0x3c },
+ { 0x3a1e, 0x32 },
+ { 0x3a11, 0x80 },
+ { 0x3a1f, 0x20 },
+ { 0x3030, 0x2b },
+ { 0x3a02, 0x0 },
+ { 0x3a03, 0x7d },
+ { 0x3a04, 0x0 },
+ { 0x3a14, 0x0 },
+ { 0x3a15, 0x7d },
+ { 0x3a16, 0x0 },
+ { 0x3a00, 0x78 },
+ { 0x3a08, 0x9 },
+ { 0x3a09, 0x60 },
+ { 0x3a0a, 0x7 },
+ { 0x3a0b, 0xd0 },
+ { 0x3a0d, 0x10 },
+ { 0x3a0e, 0xd },
+ { 0x4407, 0x4 },
+ { 0x5193, 0x70 },
+ { 0x589b, 0x0 },
+ { 0x589a, 0xc0 },
+ { 0x401e, 0x20 },
+ { 0x4001, 0x42 },
+ { 0x401c, 0x6 },
+ { 0x3825, 0xac },
+ { 0x3827, 0xc },
+ { 0x528a, 0x1 },
+ { 0x528b, 0x4 },
+ { 0x528c, 0x8 },
+ { 0x528d, 0x10 },
+ { 0x528e, 0x20 },
+ { 0x528f, 0x28 },
+ { 0x5290, 0x30 },
+ { 0x5292, 0x0 },
+ { 0x5293, 0x1 },
+ { 0x5294, 0x0 },
+ { 0x5295, 0x4 },
+ { 0x5296, 0x0 },
+ { 0x5297, 0x8 },
+ { 0x5298, 0x0 },
+ { 0x5299, 0x10 },
+ { 0x529a, 0x0 },
+ { 0x529b, 0x20 },
+ { 0x529c, 0x0 },
+ { 0x529d, 0x28 },
+ { 0x529e, 0x0 },
+ { 0x529f, 0x30 },
+ { 0x5282, 0x0 },
+ { 0x5300, 0x0 },
+ { 0x5301, 0x20 },
+ { 0x5302, 0x0 },
+ { 0x5303, 0x7c },
+ { 0x530c, 0x0 },
+ { 0x530d, 0xc },
+ { 0x530e, 0x20 },
+ { 0x530f, 0x80 },
+ { 0x5310, 0x20 },
+ { 0x5311, 0x80 },
+ { 0x5308, 0x20 },
+ { 0x5309, 0x40 },
+ { 0x5304, 0x0 },
+ { 0x5305, 0x30 },
+ { 0x5306, 0x0 },
+ { 0x5307, 0x80 },
+ { 0x5314, 0x8 },
+ { 0x5315, 0x20 },
+ { 0x5319, 0x30 },
+ { 0x5316, 0x10 },
+ { 0x5317, 0x0 },
+ { 0x5318, 0x2 },
+ { 0x5380, 0x1 },
+ { 0x5381, 0x0 },
+ { 0x5382, 0x0 },
+ { 0x5383, 0x4e },
+ { 0x5384, 0x0 },
+ { 0x5385, 0xf },
+ { 0x5386, 0x0 },
+ { 0x5387, 0x0 },
+ { 0x5388, 0x1 },
+ { 0x5389, 0x15 },
+ { 0x538a, 0x0 },
+ { 0x538b, 0x31 },
+ { 0x538c, 0x0 },
+ { 0x538d, 0x0 },
+ { 0x538e, 0x0 },
+ { 0x538f, 0xf },
+ { 0x5390, 0x0 },
+ { 0x5391, 0xab },
+ { 0x5392, 0x0 },
+ { 0x5393, 0xa2 },
+ { 0x5394, 0x8 },
+ { 0x5480, 0x14 },
+ { 0x5481, 0x21 },
+ { 0x5482, 0x36 },
+ { 0x5483, 0x57 },
+ { 0x5484, 0x65 },
+ { 0x5485, 0x71 },
+ { 0x5486, 0x7d },
+ { 0x5487, 0x87 },
+ { 0x5488, 0x91 },
+ { 0x5489, 0x9a },
+ { 0x548a, 0xaa },
+ { 0x548b, 0xb8 },
+ { 0x548c, 0xcd },
+ { 0x548d, 0xdd },
+ { 0x548e, 0xea },
+ { 0x548f, 0x1d },
+ { 0x5490, 0x5 },
+ { 0x5491, 0x0 },
+ { 0x5492, 0x4 },
+ { 0x5493, 0x20 },
+ { 0x5494, 0x3 },
+ { 0x5495, 0x60 },
+ { 0x5496, 0x2 },
+ { 0x5497, 0xb8 },
+ { 0x5498, 0x2 },
+ { 0x5499, 0x86 },
+ { 0x549a, 0x2 },
+ { 0x549b, 0x5b },
+ { 0x549c, 0x2 },
+ { 0x549d, 0x3b },
+ { 0x549e, 0x2 },
+ { 0x549f, 0x1c },
+ { 0x54a0, 0x2 },
+ { 0x54a1, 0x4 },
+ { 0x54a2, 0x1 },
+ { 0x54a3, 0xed },
+ { 0x54a4, 0x1 },
+ { 0x54a5, 0xc5 },
+ { 0x54a6, 0x1 },
+ { 0x54a7, 0xa5 },
+ { 0x54a8, 0x1 },
+ { 0x54a9, 0x6c },
+ { 0x54aa, 0x1 },
+ { 0x54ab, 0x41 },
+ { 0x54ac, 0x1 },
+ { 0x54ad, 0x20 },
+ { 0x54ae, 0x0 },
+ { 0x54af, 0x16 },
+ { 0x54b0, 0x1 },
+ { 0x54b1, 0x20 },
+ { 0x54b2, 0x0 },
+ { 0x54b3, 0x10 },
+ { 0x54b4, 0x0 },
+ { 0x54b5, 0xf0 },
+ { 0x54b6, 0x0 },
+ { 0x54b7, 0xdf },
+ { 0x5402, 0x3f },
+ { 0x5403, 0x0 },
+ { 0x3406, 0x0 },
+ { 0x5180, 0xff },
+ { 0x5181, 0x52 },
+ { 0x5182, 0x11 },
+ { 0x5183, 0x14 },
+ { 0x5184, 0x25 },
+ { 0x5185, 0x24 },
+ { 0x5186, 0x6 },
+ { 0x5187, 0x8 },
+ { 0x5188, 0x8 },
+ { 0x5189, 0x7c },
+ { 0x518a, 0x60 },
+ { 0x518b, 0xb2 },
+ { 0x518c, 0xb2 },
+ { 0x518d, 0x44 },
+ { 0x518e, 0x3d },
+ { 0x518f, 0x58 },
+ { 0x5190, 0x46 },
+ { 0x5191, 0xf8 },
+ { 0x5192, 0x4 },
+ { 0x5193, 0x70 },
+ { 0x5194, 0xf0 },
+ { 0x5195, 0xf0 },
+ { 0x5196, 0x3 },
+ { 0x5197, 0x1 },
+ { 0x5198, 0x4 },
+ { 0x5199, 0x12 },
+ { 0x519a, 0x4 },
+ { 0x519b, 0x0 },
+ { 0x519c, 0x6 },
+ { 0x519d, 0x82 },
+ { 0x519e, 0x0 },
+ { 0x5025, 0x80 },
+ { 0x3a0f, 0x38 },
+ { 0x3a10, 0x30 },
+ { 0x3a1b, 0x3a },
+ { 0x3a1e, 0x2e },
+ { 0x3a11, 0x60 },
+ { 0x3a1f, 0x10 },
+ { 0x5688, 0xa6 },
+ { 0x5689, 0x6a },
+ { 0x568a, 0xea },
+ { 0x568b, 0xae },
+ { 0x568c, 0xa6 },
+ { 0x568d, 0x6a },
+ { 0x568e, 0x62 },
+ { 0x568f, 0x26 },
+ { 0x5583, 0x40 },
+ { 0x5584, 0x40 },
+ { 0x5580, 0x2 },
+ { 0x5000, 0xcf },
+ { 0x5800, 0x27 },
+ { 0x5801, 0x19 },
+ { 0x5802, 0x12 },
+ { 0x5803, 0xf },
+ { 0x5804, 0x10 },
+ { 0x5805, 0x15 },
+ { 0x5806, 0x1e },
+ { 0x5807, 0x2f },
+ { 0x5808, 0x15 },
+ { 0x5809, 0xd },
+ { 0x580a, 0xa },
+ { 0x580b, 0x9 },
+ { 0x580c, 0xa },
+ { 0x580d, 0xc },
+ { 0x580e, 0x12 },
+ { 0x580f, 0x19 },
+ { 0x5810, 0xb },
+ { 0x5811, 0x7 },
+ { 0x5812, 0x4 },
+ { 0x5813, 0x3 },
+ { 0x5814, 0x3 },
+ { 0x5815, 0x6 },
+ { 0x5816, 0xa },
+ { 0x5817, 0xf },
+ { 0x5818, 0xa },
+ { 0x5819, 0x5 },
+ { 0x581a, 0x1 },
+ { 0x581b, 0x0 },
+ { 0x581c, 0x0 },
+ { 0x581d, 0x3 },
+ { 0x581e, 0x8 },
+ { 0x581f, 0xc },
+ { 0x5820, 0xa },
+ { 0x5821, 0x5 },
+ { 0x5822, 0x1 },
+ { 0x5823, 0x0 },
+ { 0x5824, 0x0 },
+ { 0x5825, 0x3 },
+ { 0x5826, 0x8 },
+ { 0x5827, 0xc },
+ { 0x5828, 0xe },
+ { 0x5829, 0x8 },
+ { 0x582a, 0x6 },
+ { 0x582b, 0x4 },
+ { 0x582c, 0x5 },
+ { 0x582d, 0x7 },
+ { 0x582e, 0xb },
+ { 0x582f, 0x12 },
+ { 0x5830, 0x18 },
+ { 0x5831, 0x10 },
+ { 0x5832, 0xc },
+ { 0x5833, 0xa },
+ { 0x5834, 0xb },
+ { 0x5835, 0xe },
+ { 0x5836, 0x15 },
+ { 0x5837, 0x19 },
+ { 0x5838, 0x32 },
+ { 0x5839, 0x1f },
+ { 0x583a, 0x18 },
+ { 0x583b, 0x16 },
+ { 0x583c, 0x17 },
+ { 0x583d, 0x1e },
+ { 0x583e, 0x26 },
+ { 0x583f, 0x53 },
+ { 0x5840, 0x10 },
+ { 0x5841, 0xf },
+ { 0x5842, 0xd },
+ { 0x5843, 0xc },
+ { 0x5844, 0xe },
+ { 0x5845, 0x9 },
+ { 0x5846, 0x11 },
+ { 0x5847, 0x10 },
+ { 0x5848, 0x10 },
+ { 0x5849, 0x10 },
+ { 0x584a, 0x10 },
+ { 0x584b, 0xe },
+ { 0x584c, 0x10 },
+ { 0x584d, 0x10 },
+ { 0x584e, 0x11 },
+ { 0x584f, 0x10 },
+ { 0x5850, 0xf },
+ { 0x5851, 0xc },
+ { 0x5852, 0xf },
+ { 0x5853, 0x10 },
+ { 0x5854, 0x10 },
+ { 0x5855, 0xf },
+ { 0x5856, 0xe },
+ { 0x5857, 0xb },
+ { 0x5858, 0x10 },
+ { 0x5859, 0xd },
+ { 0x585a, 0xd },
+ { 0x585b, 0xc },
+ { 0x585c, 0xc },
+ { 0x585d, 0xc },
+ { 0x585e, 0xb },
+ { 0x585f, 0xc },
+ { 0x5860, 0xc },
+ { 0x5861, 0xc },
+ { 0x5862, 0xd },
+ { 0x5863, 0x8 },
+ { 0x5864, 0x11 },
+ { 0x5865, 0x18 },
+ { 0x5866, 0x18 },
+ { 0x5867, 0x19 },
+ { 0x5868, 0x17 },
+ { 0x5869, 0x19 },
+ { 0x586a, 0x16 },
+ { 0x586b, 0x13 },
+ { 0x586c, 0x13 },
+ { 0x586d, 0x12 },
+ { 0x586e, 0x13 },
+ { 0x586f, 0x16 },
+ { 0x5870, 0x14 },
+ { 0x5871, 0x12 },
+ { 0x5872, 0x10 },
+ { 0x5873, 0x11 },
+ { 0x5874, 0x11 },
+ { 0x5875, 0x16 },
+ { 0x5876, 0x14 },
+ { 0x5877, 0x11 },
+ { 0x5878, 0x10 },
+ { 0x5879, 0xf },
+ { 0x587a, 0x10 },
+ { 0x587b, 0x14 },
+ { 0x587c, 0x13 },
+ { 0x587d, 0x12 },
+ { 0x587e, 0x11 },
+ { 0x587f, 0x11 },
+ { 0x5880, 0x12 },
+ { 0x5881, 0x15 },
+ { 0x5882, 0x14 },
+ { 0x5883, 0x15 },
+ { 0x5884, 0x15 },
+ { 0x5885, 0x15 },
+ { 0x5886, 0x13 },
+ { 0x5887, 0x17 },
+ { 0x3710, 0x10 },
+ { 0x3632, 0x51 },
+ { 0x3702, 0x10 },
+ { 0x3703, 0xb2 },
+ { 0x3704, 0x18 },
+ { 0x370b, 0x40 },
+ { 0x370d, 0x3 },
+ { 0x3631, 0x1 },
+ { 0x3632, 0x52 },
+ { 0x3606, 0x24 },
+ { 0x3620, 0x96 },
+ { 0x5785, 0x7 },
+ { 0x3a13, 0x30 },
+ { 0x3600, 0x52 },
+ { 0x3604, 0x48 },
+ { 0x3606, 0x1b },
+ { 0x370d, 0xb },
+ { 0x370f, 0xc0 },
+ { 0x3709, 0x1 },
+ { 0x3823, 0x0 },
+ { 0x5007, 0x0 },
+ { 0x5009, 0x0 },
+ { 0x5011, 0x0 },
+ { 0x5013, 0x0 },
+ { 0x519e, 0x0 },
+ { 0x5086, 0x0 },
+ { 0x5087, 0x0 },
+ { 0x5088, 0x0 },
+ { 0x5089, 0x0 },
+ { 0x302b, 0x0 },
+ { 0x3503, 0x7 },
+ { 0x3011, 0x8 },
+ { 0x350c, 0x2 },
+ { 0x350d, 0xe4 },
+ { 0x3621, 0xc9 },
+ { 0x370a, 0x81 },
+ { 0xffff, 0xff },
+};
+
+static struct regval_list ov5642_default_regs_finalise[] = {
+ { 0x3810, 0xc2 },
+ { 0x3818, 0xc9 },
+ { 0x381c, 0x10 },
+ { 0x381d, 0xa0 },
+ { 0x381e, 0x5 },
+ { 0x381f, 0xb0 },
+ { 0x3820, 0x0 },
+ { 0x3821, 0x0 },
+ { 0x3824, 0x11 },
+ { 0x3a08, 0x1b },
+ { 0x3a09, 0xc0 },
+ { 0x3a0a, 0x17 },
+ { 0x3a0b, 0x20 },
+ { 0x3a0d, 0x2 },
+ { 0x3a0e, 0x1 },
+ { 0x401c, 0x4 },
+ { 0x5682, 0x5 },
+ { 0x5683, 0x0 },
+ { 0x5686, 0x2 },
+ { 0x5687, 0xcc },
+ { 0x5001, 0x4f },
+ { 0x589b, 0x6 },
+ { 0x589a, 0xc5 },
+ { 0x3503, 0x0 },
+ { 0x460c, 0x20 },
+ { 0x460b, 0x37 },
+ { 0x471c, 0xd0 },
+ { 0x471d, 0x5 },
+ { 0x3815, 0x1 },
+ { 0x3818, 0xc1 },
+ { 0x501f, 0x0 },
+ { 0x5002, 0xe0 },
+ { 0x4300, 0x32 }, /* UYVY */
+ { 0x3002, 0x1c },
+ { 0x4800, 0x14 },
+ { 0x4801, 0xf },
+ { 0x3007, 0x3b },
+ { 0x300e, 0x4 },
+ { 0x4803, 0x50 },
+ { 0x3815, 0x1 },
+ { 0x4713, 0x2 },
+ { 0x4842, 0x1 },
+ { 0x300f, 0xe },
+ { 0x3003, 0x3 },
+ { 0x3003, 0x1 },
+ { 0xffff, 0xff },
+};
+
+struct ov5642_datafmt {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+};
+
+struct ov5642 {
+ struct v4l2_subdev subdev;
+ const struct ov5642_datafmt *fmt;
+};
+
+static const struct ov5642_datafmt ov5642_colour_fmts[] = {
+ {V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
+};
+
+static struct ov5642 *to_ov5642(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct ov5642, subdev);
+}
+
+/* Find a data format by a pixel code in an array */
+static const struct ov5642_datafmt
+ *ov5642_find_datafmt(enum v4l2_mbus_pixelcode code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ov5642_colour_fmts); i++)
+ if (ov5642_colour_fmts[i].code == code)
+ return ov5642_colour_fmts + i;
+
+ return NULL;
+}
+
+static int reg_read(struct i2c_client *client, u16 reg, u8 *val)
+{
+ int ret;
+ /* We have 16-bit i2c register addresses - care for endianness */
+ unsigned char data[2] = { reg >> 8, reg & 0xff };
+
+ ret = i2c_master_send(client, data, 2);
+ if (ret < 2) {
+ dev_err(&client->dev, "%s: i2c read error, reg: %x\n",
+ __func__, reg);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ ret = i2c_master_recv(client, val, 1);
+ if (ret < 1) {
+ dev_err(&client->dev, "%s: i2c read error, reg: %x\n",
+ __func__, reg);
+ return ret < 0 ? ret : -EIO;
+ }
+ return 0;
+}
+
+static int reg_write(struct i2c_client *client, u16 reg, u8 val)
+{
+ int ret;
+ unsigned char data[3] = { reg >> 8, reg & 0xff, val };
+
+ ret = i2c_master_send(client, data, 3);
+ if (ret < 3) {
+ dev_err(&client->dev, "%s: i2c write error, reg: %x\n",
+ __func__, reg);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return 0;
+}
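The reg_read()/reg_write() pair above is the whole register-access layer: a 16-bit register address is sent high byte first, followed by a one-byte payload or a one-byte read-back. As a usage sketch only (the driver itself does not define such a helper), a read-modify-write wrapper built on these accessors could look like this:

static int example_reg_rmw(struct i2c_client *client, u16 reg, u8 set, u8 clear)
{
	u8 val;
	int ret;

	/* Fetch the current 8-bit value behind the 16-bit register address */
	ret = reg_read(client, reg, &val);
	if (ret < 0)
		return ret;

	val = (val & ~clear) | set;	/* clear the requested bits, then set the new ones */
	return reg_write(client, reg, val);
}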
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ov5642_get_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 val;
+
+ if (reg->reg & ~0xffff)
+ return -EINVAL;
+
+ reg->size = 1;
+
+ ret = reg_read(client, reg->reg, &val);
+ if (!ret)
+ reg->val = (__u64)val;
+
+ return ret;
+}
+
+static int ov5642_set_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg & ~0xffff || reg->val & ~0xff)
+ return -EINVAL;
+
+ return reg_write(client, reg->reg, reg->val);
+}
+#endif
+
+static int ov5642_write_array(struct i2c_client *client,
+ struct regval_list *vals)
+{
+ while (vals->reg_num != 0xffff || vals->value != 0xff) {
+ int ret = reg_write(client, vals->reg_num, vals->value);
+ if (ret < 0)
+ return ret;
+ vals++;
+ }
+ dev_dbg(&client->dev, "Register list loaded\n");
+ return 0;
+}
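ov5642_write_array() walks a regval_list table until it reaches an entry whose address is 0xffff and whose value is 0xff, which is why every table above ends with { 0xffff, 0xff }. A minimal usage sketch (the table name and the wrapper are illustrative only; the register/value pair is copied from the init table above):

static struct regval_list example_regs[] = {
	{ 0x3503, 0x07 },	/* pair taken from ov5642_default_regs_init */
	{ 0xffff, 0xff },	/* end-of-table sentinel checked by ov5642_write_array() */
};

static int example_load(struct i2c_client *client)
{
	return ov5642_write_array(client, example_regs);
}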
+
+static int ov5642_set_resolution(struct i2c_client *client)
+{
+ int ret;
+ u8 start_x_high = ((OV5642_SENSOR_SIZE_X - OV5642_WIDTH) / 2) >> 8;
+ u8 start_x_low = ((OV5642_SENSOR_SIZE_X - OV5642_WIDTH) / 2) & 0xff;
+ u8 start_y_high = ((OV5642_SENSOR_SIZE_Y - OV5642_HEIGHT) / 2) >> 8;
+ u8 start_y_low = ((OV5642_SENSOR_SIZE_Y - OV5642_HEIGHT) / 2) & 0xff;
+
+ u8 width_high = OV5642_WIDTH >> 8;
+ u8 width_low = OV5642_WIDTH & 0xff;
+ u8 height_high = OV5642_HEIGHT >> 8;
+ u8 height_low = OV5642_HEIGHT & 0xff;
+
+ u8 total_width_high = OV5642_TOTAL_WIDTH >> 8;
+ u8 total_width_low = OV5642_TOTAL_WIDTH & 0xff;
+ u8 total_height_high = OV5642_TOTAL_HEIGHT >> 8;
+ u8 total_height_low = OV5642_TOTAL_HEIGHT & 0xff;
+
+ ret = reg_write(client, REG_WINDOW_START_X_HIGH, start_x_high);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_START_X_LOW, start_x_low);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_START_Y_HIGH, start_y_high);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_START_Y_LOW, start_y_low);
+
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_WIDTH_HIGH, width_high);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_WIDTH_LOW, width_low);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_HEIGHT_HIGH, height_high);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_HEIGHT_LOW, height_low);
+
+ if (!ret)
+ ret = reg_write(client, REG_OUT_WIDTH_HIGH, width_high);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_WIDTH_LOW, width_low);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_HEIGHT_HIGH, height_high);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_HEIGHT_LOW, height_low);
+
+ if (!ret)
+ ret = reg_write(client, REG_OUT_TOTAL_WIDTH_HIGH, total_width_high);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_TOTAL_WIDTH_LOW, total_width_low);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_TOTAL_HEIGHT_HIGH, total_height_high);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_TOTAL_HEIGHT_LOW, total_height_low);
+
+ return ret;
+}
+
+static int ov5642_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ const struct ov5642_datafmt *fmt = ov5642_find_datafmt(mf->code);
+
+ dev_dbg(sd->v4l2_dev->dev, "%s(%u) width: %u height: %u\n",
+ __func__, mf->code, mf->width, mf->height);
+
+ if (!fmt) {
+ mf->code = ov5642_colour_fmts[0].code;
+ mf->colorspace = ov5642_colour_fmts[0].colorspace;
+ }
+
+ mf->width = OV5642_WIDTH;
+ mf->height = OV5642_HEIGHT;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int ov5642_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5642 *priv = to_ov5642(client);
+
+ dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code);
+
+ /* MIPI CSI could have changed the format, double-check */
+ if (!ov5642_find_datafmt(mf->code))
+ return -EINVAL;
+
+ ov5642_try_fmt(sd, mf);
+
+ priv->fmt = ov5642_find_datafmt(mf->code);
+
+ ov5642_write_array(client, ov5642_default_regs_init);
+ ov5642_set_resolution(client);
+ ov5642_write_array(client, ov5642_default_regs_finalise);
+
+ return 0;
+}
+
+static int ov5642_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5642 *priv = to_ov5642(client);
+
+ const struct ov5642_datafmt *fmt = priv->fmt;
+
+ mf->code = fmt->code;
+ mf->colorspace = fmt->colorspace;
+ mf->width = OV5642_WIDTH;
+ mf->height = OV5642_HEIGHT;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int ov5642_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index >= ARRAY_SIZE(ov5642_colour_fmts))
+ return -EINVAL;
+
+ *code = ov5642_colour_fmts[index].code;
+ return 0;
+}
+
+static int ov5642_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
+
+ if (id->match.addr != client->addr)
+ return -ENODEV;
+
+ id->ident = V4L2_IDENT_OV5642;
+ id->revision = 0;
+
+ return 0;
+}
+
+static int ov5642_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct v4l2_rect *rect = &a->c;
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ rect->top = 0;
+ rect->left = 0;
+ rect->width = OV5642_WIDTH;
+ rect->height = OV5642_HEIGHT;
+
+ return 0;
+}
+
+static int ov5642_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = OV5642_WIDTH;
+ a->bounds.height = OV5642_HEIGHT;
+ a->defrect = a->bounds;
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops ov5642_subdev_video_ops = {
+ .s_mbus_fmt = ov5642_s_fmt,
+ .g_mbus_fmt = ov5642_g_fmt,
+ .try_mbus_fmt = ov5642_try_fmt,
+ .enum_mbus_fmt = ov5642_enum_fmt,
+ .g_crop = ov5642_g_crop,
+ .cropcap = ov5642_cropcap,
+};
+
+static struct v4l2_subdev_core_ops ov5642_subdev_core_ops = {
+ .g_chip_ident = ov5642_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = ov5642_get_register,
+ .s_register = ov5642_set_register,
+#endif
+};
+
+static struct v4l2_subdev_ops ov5642_subdev_ops = {
+ .core = &ov5642_subdev_core_ops,
+ .video = &ov5642_subdev_video_ops,
+};
+
+/*
+ * We have to provide soc-camera operations, but we don't have anything to say
+ * there. The MIPI CSI2 driver will provide .query_bus_param and .set_bus_param.
+ */
+static unsigned long soc_ov5642_query_bus_param(struct soc_camera_device *icd)
+{
+ return 0;
+}
+
+static int soc_ov5642_set_bus_param(struct soc_camera_device *icd,
+ unsigned long flags)
+{
+ return -EINVAL;
+}
+
+static struct soc_camera_ops soc_ov5642_ops = {
+ .query_bus_param = soc_ov5642_query_bus_param,
+ .set_bus_param = soc_ov5642_set_bus_param,
+};
+
+static int ov5642_video_probe(struct soc_camera_device *icd,
+ struct i2c_client *client)
+{
+ int ret;
+ u8 id_high, id_low;
+ u16 id;
+
+ /* Read sensor Model ID */
+ ret = reg_read(client, REG_CHIP_ID_HIGH, &id_high);
+ if (ret < 0)
+ return ret;
+
+ id = id_high << 8;
+
+ ret = reg_read(client, REG_CHIP_ID_LOW, &id_low);
+ if (ret < 0)
+ return ret;
+
+ id |= id_low;
+
+ dev_info(&client->dev, "Chip ID 0x%04x detected\n", id);
+
+ if (id != 0x5642)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int ov5642_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct ov5642 *priv;
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl;
+ int ret;
+
+ if (!icd) {
+ dev_err(&client->dev, "OV5642: missing soc-camera data!\n");
+ return -EINVAL;
+ }
+
+ icl = to_soc_camera_link(icd);
+ if (!icl) {
+ dev_err(&client->dev, "OV5642: missing platform data!\n");
+ return -EINVAL;
+ }
+
+ priv = kzalloc(sizeof(struct ov5642), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&priv->subdev, client, &ov5642_subdev_ops);
+
+ icd->ops = &soc_ov5642_ops;
+ priv->fmt = &ov5642_colour_fmts[0];
+
+ ret = ov5642_video_probe(icd, client);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ icd->ops = NULL;
+ kfree(priv);
+ return ret;
+}
+
+static int ov5642_remove(struct i2c_client *client)
+{
+ struct ov5642 *priv = to_ov5642(client);
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+
+ icd->ops = NULL;
+ if (icl->free_bus)
+ icl->free_bus(icl);
+ kfree(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov5642_id[] = {
+ { "ov5642", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ov5642_id);
+
+static struct i2c_driver ov5642_i2c_driver = {
+ .driver = {
+ .name = "ov5642",
+ },
+ .probe = ov5642_probe,
+ .remove = ov5642_remove,
+ .id_table = ov5642_id,
+};
+
+static int __init ov5642_mod_init(void)
+{
+ return i2c_add_driver(&ov5642_i2c_driver);
+}
+
+static void __exit ov5642_mod_exit(void)
+{
+ i2c_del_driver(&ov5642_i2c_driver);
+}
+
+module_init(ov5642_mod_init);
+module_exit(ov5642_mod_exit);
+
+MODULE_DESCRIPTION("Omnivision OV5642 Camera driver");
+MODULE_AUTHOR("Bastian Hecht <hechtb@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index d4e7c11..8aa0585 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -19,8 +19,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-mediabus.h>
-
-#include "ov7670.h"
+#include <media/ov7670.h>
MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors");
diff --git a/drivers/media/video/ov7670.h b/drivers/media/video/ov7670.h
deleted file mode 100644
index b133bc1..0000000
--- a/drivers/media/video/ov7670.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * A V4L2 driver for OmniVision OV7670 cameras.
- *
- * Copyright 2010 One Laptop Per Child
- *
- * This file may be distributed under the terms of the GNU General
- * Public License, version 2.
- */
-
-#ifndef __OV7670_H
-#define __OV7670_H
-
-struct ov7670_config {
- int min_width; /* Filter out smaller sizes */
- int min_height; /* Filter out smaller sizes */
- int clock_speed; /* External clock speed (MHz) */
- bool use_smbus; /* Use smbus I/O instead of I2C */
-};
-
-#endif
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index 48895ef..397870f 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -1032,13 +1032,9 @@ static int ov772x_video_probe(struct soc_camera_device *icd,
u8 pid, ver;
const char *devname;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* check and show product ID and manufacturer ID
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 5173ac4..3681a6f 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -657,16 +657,9 @@ static int ov9640_video_probe(struct soc_camera_device *icd,
const char *devname;
int ret = 0;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface) {
- dev_err(&client->dev, "Parent missing or invalid!\n");
- ret = -ENODEV;
- goto err;
- }
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* check and show product ID and manufacturer ID
diff --git a/drivers/media/video/ov9740.c b/drivers/media/video/ov9740.c
index 4d4ee4f..edd1ffc 100644
--- a/drivers/media/video/ov9740.c
+++ b/drivers/media/video/ov9740.c
@@ -44,12 +44,12 @@
#define OV9740_Y_ADDR_START_LO 0x0347
#define OV9740_X_ADDR_END_HI 0x0348
#define OV9740_X_ADDR_END_LO 0x0349
-#define OV9740_Y_ADDR_END_HI 0x034A
-#define OV9740_Y_ADDR_END_LO 0x034B
-#define OV9740_X_OUTPUT_SIZE_HI 0x034C
-#define OV9740_X_OUTPUT_SIZE_LO 0x034D
-#define OV9740_Y_OUTPUT_SIZE_HI 0x034E
-#define OV9740_Y_OUTPUT_SIZE_LO 0x034F
+#define OV9740_Y_ADDR_END_HI 0x034a
+#define OV9740_Y_ADDR_END_LO 0x034b
+#define OV9740_X_OUTPUT_SIZE_HI 0x034c
+#define OV9740_X_OUTPUT_SIZE_LO 0x034d
+#define OV9740_Y_OUTPUT_SIZE_HI 0x034e
+#define OV9740_Y_OUTPUT_SIZE_LO 0x034f
/* IO Control Registers */
#define OV9740_IO_CREL00 0x3002
@@ -68,6 +68,7 @@
#define OV9740_ANALOG_CTRL04 0x3604
#define OV9740_ANALOG_CTRL10 0x3610
#define OV9740_ANALOG_CTRL12 0x3612
+#define OV9740_ANALOG_CTRL15 0x3615
#define OV9740_ANALOG_CTRL20 0x3620
#define OV9740_ANALOG_CTRL21 0x3621
#define OV9740_ANALOG_CTRL22 0x3622
@@ -89,28 +90,28 @@
#define OV9740_TIMING_CTRL35 0x3835
/* Banding Filter */
-#define OV9740_AEC_MAXEXPO_60_H 0x3A02
-#define OV9740_AEC_MAXEXPO_60_L 0x3A03
-#define OV9740_AEC_B50_STEP_HI 0x3A08
-#define OV9740_AEC_B50_STEP_LO 0x3A09
-#define OV9740_AEC_B60_STEP_HI 0x3A0A
-#define OV9740_AEC_B60_STEP_LO 0x3A0B
-#define OV9740_AEC_CTRL0D 0x3A0D
-#define OV9740_AEC_CTRL0E 0x3A0E
-#define OV9740_AEC_MAXEXPO_50_H 0x3A14
-#define OV9740_AEC_MAXEXPO_50_L 0x3A15
+#define OV9740_AEC_MAXEXPO_60_H 0x3a02
+#define OV9740_AEC_MAXEXPO_60_L 0x3a03
+#define OV9740_AEC_B50_STEP_HI 0x3a08
+#define OV9740_AEC_B50_STEP_LO 0x3a09
+#define OV9740_AEC_B60_STEP_HI 0x3a0a
+#define OV9740_AEC_B60_STEP_LO 0x3a0b
+#define OV9740_AEC_CTRL0D 0x3a0d
+#define OV9740_AEC_CTRL0E 0x3a0e
+#define OV9740_AEC_MAXEXPO_50_H 0x3a14
+#define OV9740_AEC_MAXEXPO_50_L 0x3a15
/* AEC/AGC Control */
#define OV9740_AEC_ENABLE 0x3503
-#define OV9740_GAIN_CEILING_01 0x3A18
-#define OV9740_GAIN_CEILING_02 0x3A19
-#define OV9740_AEC_HI_THRESHOLD 0x3A11
-#define OV9740_AEC_3A1A 0x3A1A
-#define OV9740_AEC_CTRL1B_WPT2 0x3A1B
-#define OV9740_AEC_CTRL0F_WPT 0x3A0F
-#define OV9740_AEC_CTRL10_BPT 0x3A10
-#define OV9740_AEC_CTRL1E_BPT2 0x3A1E
-#define OV9740_AEC_LO_THRESHOLD 0x3A1F
+#define OV9740_GAIN_CEILING_01 0x3a18
+#define OV9740_GAIN_CEILING_02 0x3a19
+#define OV9740_AEC_HI_THRESHOLD 0x3a11
+#define OV9740_AEC_3A1A 0x3a1a
+#define OV9740_AEC_CTRL1B_WPT2 0x3a1b
+#define OV9740_AEC_CTRL0F_WPT 0x3a0f
+#define OV9740_AEC_CTRL10_BPT 0x3a10
+#define OV9740_AEC_CTRL1E_BPT2 0x3a1e
+#define OV9740_AEC_LO_THRESHOLD 0x3a1f
/* BLC Control */
#define OV9740_BLC_AUTO_ENABLE 0x4002
@@ -132,7 +133,7 @@
#define OV9740_VT_SYS_CLK_DIV 0x0303
#define OV9740_VT_PIX_CLK_DIV 0x0301
#define OV9740_PLL_CTRL3010 0x3010
-#define OV9740_VFIFO_CTRL00 0x460E
+#define OV9740_VFIFO_CTRL00 0x460e
/* ISP Control */
#define OV9740_ISP_CTRL00 0x5000
@@ -141,9 +142,9 @@
#define OV9740_ISP_CTRL05 0x5005
#define OV9740_ISP_CTRL12 0x5012
#define OV9740_ISP_CTRL19 0x5019
-#define OV9740_ISP_CTRL1A 0x501A
-#define OV9740_ISP_CTRL1E 0x501E
-#define OV9740_ISP_CTRL1F 0x501F
+#define OV9740_ISP_CTRL1A 0x501a
+#define OV9740_ISP_CTRL1E 0x501e
+#define OV9740_ISP_CTRL1F 0x501f
#define OV9740_ISP_CTRL20 0x5020
#define OV9740_ISP_CTRL21 0x5021
@@ -158,12 +159,12 @@
#define OV9740_AWB_ADV_CTRL04 0x5187
#define OV9740_AWB_ADV_CTRL05 0x5188
#define OV9740_AWB_ADV_CTRL06 0x5189
-#define OV9740_AWB_ADV_CTRL07 0x518A
-#define OV9740_AWB_ADV_CTRL08 0x518B
-#define OV9740_AWB_ADV_CTRL09 0x518C
-#define OV9740_AWB_ADV_CTRL10 0x518D
-#define OV9740_AWB_ADV_CTRL11 0x518E
-#define OV9740_AWB_CTRL0F 0x518F
+#define OV9740_AWB_ADV_CTRL07 0x518a
+#define OV9740_AWB_ADV_CTRL08 0x518b
+#define OV9740_AWB_ADV_CTRL09 0x518c
+#define OV9740_AWB_ADV_CTRL10 0x518d
+#define OV9740_AWB_ADV_CTRL11 0x518e
+#define OV9740_AWB_CTRL0F 0x518f
#define OV9740_AWB_CTRL10 0x5190
#define OV9740_AWB_CTRL11 0x5191
#define OV9740_AWB_CTRL12 0x5192
@@ -180,27 +181,8 @@
#define OV9740_MIPI_CTRL_3012 0x3012
#define OV9740_SC_CMMM_MIPI_CTR 0x3014
-/* supported resolutions */
-enum {
- OV9740_VGA,
- OV9740_720P,
-};
-
-struct ov9740_resolution {
- unsigned int width;
- unsigned int height;
-};
-
-static struct ov9740_resolution ov9740_resolutions[] = {
- [OV9740_VGA] = {
- .width = 640,
- .height = 480,
- },
- [OV9740_720P] = {
- .width = 1280,
- .height = 720,
- },
-};
+#define OV9740_MAX_WIDTH 1280
+#define OV9740_MAX_HEIGHT 720
/* Misc. structures */
struct ov9740_reg {
@@ -219,9 +201,16 @@ struct ov9740_priv {
bool flag_vflip;
bool flag_hflip;
+
+ /* For suspend/resume. */
+ struct v4l2_mbus_framefmt current_mf;
+ bool current_enable;
};
static const struct ov9740_reg ov9740_defaults[] = {
+ /* Software Reset */
+ { OV9740_SOFTWARE_RESET, 0x01 },
+
/* Banding Filter */
{ OV9740_AEC_B50_STEP_HI, 0x00 },
{ OV9740_AEC_B50_STEP_LO, 0xe8 },
@@ -241,36 +230,36 @@ static const struct ov9740_reg ov9740_defaults[] = {
/* Un-documented OV9740 registers */
{ 0x5800, 0x29 }, { 0x5801, 0x25 }, { 0x5802, 0x20 }, { 0x5803, 0x21 },
{ 0x5804, 0x26 }, { 0x5805, 0x2e }, { 0x5806, 0x11 }, { 0x5807, 0x0c },
- { 0x5808, 0x09 }, { 0x5809, 0x0a }, { 0x580A, 0x0e }, { 0x580B, 0x16 },
- { 0x580C, 0x06 }, { 0x580D, 0x02 }, { 0x580E, 0x00 }, { 0x580F, 0x00 },
+ { 0x5808, 0x09 }, { 0x5809, 0x0a }, { 0x580a, 0x0e }, { 0x580b, 0x16 },
+ { 0x580c, 0x06 }, { 0x580d, 0x02 }, { 0x580e, 0x00 }, { 0x580f, 0x00 },
{ 0x5810, 0x04 }, { 0x5811, 0x0a }, { 0x5812, 0x05 }, { 0x5813, 0x02 },
{ 0x5814, 0x00 }, { 0x5815, 0x00 }, { 0x5816, 0x03 }, { 0x5817, 0x09 },
- { 0x5818, 0x0f }, { 0x5819, 0x0a }, { 0x581A, 0x07 }, { 0x581B, 0x08 },
- { 0x581C, 0x0b }, { 0x581D, 0x14 }, { 0x581E, 0x28 }, { 0x581F, 0x23 },
+ { 0x5818, 0x0f }, { 0x5819, 0x0a }, { 0x581a, 0x07 }, { 0x581b, 0x08 },
+ { 0x581c, 0x0b }, { 0x581d, 0x14 }, { 0x581e, 0x28 }, { 0x581f, 0x23 },
{ 0x5820, 0x1d }, { 0x5821, 0x1e }, { 0x5822, 0x24 }, { 0x5823, 0x2a },
{ 0x5824, 0x4f }, { 0x5825, 0x6f }, { 0x5826, 0x5f }, { 0x5827, 0x7f },
- { 0x5828, 0x9f }, { 0x5829, 0x5f }, { 0x582A, 0x8f }, { 0x582B, 0x9e },
- { 0x582C, 0x8f }, { 0x582D, 0x9f }, { 0x582E, 0x4f }, { 0x582F, 0x87 },
+ { 0x5828, 0x9f }, { 0x5829, 0x5f }, { 0x582a, 0x8f }, { 0x582b, 0x9e },
+ { 0x582c, 0x8f }, { 0x582d, 0x9f }, { 0x582e, 0x4f }, { 0x582f, 0x87 },
{ 0x5830, 0x86 }, { 0x5831, 0x97 }, { 0x5832, 0xae }, { 0x5833, 0x3f },
{ 0x5834, 0x8e }, { 0x5835, 0x7c }, { 0x5836, 0x7e }, { 0x5837, 0xaf },
- { 0x5838, 0x8f }, { 0x5839, 0x8f }, { 0x583A, 0x9f }, { 0x583B, 0x7f },
- { 0x583C, 0x5f },
+ { 0x5838, 0x8f }, { 0x5839, 0x8f }, { 0x583a, 0x9f }, { 0x583b, 0x7f },
+ { 0x583c, 0x5f },
/* Y Gamma */
{ 0x5480, 0x07 }, { 0x5481, 0x18 }, { 0x5482, 0x2c }, { 0x5483, 0x4e },
{ 0x5484, 0x5e }, { 0x5485, 0x6b }, { 0x5486, 0x77 }, { 0x5487, 0x82 },
- { 0x5488, 0x8c }, { 0x5489, 0x95 }, { 0x548A, 0xa4 }, { 0x548B, 0xb1 },
- { 0x548C, 0xc6 }, { 0x548D, 0xd8 }, { 0x548E, 0xe9 },
+ { 0x5488, 0x8c }, { 0x5489, 0x95 }, { 0x548a, 0xa4 }, { 0x548b, 0xb1 },
+ { 0x548c, 0xc6 }, { 0x548d, 0xd8 }, { 0x548e, 0xe9 },
/* UV Gamma */
{ 0x5490, 0x0f }, { 0x5491, 0xff }, { 0x5492, 0x0d }, { 0x5493, 0x05 },
{ 0x5494, 0x07 }, { 0x5495, 0x1a }, { 0x5496, 0x04 }, { 0x5497, 0x01 },
- { 0x5498, 0x03 }, { 0x5499, 0x53 }, { 0x549A, 0x02 }, { 0x549B, 0xeb },
- { 0x549C, 0x02 }, { 0x549D, 0xa0 }, { 0x549E, 0x02 }, { 0x549F, 0x67 },
- { 0x54A0, 0x02 }, { 0x54A1, 0x3b }, { 0x54A2, 0x02 }, { 0x54A3, 0x18 },
- { 0x54A4, 0x01 }, { 0x54A5, 0xe7 }, { 0x54A6, 0x01 }, { 0x54A7, 0xc3 },
- { 0x54A8, 0x01 }, { 0x54A9, 0x94 }, { 0x54AA, 0x01 }, { 0x54AB, 0x72 },
- { 0x54AC, 0x01 }, { 0x54AD, 0x57 },
+ { 0x5498, 0x03 }, { 0x5499, 0x53 }, { 0x549a, 0x02 }, { 0x549b, 0xeb },
+ { 0x549c, 0x02 }, { 0x549d, 0xa0 }, { 0x549e, 0x02 }, { 0x549f, 0x67 },
+ { 0x54a0, 0x02 }, { 0x54a1, 0x3b }, { 0x54a2, 0x02 }, { 0x54a3, 0x18 },
+ { 0x54a4, 0x01 }, { 0x54a5, 0xe7 }, { 0x54a6, 0x01 }, { 0x54a7, 0xc3 },
+ { 0x54a8, 0x01 }, { 0x54a9, 0x94 }, { 0x54aa, 0x01 }, { 0x54ab, 0x72 },
+ { 0x54ac, 0x01 }, { 0x54ad, 0x57 },
/* AWB */
{ OV9740_AWB_CTRL00, 0xf0 },
@@ -296,18 +285,18 @@ static const struct ov9740_reg ov9740_defaults[] = {
{ OV9740_AWB_CTRL14, 0x00 },
/* CIP */
- { 0x530D, 0x12 },
+ { 0x530d, 0x12 },
/* CMX */
{ 0x5380, 0x01 }, { 0x5381, 0x00 }, { 0x5382, 0x00 }, { 0x5383, 0x17 },
{ 0x5384, 0x00 }, { 0x5385, 0x01 }, { 0x5386, 0x00 }, { 0x5387, 0x00 },
- { 0x5388, 0x00 }, { 0x5389, 0xe0 }, { 0x538A, 0x00 }, { 0x538B, 0x20 },
- { 0x538C, 0x00 }, { 0x538D, 0x00 }, { 0x538E, 0x00 }, { 0x538F, 0x16 },
+ { 0x5388, 0x00 }, { 0x5389, 0xe0 }, { 0x538a, 0x00 }, { 0x538b, 0x20 },
+ { 0x538c, 0x00 }, { 0x538d, 0x00 }, { 0x538e, 0x00 }, { 0x538f, 0x16 },
{ 0x5390, 0x00 }, { 0x5391, 0x9c }, { 0x5392, 0x00 }, { 0x5393, 0xa0 },
{ 0x5394, 0x18 },
/* 50/60 Detection */
- { 0x3C0A, 0x9c }, { 0x3C0B, 0x3f },
+ { 0x3c0a, 0x9c }, { 0x3c0b, 0x3f },
/* Output Select */
{ OV9740_IO_OUTPUT_SEL01, 0x00 },
@@ -333,6 +322,7 @@ static const struct ov9740_reg ov9740_defaults[] = {
{ OV9740_ANALOG_CTRL10, 0xa1 },
{ OV9740_ANALOG_CTRL12, 0x24 },
{ OV9740_ANALOG_CTRL22, 0x9f },
+ { OV9740_ANALOG_CTRL15, 0xf0 },
/* Sensor Control */
{ OV9740_SENSOR_CTRL03, 0x42 },
@@ -385,7 +375,7 @@ static const struct ov9740_reg ov9740_defaults[] = {
{ OV9740_LN_LENGTH_PCK_LO, 0x62 },
/* MIPI Control */
- { OV9740_MIPI_CTRL00, 0x44 },
+ { OV9740_MIPI_CTRL00, 0x44 }, /* 0x64 for discontinuous clk */
{ OV9740_MIPI_3837, 0x01 },
{ OV9740_MIPI_CTRL01, 0x0f },
{ OV9740_MIPI_CTRL03, 0x05 },
@@ -393,54 +383,9 @@ static const struct ov9740_reg ov9740_defaults[] = {
{ OV9740_VFIFO_RD_CTRL, 0x16 },
{ OV9740_MIPI_CTRL_3012, 0x70 },
{ OV9740_SC_CMMM_MIPI_CTR, 0x01 },
-};
-
-static const struct ov9740_reg ov9740_regs_vga[] = {
- { OV9740_X_ADDR_START_HI, 0x00 },
- { OV9740_X_ADDR_START_LO, 0xa0 },
- { OV9740_Y_ADDR_START_HI, 0x00 },
- { OV9740_Y_ADDR_START_LO, 0x00 },
- { OV9740_X_ADDR_END_HI, 0x04 },
- { OV9740_X_ADDR_END_LO, 0x63 },
- { OV9740_Y_ADDR_END_HI, 0x02 },
- { OV9740_Y_ADDR_END_LO, 0xd3 },
- { OV9740_X_OUTPUT_SIZE_HI, 0x02 },
- { OV9740_X_OUTPUT_SIZE_LO, 0x80 },
- { OV9740_Y_OUTPUT_SIZE_HI, 0x01 },
- { OV9740_Y_OUTPUT_SIZE_LO, 0xe0 },
- { OV9740_ISP_CTRL1E, 0x03 },
- { OV9740_ISP_CTRL1F, 0xc0 },
- { OV9740_ISP_CTRL20, 0x02 },
- { OV9740_ISP_CTRL21, 0xd0 },
- { OV9740_VFIFO_READ_START_HI, 0x01 },
- { OV9740_VFIFO_READ_START_LO, 0x40 },
- { OV9740_ISP_CTRL00, 0xff },
- { OV9740_ISP_CTRL01, 0xff },
- { OV9740_ISP_CTRL03, 0xff },
-};
-static const struct ov9740_reg ov9740_regs_720p[] = {
- { OV9740_X_ADDR_START_HI, 0x00 },
- { OV9740_X_ADDR_START_LO, 0x00 },
- { OV9740_Y_ADDR_START_HI, 0x00 },
- { OV9740_Y_ADDR_START_LO, 0x00 },
- { OV9740_X_ADDR_END_HI, 0x05 },
- { OV9740_X_ADDR_END_LO, 0x03 },
- { OV9740_Y_ADDR_END_HI, 0x02 },
- { OV9740_Y_ADDR_END_LO, 0xd3 },
- { OV9740_X_OUTPUT_SIZE_HI, 0x05 },
- { OV9740_X_OUTPUT_SIZE_LO, 0x00 },
- { OV9740_Y_OUTPUT_SIZE_HI, 0x02 },
- { OV9740_Y_OUTPUT_SIZE_LO, 0xd0 },
- { OV9740_ISP_CTRL1E, 0x05 },
- { OV9740_ISP_CTRL1F, 0x00 },
- { OV9740_ISP_CTRL20, 0x02 },
- { OV9740_ISP_CTRL21, 0xd0 },
- { OV9740_VFIFO_READ_START_HI, 0x02 },
- { OV9740_VFIFO_READ_START_LO, 0x30 },
- { OV9740_ISP_CTRL00, 0xff },
- { OV9740_ISP_CTRL01, 0xef },
- { OV9740_ISP_CTRL03, 0xff },
+ /* YUYV order */
+ { OV9740_ISP_CTRL19, 0x02 },
};
static enum v4l2_mbus_pixelcode ov9740_codes[] = {
@@ -537,7 +482,8 @@ static int ov9740_reg_rmw(struct i2c_client *client, u16 reg, u8 set, u8 unset)
ret = ov9740_reg_read(client, reg, &val);
if (ret < 0) {
dev_err(&client->dev,
- "[Read]-Modify-Write of register %02x failed!\n", reg);
+ "[Read]-Modify-Write of register 0x%04x failed!\n",
+ reg);
return ret;
}
@@ -547,7 +493,8 @@ static int ov9740_reg_rmw(struct i2c_client *client, u16 reg, u8 set, u8 unset)
ret = ov9740_reg_write(client, reg, val);
if (ret < 0) {
dev_err(&client->dev,
- "Read-Modify-[Write] of register %02x failed!\n", reg);
+ "Read-Modify-[Write] of register 0x%04x failed!\n",
+ reg);
return ret;
}
@@ -608,6 +555,8 @@ static int ov9740_s_stream(struct v4l2_subdev *sd, int enable)
0x00);
}
+ priv->current_enable = enable;
+
return ret;
}
@@ -630,126 +579,127 @@ static unsigned long ov9740_query_bus_param(struct soc_camera_device *icd)
return soc_camera_apply_sensor_flags(icl, flags);
}
-/* Get status of additional camera capabilities */
-static int ov9740_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct ov9740_priv *priv = to_ov9740(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- ctrl->value = priv->flag_vflip;
- break;
- case V4L2_CID_HFLIP:
- ctrl->value = priv->flag_hflip;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Set status of additional camera capabilities */
-static int ov9740_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct ov9740_priv *priv = to_ov9740(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- priv->flag_vflip = ctrl->value;
- break;
- case V4L2_CID_HFLIP:
- priv->flag_hflip = ctrl->value;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Get chip identification */
-static int ov9740_g_chip_ident(struct v4l2_subdev *sd,
- struct v4l2_dbg_chip_ident *id)
+/* Align the requested capture resolution and clamp it to the sensor maximum */
+static void ov9740_res_roundup(u32 *width, u32 *height)
{
- struct ov9740_priv *priv = to_ov9740(sd);
+ /* Width must be a multiple of 4 pixels. */
+ *width = ALIGN(*width, 4);
- id->ident = priv->ident;
- id->revision = priv->revision;
+ /* Max resolution is 1280x720 (720p). */
+ if (*width > OV9740_MAX_WIDTH)
+ *width = OV9740_MAX_WIDTH;
- return 0;
+ if (*height > OV9740_MAX_HEIGHT)
+ *height = OV9740_MAX_HEIGHT;
}
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int ov9740_get_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
+/* Setup registers according to resolution and color encoding */
+static int ov9740_set_res(struct i2c_client *client, u32 width, u32 height)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u32 x_start;
+ u32 y_start;
+ u32 x_end;
+ u32 y_end;
+ bool scaling = false;
+ u32 scale_input_x;
+ u32 scale_input_y;
int ret;
- u8 val;
-
- if (reg->reg & ~0xffff)
- return -EINVAL;
- reg->size = 2;
-
- ret = ov9740_reg_read(client, reg->reg, &val);
- if (ret)
- return ret;
-
- reg->val = (__u64)val;
+ if ((width != OV9740_MAX_WIDTH) || (height != OV9740_MAX_HEIGHT))
+ scaling = true;
- return ret;
-}
-
-static int ov9740_set_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
+ /*
+ * Try to use as much of the sensor area as possible when supporting
+ * smaller resolutions. Depending on the aspect ratio of the
+ * chosen resolution, we can either use the full width of the sensor,
+ * or the full height of the sensor (or both if the aspect ratio is
+ * the same as 1280x720).
+ */
+ if ((OV9740_MAX_WIDTH * height) > (OV9740_MAX_HEIGHT * width)) {
+ scale_input_x = (OV9740_MAX_HEIGHT * width) / height;
+ scale_input_y = OV9740_MAX_HEIGHT;
+ } else {
+ scale_input_x = OV9740_MAX_WIDTH;
+ scale_input_y = (OV9740_MAX_WIDTH * height) / width;
+ }
- if (reg->reg & ~0xffff || reg->val & ~0xff)
- return -EINVAL;
+ /* These describe the area of the sensor to use. */
+ x_start = (OV9740_MAX_WIDTH - scale_input_x) / 2;
+ y_start = (OV9740_MAX_HEIGHT - scale_input_y) / 2;
+ x_end = x_start + scale_input_x - 1;
+ y_end = y_start + scale_input_y - 1;
- return ov9740_reg_write(client, reg->reg, reg->val);
-}
-#endif
+ ret = ov9740_reg_write(client, OV9740_X_ADDR_START_HI, x_start >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_X_ADDR_START_LO, x_start & 0xff);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_ADDR_START_HI, y_start >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_ADDR_START_LO, y_start & 0xff);
+ if (ret)
+ goto done;
-/* select nearest higher resolution for capture */
-static void ov9740_res_roundup(u32 *width, u32 *height)
-{
- int i;
+ ret = ov9740_reg_write(client, OV9740_X_ADDR_END_HI, x_end >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_X_ADDR_END_LO, x_end & 0xff);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_ADDR_END_HI, y_end >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_ADDR_END_LO, y_end & 0xff);
+ if (ret)
+ goto done;
- for (i = 0; i < ARRAY_SIZE(ov9740_resolutions); i++)
- if ((ov9740_resolutions[i].width >= *width) &&
- (ov9740_resolutions[i].height >= *height)) {
- *width = ov9740_resolutions[i].width;
- *height = ov9740_resolutions[i].height;
- return;
- }
+ ret = ov9740_reg_write(client, OV9740_X_OUTPUT_SIZE_HI, width >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_X_OUTPUT_SIZE_LO, width & 0xff);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_OUTPUT_SIZE_HI, height >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_OUTPUT_SIZE_LO, height & 0xff);
+ if (ret)
+ goto done;
- *width = ov9740_resolutions[OV9740_720P].width;
- *height = ov9740_resolutions[OV9740_720P].height;
-}
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL1E, scale_input_x >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL1F, scale_input_x & 0xff);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL20, scale_input_y >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL21, scale_input_y & 0xff);
+ if (ret)
+ goto done;
-/* Setup registers according to resolution and color encoding */
-static int ov9740_set_res(struct i2c_client *client, u32 width)
-{
- int ret;
+ ret = ov9740_reg_write(client, OV9740_VFIFO_READ_START_HI,
+ (scale_input_x - width) >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_VFIFO_READ_START_LO,
+ (scale_input_x - width) & 0xff);
+ if (ret)
+ goto done;
- /* select register configuration for given resolution */
- if (width == ov9740_resolutions[OV9740_VGA].width) {
- dev_dbg(&client->dev, "Setting image size to 640x480\n");
- ret = ov9740_reg_write_array(client, ov9740_regs_vga,
- ARRAY_SIZE(ov9740_regs_vga));
- } else if (width == ov9740_resolutions[OV9740_720P].width) {
- dev_dbg(&client->dev, "Setting image size to 1280x720\n");
- ret = ov9740_reg_write_array(client, ov9740_regs_720p,
- ARRAY_SIZE(ov9740_regs_720p));
- } else {
- dev_err(&client->dev, "Failed to select resolution!\n");
- return -EINVAL;
- }
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL00, 0xff);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL01, 0xef |
+ (scaling << 4));
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL03, 0xff);
+done:
return ret;
}
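To make the windowing arithmetic above concrete, a worked example under an assumed 640x480 request (illustrative only): 1280 * 480 is larger than 720 * 640, so the branch keeps the full sensor height and computes scale_input_x = (720 * 640) / 480 = 960 and scale_input_y = 720; the crop window then becomes x_start = (1280 - 960) / 2 = 160, y_start = 0, x_end = 1119, y_end = 719, and scaling is enabled because 640x480 differs from the 1280x720 maximum.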
@@ -758,6 +708,7 @@ static int ov9740_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov9740_priv *priv = to_ov9740(sd);
enum v4l2_colorspace cspace;
enum v4l2_mbus_pixelcode code = mf->code;
int ret;
@@ -777,13 +728,15 @@ static int ov9740_s_fmt(struct v4l2_subdev *sd,
if (ret < 0)
return ret;
- ret = ov9740_set_res(client, mf->width);
+ ret = ov9740_set_res(client, mf->width, mf->height);
if (ret < 0)
return ret;
mf->code = code;
mf->colorspace = cspace;
+ memcpy(&priv->current_mf, mf, sizeof(struct v4l2_mbus_framefmt));
+
return ret;
}
@@ -814,8 +767,8 @@ static int ov9740_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
{
a->bounds.left = 0;
a->bounds.top = 0;
- a->bounds.width = ov9740_resolutions[OV9740_720P].width;
- a->bounds.height = ov9740_resolutions[OV9740_720P].height;
+ a->bounds.width = OV9740_MAX_WIDTH;
+ a->bounds.height = OV9740_MAX_HEIGHT;
a->defrect = a->bounds;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
a->pixelaspect.numerator = 1;
@@ -828,13 +781,115 @@ static int ov9740_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
a->c.left = 0;
a->c.top = 0;
- a->c.width = ov9740_resolutions[OV9740_720P].width;
- a->c.height = ov9740_resolutions[OV9740_720P].height;
+ a->c.width = OV9740_MAX_WIDTH;
+ a->c.height = OV9740_MAX_HEIGHT;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
return 0;
}
+/* Get status of additional camera capabilities */
+static int ov9740_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct ov9740_priv *priv = to_ov9740(sd);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ ctrl->value = priv->flag_vflip;
+ break;
+ case V4L2_CID_HFLIP:
+ ctrl->value = priv->flag_hflip;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set status of additional camera capabilities */
+static int ov9740_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct ov9740_priv *priv = to_ov9740(sd);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ priv->flag_vflip = ctrl->value;
+ break;
+ case V4L2_CID_HFLIP:
+ priv->flag_hflip = ctrl->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Get chip identification */
+static int ov9740_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct ov9740_priv *priv = to_ov9740(sd);
+
+ id->ident = priv->ident;
+ id->revision = priv->revision;
+
+ return 0;
+}
+
+static int ov9740_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov9740_priv *priv = to_ov9740(sd);
+
+ if (!priv->current_enable)
+ return 0;
+
+ if (on) {
+ ov9740_s_fmt(sd, &priv->current_mf);
+ ov9740_s_stream(sd, priv->current_enable);
+ } else {
+ ov9740_s_stream(sd, 0);
+ priv->current_enable = true;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ov9740_get_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 val;
+
+ if (reg->reg & ~0xffff)
+ return -EINVAL;
+
+ reg->size = 2;
+
+ ret = ov9740_reg_read(client, reg->reg, &val);
+ if (ret)
+ return ret;
+
+ reg->val = (__u64)val;
+
+ return ret;
+}
+
+static int ov9740_set_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg & ~0xffff || reg->val & ~0xff)
+ return -EINVAL;
+
+ return ov9740_reg_write(client, reg->reg, reg->val);
+}
+#endif
+
static int ov9740_video_probe(struct soc_camera_device *icd,
struct i2c_client *client)
{
@@ -843,16 +898,9 @@ static int ov9740_video_probe(struct soc_camera_device *icd,
u8 modelhi, modello;
int ret;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface) {
- dev_err(&client->dev, "Parent missing or invalid!\n");
- ret = -ENODEV;
- goto err;
- }
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* check and show product ID and manufacturer ID
@@ -901,24 +949,24 @@ static struct soc_camera_ops ov9740_ops = {
.num_controls = ARRAY_SIZE(ov9740_controls),
};
+static struct v4l2_subdev_video_ops ov9740_video_ops = {
+ .s_stream = ov9740_s_stream,
+ .s_mbus_fmt = ov9740_s_fmt,
+ .try_mbus_fmt = ov9740_try_fmt,
+ .enum_mbus_fmt = ov9740_enum_fmt,
+ .cropcap = ov9740_cropcap,
+ .g_crop = ov9740_g_crop,
+};
+
static struct v4l2_subdev_core_ops ov9740_core_ops = {
.g_ctrl = ov9740_g_ctrl,
.s_ctrl = ov9740_s_ctrl,
.g_chip_ident = ov9740_g_chip_ident,
+ .s_power = ov9740_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov9740_get_register,
.s_register = ov9740_set_register,
#endif
-
-};
-
-static struct v4l2_subdev_video_ops ov9740_video_ops = {
- .s_stream = ov9740_s_stream,
- .s_mbus_fmt = ov9740_s_fmt,
- .try_mbus_fmt = ov9740_try_fmt,
- .enum_mbus_fmt = ov9740_enum_fmt,
- .cropcap = ov9740_cropcap,
- .g_crop = ov9740_g_crop,
};
static struct v4l2_subdev_ops ov9740_subdev_ops = {
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 7551907..e753b5e 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -28,7 +28,6 @@
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -39,7 +38,7 @@
#include <media/v4l2-device.h>
MODULE_LICENSE("GPL");
-
+MODULE_VERSION("0.0.4");
#define MOTOROLA 1
#define PHILIPS2 2 /* SAA7191 */
@@ -678,7 +677,6 @@ static int pms_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, dev->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Mediavision PMS", sizeof(vcap->card));
strlcpy(vcap->bus_info, "ISA", sizeof(vcap->bus_info));
- vcap->version = KERNEL_VERSION(0, 0, 3);
vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-main.c b/drivers/media/video/pvrusb2/pvrusb2-main.c
index 2254194..c1d9bb6 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-main.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-main.c
@@ -168,6 +168,7 @@ module_exit(pvr_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.9.1");
/*
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 3876114..e27f8ab 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -91,7 +91,7 @@ static struct v4l2_capability pvr_capability ={
.driver = "pvrusb2",
.card = "Hauppauge WinTV pvr-usb2",
.bus_info = "usb",
- .version = KERNEL_VERSION(0, 9, 0),
+ .version = LINUX_VERSION_CODE,
.capabilities = (V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
V4L2_CAP_READWRITE),
@@ -369,11 +369,6 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
break;
}
- case VIDIOC_S_AUDIO:
- {
- ret = -EINVAL;
- break;
- }
case VIDIOC_G_TUNER:
{
struct v4l2_tuner *vt = (struct v4l2_tuner *)arg;
@@ -850,7 +845,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
#endif
default :
- ret = -EINVAL;
+ ret = -ENOTTY;
break;
}
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
index 8da42e4..d63d0a8 100644
--- a/drivers/media/video/pwc/Kconfig
+++ b/drivers/media/video/pwc/Kconfig
@@ -1,6 +1,7 @@
config USB_PWC
tristate "USB Philips Cameras"
depends on VIDEO_V4L2
+ select VIDEOBUF2_VMALLOC
---help---
Say Y or M here if you want to use one of these Philips & OEM
webcams:
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 760b4de..3977add 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -3,6 +3,7 @@
video modes.
(C) 1999-2003 Nemosoft Unv.
(C) 2004-2006 Luc Saillard (luc@saillard.org)
+ (C) 2011 Hans de Goede <hdegoede@redhat.com>
NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
driver and thus may have bugs that are not present in the original version.
@@ -43,61 +44,12 @@
#include <asm/errno.h>
#include "pwc.h"
-#include "pwc-uncompress.h"
#include "pwc-kiara.h"
#include "pwc-timon.h"
#include "pwc-dec1.h"
#include "pwc-dec23.h"
-/* Request types: video */
-#define SET_LUM_CTL 0x01
-#define GET_LUM_CTL 0x02
-#define SET_CHROM_CTL 0x03
-#define GET_CHROM_CTL 0x04
-#define SET_STATUS_CTL 0x05
-#define GET_STATUS_CTL 0x06
-#define SET_EP_STREAM_CTL 0x07
-#define GET_EP_STREAM_CTL 0x08
-#define GET_XX_CTL 0x09
-#define SET_XX_CTL 0x0A
-#define GET_XY_CTL 0x0B
-#define SET_XY_CTL 0x0C
-#define SET_MPT_CTL 0x0D
-#define GET_MPT_CTL 0x0E
-
-/* Selectors for the Luminance controls [GS]ET_LUM_CTL */
-#define AGC_MODE_FORMATTER 0x2000
-#define PRESET_AGC_FORMATTER 0x2100
-#define SHUTTER_MODE_FORMATTER 0x2200
-#define PRESET_SHUTTER_FORMATTER 0x2300
-#define PRESET_CONTOUR_FORMATTER 0x2400
-#define AUTO_CONTOUR_FORMATTER 0x2500
-#define BACK_LIGHT_COMPENSATION_FORMATTER 0x2600
-#define CONTRAST_FORMATTER 0x2700
-#define DYNAMIC_NOISE_CONTROL_FORMATTER 0x2800
-#define FLICKERLESS_MODE_FORMATTER 0x2900
-#define AE_CONTROL_SPEED 0x2A00
-#define BRIGHTNESS_FORMATTER 0x2B00
-#define GAMMA_FORMATTER 0x2C00
-
-/* Selectors for the Chrominance controls [GS]ET_CHROM_CTL */
-#define WB_MODE_FORMATTER 0x1000
-#define AWB_CONTROL_SPEED_FORMATTER 0x1100
-#define AWB_CONTROL_DELAY_FORMATTER 0x1200
-#define PRESET_MANUAL_RED_GAIN_FORMATTER 0x1300
-#define PRESET_MANUAL_BLUE_GAIN_FORMATTER 0x1400
-#define COLOUR_MODE_FORMATTER 0x1500
-#define SATURATION_MODE_FORMATTER1 0x1600
-#define SATURATION_MODE_FORMATTER2 0x1700
-
-/* Selectors for the Status controls [GS]ET_STATUS_CTL */
-#define SAVE_USER_DEFAULTS_FORMATTER 0x0200
-#define RESTORE_USER_DEFAULTS_FORMATTER 0x0300
-#define RESTORE_FACTORY_DEFAULTS_FORMATTER 0x0400
-#define READ_AGC_FORMATTER 0x0500
-#define READ_SHUTTER_FORMATTER 0x0600
-#define READ_RED_GAIN_FORMATTER 0x0700
-#define READ_BLUE_GAIN_FORMATTER 0x0800
+/* Selectors for status controls used only in this file */
#define GET_STATUS_B00 0x0B00
#define SENSOR_TYPE_FORMATTER1 0x0C00
#define GET_STATUS_3000 0x3000
@@ -116,11 +68,6 @@
/* Formatters for the Video Endpoint controls [GS]ET_EP_STREAM_CTL */
#define VIDEO_OUTPUT_CONTROL_FORMATTER 0x0100
-/* Formatters for the motorized pan & tilt [GS]ET_MPT_CTL */
-#define PT_RELATIVE_CONTROL_FORMATTER 0x01
-#define PT_RESET_CONTROL_FORMATTER 0x02
-#define PT_STATUS_FORMATTER 0x03
-
static const char *size2name[PSZ_MAX] =
{
"subQCIF",
@@ -160,7 +107,7 @@ static void pwc_set_image_buffer_size(struct pwc_device *pdev);
/****************************************************************************/
static int _send_control_msg(struct pwc_device *pdev,
- u8 request, u16 value, int index, void *buf, int buflen, int timeout)
+ u8 request, u16 value, int index, void *buf, int buflen)
{
int rc;
void *kbuf = NULL;
@@ -177,7 +124,7 @@ static int _send_control_msg(struct pwc_device *pdev,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value,
index,
- kbuf, buflen, timeout);
+ kbuf, buflen, USB_CTRL_SET_TIMEOUT);
kfree(kbuf);
return rc;
@@ -197,9 +144,13 @@ static int recv_control_msg(struct pwc_device *pdev,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value,
pdev->vcinterface,
- kbuf, buflen, 500);
+ kbuf, buflen, USB_CTRL_GET_TIMEOUT);
memcpy(buf, kbuf, buflen);
kfree(kbuf);
+
+ if (rc < 0)
+ PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
+ rc, request, value);
return rc;
}
@@ -210,18 +161,16 @@ static inline int send_video_command(struct pwc_device *pdev,
SET_EP_STREAM_CTL,
VIDEO_OUTPUT_CONTROL_FORMATTER,
index,
- buf, buflen, 1000);
+ buf, buflen);
}
-static inline int send_control_msg(struct pwc_device *pdev,
+int send_control_msg(struct pwc_device *pdev,
u8 request, u16 value, void *buf, int buflen)
{
return _send_control_msg(pdev,
- request, value, pdev->vcinterface, buf, buflen, 500);
+ request, value, pdev->vcinterface, buf, buflen);
}
-
-
static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
{
unsigned char buf[3];
@@ -261,8 +210,11 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
return ret;
}
- if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
- pwc_dec1_init(pdev->type, pdev->release, buf, pdev->decompress_data);
+ if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
+ ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
+ if (ret < 0)
+ return ret;
+ }
pdev->cmd_len = 3;
memcpy(pdev->cmd_buf, buf, 3);
@@ -321,8 +273,11 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, i
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
- pwc_dec23_init(pdev, pdev->type, buf);
+ if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
+ ret = pwc_dec23_init(pdev, pdev->type, buf);
+ if (ret < 0)
+ return ret;
+ }
pdev->cmd_len = 13;
memcpy(pdev->cmd_buf, buf, 13);
@@ -394,8 +349,11 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, i
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
- pwc_dec23_init(pdev, pdev->type, buf);
+ if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
+ ret = pwc_dec23_init(pdev, pdev->type, buf);
+ if (ret < 0)
+ return ret;
+ }
pdev->cmd_len = 12;
memcpy(pdev->cmd_buf, buf, 12);
@@ -452,6 +410,7 @@ int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frame
}
pdev->view.x = width;
pdev->view.y = height;
+ pdev->vcompression = compression;
pdev->frame_total_size = pdev->frame_size + pdev->frame_header_size + pdev->frame_trailer_size;
pwc_set_image_buffer_size(pdev);
PWC_DEBUG_SIZE("Set viewport to %dx%d, image size is %dx%d.\n", width, height, pwc_image_sizes[size].x, pwc_image_sizes[size].y);
@@ -511,13 +470,9 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i
return ret;
}
-#define BLACK_Y 0
-#define BLACK_U 128
-#define BLACK_V 128
-
static void pwc_set_image_buffer_size(struct pwc_device *pdev)
{
- int i, factor = 0;
+ int factor = 0;
/* for V4L2_PIX_FMT_YUV420 */
switch (pdev->pixfmt) {
@@ -541,442 +496,108 @@ static void pwc_set_image_buffer_size(struct pwc_device *pdev)
*/
pdev->offset.x = ((pdev->view.x - pdev->image.x) / 2) & 0xFFFC;
pdev->offset.y = ((pdev->view.y - pdev->image.y) / 2) & 0xFFFE;
-
- /* Fill buffers with black colors */
- for (i = 0; i < pwc_mbufs; i++) {
- unsigned char *p = pdev->image_data + pdev->images[i].offset;
- memset(p, BLACK_Y, pdev->view.x * pdev->view.y);
- p += pdev->view.x * pdev->view.y;
- memset(p, BLACK_U, pdev->view.x * pdev->view.y/4);
- p += pdev->view.x * pdev->view.y/4;
- memset(p, BLACK_V, pdev->view.x * pdev->view.y/4);
- }
}
-
-
-/* BRIGHTNESS */
-
-int pwc_get_brightness(struct pwc_device *pdev)
+int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
- char buf;
int ret;
+ u8 buf;
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, BRIGHTNESS_FORMATTER, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
if (ret < 0)
return ret;
- return buf;
-}
-int pwc_set_brightness(struct pwc_device *pdev, int value)
-{
- char buf;
-
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- buf = (value >> 9) & 0x7f;
- return send_control_msg(pdev,
- SET_LUM_CTL, BRIGHTNESS_FORMATTER, &buf, sizeof(buf));
+ *data = buf;
+ return 0;
}
-/* CONTRAST */
-
-int pwc_get_contrast(struct pwc_device *pdev)
+int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
{
- char buf;
int ret;
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, CONTRAST_FORMATTER, &buf, sizeof(buf));
+ ret = send_control_msg(pdev, request, value, &data, sizeof(data));
if (ret < 0)
return ret;
- return buf;
-}
-int pwc_set_contrast(struct pwc_device *pdev, int value)
-{
- char buf;
-
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- buf = (value >> 10) & 0x3f;
- return send_control_msg(pdev,
- SET_LUM_CTL, CONTRAST_FORMATTER, &buf, sizeof(buf));
+ return 0;
}
-/* GAMMA */
-
-int pwc_get_gamma(struct pwc_device *pdev)
+int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
- char buf;
int ret;
+ s8 buf;
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, GAMMA_FORMATTER, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
if (ret < 0)
return ret;
- return buf;
-}
-
-int pwc_set_gamma(struct pwc_device *pdev, int value)
-{
- char buf;
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- buf = (value >> 11) & 0x1f;
- return send_control_msg(pdev,
- SET_LUM_CTL, GAMMA_FORMATTER, &buf, sizeof(buf));
-}
-
-
-/* SATURATION */
-
-/* return a value between [-100 , 100] */
-int pwc_get_saturation(struct pwc_device *pdev, int *value)
-{
- char buf;
- int ret, saturation_register;
-
- if (pdev->type < 675)
- return -EINVAL;
- if (pdev->type < 730)
- saturation_register = SATURATION_MODE_FORMATTER2;
- else
- saturation_register = SATURATION_MODE_FORMATTER1;
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, saturation_register, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = (signed)buf;
+ *data = buf;
return 0;
}
-/* @param value saturation color between [-100 , 100] */
-int pwc_set_saturation(struct pwc_device *pdev, int value)
+int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
- char buf;
- int saturation_register;
-
- if (pdev->type < 675)
- return -EINVAL;
- if (value < -100)
- value = -100;
- if (value > 100)
- value = 100;
- if (pdev->type < 730)
- saturation_register = SATURATION_MODE_FORMATTER2;
- else
- saturation_register = SATURATION_MODE_FORMATTER1;
- return send_control_msg(pdev,
- SET_CHROM_CTL, saturation_register, &buf, sizeof(buf));
-}
-
-/* AGC */
-
-int pwc_set_agc(struct pwc_device *pdev, int mode, int value)
-{
- char buf;
int ret;
+ u8 buf[2];
- if (mode)
- buf = 0x0; /* auto */
- else
- buf = 0xff; /* fixed */
-
- ret = send_control_msg(pdev,
- SET_LUM_CTL, AGC_MODE_FORMATTER, &buf, sizeof(buf));
-
- if (!mode && ret >= 0) {
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- buf = (value >> 10) & 0x3F;
- ret = send_control_msg(pdev,
- SET_LUM_CTL, PRESET_AGC_FORMATTER, &buf, sizeof(buf));
- }
+ ret = recv_control_msg(pdev, request, value, buf, sizeof(buf));
if (ret < 0)
return ret;
+
+ *data = (buf[1] << 8) | buf[0];
return 0;
}
-int pwc_get_agc(struct pwc_device *pdev, int *value)
+int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
{
- unsigned char buf;
int ret;
+ u8 buf[2];
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, AGC_MODE_FORMATTER, &buf, sizeof(buf));
+ buf[0] = data & 0xff;
+ buf[1] = data >> 8;
+ ret = send_control_msg(pdev, request, value, buf, sizeof(buf));
if (ret < 0)
return ret;
- if (buf != 0) { /* fixed */
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, PRESET_AGC_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- if (buf > 0x3F)
- buf = 0x3F;
- *value = (buf << 10);
- }
- else { /* auto */
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, READ_AGC_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- /* Gah... this value ranges from 0x00 ... 0x9F */
- if (buf > 0x9F)
- buf = 0x9F;
- *value = -(48 + buf * 409);
- }
-
return 0;
}
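The generic helpers above collapse the per-control wrappers that the rest of this hunk removes: each camera setting becomes a (request, formatter) pair plus a data width. As a rough sketch of how a caller can use them, reusing register names from the removed code — the actual call sites are not part of this hunk, so the wrapper names below are illustrative only:

    /* Illustrative only: a 16-bit little-endian read and an 8-bit preset
     * write expressed with the new generic helpers. */
    static int example_read_shutter(struct pwc_device *pdev, int *shutter)
    {
    	return pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
    				READ_SHUTTER_FORMATTER, shutter);
    }

    static int example_set_gain_preset(struct pwc_device *pdev, u8 preset)
    {
    	return pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
    			       PRESET_AGC_FORMATTER, preset);
    }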
-int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value)
-{
- char buf[2];
- int speed, ret;
-
-
- if (mode)
- buf[0] = 0x0; /* auto */
- else
- buf[0] = 0xff; /* fixed */
-
- ret = send_control_msg(pdev,
- SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
-
- if (!mode && ret >= 0) {
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
-
- if (DEVICE_USE_CODEC2(pdev->type)) {
- /* speed ranges from 0x0 to 0x290 (656) */
- speed = (value / 100);
- buf[1] = speed >> 8;
- buf[0] = speed & 0xff;
- } else if (DEVICE_USE_CODEC3(pdev->type)) {
- /* speed seems to range from 0x0 to 0xff */
- buf[1] = 0;
- buf[0] = value >> 8;
- }
-
- ret = send_control_msg(pdev,
- SET_LUM_CTL, PRESET_SHUTTER_FORMATTER,
- &buf, sizeof(buf));
- }
- return ret;
-}
-
-/* This function is not exported to v4l1, so output values between 0 -> 256 */
-int pwc_get_shutter_speed(struct pwc_device *pdev, int *value)
+int pwc_button_ctrl(struct pwc_device *pdev, u16 value)
{
- unsigned char buf[2];
int ret;
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, READ_SHUTTER_FORMATTER, &buf, sizeof(buf));
+ ret = send_control_msg(pdev, SET_STATUS_CTL, value, NULL, 0);
if (ret < 0)
return ret;
- *value = buf[0] + (buf[1] << 8);
- if (DEVICE_USE_CODEC2(pdev->type)) {
- /* speed ranges from 0x0 to 0x290 (656) */
- *value *= 256/656;
- } else if (DEVICE_USE_CODEC3(pdev->type)) {
- /* speed seems to range from 0x0 to 0xff */
- }
+
return 0;
}
-
/* POWER */
-
-int pwc_camera_power(struct pwc_device *pdev, int power)
+void pwc_camera_power(struct pwc_device *pdev, int power)
{
char buf;
+ int r;
+
+ if (!pdev->power_save)
+ return;
if (pdev->type < 675 || (pdev->type < 730 && pdev->release < 6))
- return 0; /* Not supported by Nala or Timon < release 6 */
+ return; /* Not supported by Nala or Timon < release 6 */
if (power)
buf = 0x00; /* active */
else
buf = 0xFF; /* power save */
- return send_control_msg(pdev,
+ r = send_control_msg(pdev,
SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER,
&buf, sizeof(buf));
-}
-
-
-
-/* private calls */
-
-int pwc_restore_user(struct pwc_device *pdev)
-{
- return send_control_msg(pdev,
- SET_STATUS_CTL, RESTORE_USER_DEFAULTS_FORMATTER, NULL, 0);
-}
-
-int pwc_save_user(struct pwc_device *pdev)
-{
- return send_control_msg(pdev,
- SET_STATUS_CTL, SAVE_USER_DEFAULTS_FORMATTER, NULL, 0);
-}
-
-int pwc_restore_factory(struct pwc_device *pdev)
-{
- return send_control_msg(pdev,
- SET_STATUS_CTL, RESTORE_FACTORY_DEFAULTS_FORMATTER, NULL, 0);
-}
-
- /* ************************************************* */
- /* Patch by Alvarado: (not in the original version */
-
- /*
- * the camera recognizes modes from 0 to 4:
- *
- * 00: indoor (incandescant lighting)
- * 01: outdoor (sunlight)
- * 02: fluorescent lighting
- * 03: manual
- * 04: auto
- */
-int pwc_set_awb(struct pwc_device *pdev, int mode)
-{
- char buf;
- int ret;
-
- if (mode < 0)
- mode = 0;
-
- if (mode > 4)
- mode = 4;
-
- buf = mode & 0x07; /* just the lowest three bits */
-
- ret = send_control_msg(pdev,
- SET_CHROM_CTL, WB_MODE_FORMATTER, &buf, sizeof(buf));
-
- if (ret < 0)
- return ret;
- return 0;
-}
-
-int pwc_get_awb(struct pwc_device *pdev)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, WB_MODE_FORMATTER, &buf, sizeof(buf));
-
- if (ret < 0)
- return ret;
- return buf;
-}
-
-int pwc_set_red_gain(struct pwc_device *pdev, int value)
-{
- unsigned char buf;
-
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- /* only the msb is considered */
- buf = value >> 8;
- return send_control_msg(pdev,
- SET_CHROM_CTL, PRESET_MANUAL_RED_GAIN_FORMATTER,
- &buf, sizeof(buf));
-}
-
-int pwc_get_red_gain(struct pwc_device *pdev, int *value)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, PRESET_MANUAL_RED_GAIN_FORMATTER,
- &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = buf << 8;
- return 0;
-}
-
-
-int pwc_set_blue_gain(struct pwc_device *pdev, int value)
-{
- unsigned char buf;
-
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- /* only the msb is considered */
- buf = value >> 8;
- return send_control_msg(pdev,
- SET_CHROM_CTL, PRESET_MANUAL_BLUE_GAIN_FORMATTER,
- &buf, sizeof(buf));
-}
-
-int pwc_get_blue_gain(struct pwc_device *pdev, int *value)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, PRESET_MANUAL_BLUE_GAIN_FORMATTER,
- &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = buf << 8;
- return 0;
-}
-
-/* The following two functions are different, since they only read the
- internal red/blue gains, which may be different from the manual
- gains set or read above.
- */
-static int pwc_read_red_gain(struct pwc_device *pdev, int *value)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, READ_RED_GAIN_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = buf << 8;
- return 0;
+ if (r < 0)
+ PWC_ERROR("Failed to power %s camera (%d)\n",
+ power ? "on" : "off", r);
}
-static int pwc_read_blue_gain(struct pwc_device *pdev, int *value)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, READ_BLUE_GAIN_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = buf << 8;
- return 0;
-}
-
-
static int pwc_set_wb_speed(struct pwc_device *pdev, int speed)
{
unsigned char buf;
@@ -1028,6 +649,7 @@ static int pwc_get_wb_delay(struct pwc_device *pdev, int *value)
int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
{
unsigned char buf[2];
+ int r;
if (pdev->type < 730)
return 0;
@@ -1045,8 +667,12 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
buf[0] = on_value;
buf[1] = off_value;
- return send_control_msg(pdev,
+ r = send_control_msg(pdev,
SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf));
+ if (r < 0)
+ PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
+
+ return r;
}
static int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value)
@@ -1069,164 +695,6 @@ static int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value)
return 0;
}
-int pwc_set_contour(struct pwc_device *pdev, int contour)
-{
- unsigned char buf;
- int ret;
-
- if (contour < 0)
- buf = 0xff; /* auto contour on */
- else
- buf = 0x0; /* auto contour off */
- ret = send_control_msg(pdev,
- SET_LUM_CTL, AUTO_CONTOUR_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
-
- if (contour < 0)
- return 0;
- if (contour > 0xffff)
- contour = 0xffff;
-
- buf = (contour >> 10); /* contour preset is [0..3f] */
- ret = send_control_msg(pdev,
- SET_LUM_CTL, PRESET_CONTOUR_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- return 0;
-}
-
-int pwc_get_contour(struct pwc_device *pdev, int *contour)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, AUTO_CONTOUR_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
-
- if (buf == 0) {
- /* auto mode off, query current preset value */
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, PRESET_CONTOUR_FORMATTER,
- &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *contour = buf << 10;
- }
- else
- *contour = -1;
- return 0;
-}
-
-
-int pwc_set_backlight(struct pwc_device *pdev, int backlight)
-{
- unsigned char buf;
-
- if (backlight)
- buf = 0xff;
- else
- buf = 0x0;
- return send_control_msg(pdev,
- SET_LUM_CTL, BACK_LIGHT_COMPENSATION_FORMATTER,
- &buf, sizeof(buf));
-}
-
-int pwc_get_backlight(struct pwc_device *pdev, int *backlight)
-{
- int ret;
- unsigned char buf;
-
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, BACK_LIGHT_COMPENSATION_FORMATTER,
- &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *backlight = !!buf;
- return 0;
-}
-
-int pwc_set_colour_mode(struct pwc_device *pdev, int colour)
-{
- unsigned char buf;
-
- if (colour)
- buf = 0xff;
- else
- buf = 0x0;
- return send_control_msg(pdev,
- SET_CHROM_CTL, COLOUR_MODE_FORMATTER, &buf, sizeof(buf));
-}
-
-int pwc_get_colour_mode(struct pwc_device *pdev, int *colour)
-{
- int ret;
- unsigned char buf;
-
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, COLOUR_MODE_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *colour = !!buf;
- return 0;
-}
-
-
-int pwc_set_flicker(struct pwc_device *pdev, int flicker)
-{
- unsigned char buf;
-
- if (flicker)
- buf = 0xff;
- else
- buf = 0x0;
- return send_control_msg(pdev,
- SET_LUM_CTL, FLICKERLESS_MODE_FORMATTER, &buf, sizeof(buf));
-}
-
-int pwc_get_flicker(struct pwc_device *pdev, int *flicker)
-{
- int ret;
- unsigned char buf;
-
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, FLICKERLESS_MODE_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *flicker = !!buf;
- return 0;
-}
-
-int pwc_set_dynamic_noise(struct pwc_device *pdev, int noise)
-{
- unsigned char buf;
-
- if (noise < 0)
- noise = 0;
- if (noise > 3)
- noise = 3;
- buf = noise;
- return send_control_msg(pdev,
- SET_LUM_CTL, DYNAMIC_NOISE_CONTROL_FORMATTER,
- &buf, sizeof(buf));
-}
-
-int pwc_get_dynamic_noise(struct pwc_device *pdev, int *noise)
-{
- int ret;
- unsigned char buf;
-
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, DYNAMIC_NOISE_CONTROL_FORMATTER,
- &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *noise = buf;
- return 0;
-}
-
static int _pwc_mpt_reset(struct pwc_device *pdev, int flags)
{
unsigned char buf;
@@ -1309,7 +777,7 @@ static int pwc_mpt_get_status(struct pwc_device *pdev, struct pwc_mpt_status *st
return 0;
}
-
+#ifdef CONFIG_USB_PWC_DEBUG
int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
{
unsigned char buf;
@@ -1332,7 +800,7 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
*sensor = buf;
return 0;
}
-
+#endif
/* End of Add-Ons */
/* ************************************************* */
@@ -1356,37 +824,41 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
/* copy local variable to arg */
#define ARG_OUT(ARG_name) /* nothing */
+/*
+ * Our ctrls use native values, but the old custom pwc ioctl interface expects
+ * values from 0 - 65535, so define two helper functions to scale things. */
+static int pwc_ioctl_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return v4l2_ctrl_g_ctrl(ctrl) * 65535 / ctrl->maximum;
+}
+
+static int pwc_ioctl_s_ctrl(struct v4l2_ctrl *ctrl, int val)
+{
+ return v4l2_ctrl_s_ctrl(ctrl, val * ctrl->maximum / 65535);
+}
+
long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
{
long ret = 0;
switch(cmd) {
case VIDIOCPWCRUSER:
- {
- if (pwc_restore_user(pdev))
- ret = -EINVAL;
+ ret = pwc_button_ctrl(pdev, RESTORE_USER_DEFAULTS_FORMATTER);
break;
- }
case VIDIOCPWCSUSER:
- {
- if (pwc_save_user(pdev))
- ret = -EINVAL;
+ ret = pwc_button_ctrl(pdev, SAVE_USER_DEFAULTS_FORMATTER);
break;
- }
case VIDIOCPWCFACTORY:
- {
- if (pwc_restore_factory(pdev))
- ret = -EINVAL;
+ ret = pwc_button_ctrl(pdev, RESTORE_FACTORY_DEFAULTS_FORMATTER);
break;
- }
case VIDIOCPWCSCQUAL:
{
ARG_DEF(int, qual)
- if (pdev->iso_init) {
+ if (vb2_is_streaming(&pdev->vb_queue)) {
ret = -EBUSY;
break;
}
@@ -1396,8 +868,6 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
ret = -EINVAL;
else
ret = pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y, pdev->vframes, ARGR(qual), pdev->vsnapshot);
- if (ret >= 0)
- pdev->vcompression = ARGR(qual);
break;
}
@@ -1432,71 +902,59 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
case VIDIOCPWCSAGC:
{
ARG_DEF(int, agc)
-
ARG_IN(agc)
- if (pwc_set_agc(pdev, ARGR(agc) < 0 ? 1 : 0, ARGR(agc)))
- ret = -EINVAL;
+ ret = v4l2_ctrl_s_ctrl(pdev->autogain, ARGR(agc) < 0);
+ if (ret == 0 && ARGR(agc) >= 0)
+ ret = pwc_ioctl_s_ctrl(pdev->gain, ARGR(agc));
break;
}
case VIDIOCPWCGAGC:
{
ARG_DEF(int, agc)
-
- if (pwc_get_agc(pdev, ARGA(agc)))
- ret = -EINVAL;
+ if (v4l2_ctrl_g_ctrl(pdev->autogain))
+ ARGR(agc) = -1;
+ else
+ ARGR(agc) = pwc_ioctl_g_ctrl(pdev->gain);
ARG_OUT(agc)
break;
}
case VIDIOCPWCSSHUTTER:
{
- ARG_DEF(int, shutter_speed)
-
- ARG_IN(shutter_speed)
- ret = pwc_set_shutter_speed(pdev, ARGR(shutter_speed) < 0 ? 1 : 0, ARGR(shutter_speed));
+ ARG_DEF(int, shutter)
+ ARG_IN(shutter)
+ ret = v4l2_ctrl_s_ctrl(pdev->exposure_auto,
+ /* Menu idx 0 = auto, idx 1 = manual */
+ ARGR(shutter) >= 0);
+ if (ret == 0 && ARGR(shutter) >= 0)
+ ret = pwc_ioctl_s_ctrl(pdev->exposure, ARGR(shutter));
break;
}
case VIDIOCPWCSAWB:
{
ARG_DEF(struct pwc_whitebalance, wb)
-
ARG_IN(wb)
- ret = pwc_set_awb(pdev, ARGR(wb).mode);
- if (ret >= 0 && ARGR(wb).mode == PWC_WB_MANUAL) {
- pwc_set_red_gain(pdev, ARGR(wb).manual_red);
- pwc_set_blue_gain(pdev, ARGR(wb).manual_blue);
- }
+ ret = v4l2_ctrl_s_ctrl(pdev->auto_white_balance,
+ ARGR(wb).mode);
+ if (ret == 0 && ARGR(wb).mode == PWC_WB_MANUAL)
+ ret = pwc_ioctl_s_ctrl(pdev->red_balance,
+ ARGR(wb).manual_red);
+ if (ret == 0 && ARGR(wb).mode == PWC_WB_MANUAL)
+ ret = pwc_ioctl_s_ctrl(pdev->blue_balance,
+ ARGR(wb).manual_blue);
break;
}
case VIDIOCPWCGAWB:
{
ARG_DEF(struct pwc_whitebalance, wb)
-
- memset(ARGA(wb), 0, sizeof(struct pwc_whitebalance));
- ARGR(wb).mode = pwc_get_awb(pdev);
- if (ARGR(wb).mode < 0)
- ret = -EINVAL;
- else {
- if (ARGR(wb).mode == PWC_WB_MANUAL) {
- ret = pwc_get_red_gain(pdev, &ARGR(wb).manual_red);
- if (ret < 0)
- break;
- ret = pwc_get_blue_gain(pdev, &ARGR(wb).manual_blue);
- if (ret < 0)
- break;
- }
- if (ARGR(wb).mode == PWC_WB_AUTO) {
- ret = pwc_read_red_gain(pdev, &ARGR(wb).read_red);
- if (ret < 0)
- break;
- ret = pwc_read_blue_gain(pdev, &ARGR(wb).read_blue);
- if (ret < 0)
- break;
- }
- }
+ ARGR(wb).mode = v4l2_ctrl_g_ctrl(pdev->auto_white_balance);
+ ARGR(wb).manual_red = ARGR(wb).read_red =
+ pwc_ioctl_g_ctrl(pdev->red_balance);
+ ARGR(wb).manual_blue = ARGR(wb).read_blue =
+ pwc_ioctl_g_ctrl(pdev->blue_balance);
ARG_OUT(wb)
break;
}
@@ -1550,17 +1008,20 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
case VIDIOCPWCSCONTOUR:
{
ARG_DEF(int, contour)
-
ARG_IN(contour)
- ret = pwc_set_contour(pdev, ARGR(contour));
+ ret = v4l2_ctrl_s_ctrl(pdev->autocontour, ARGR(contour) < 0);
+ if (ret == 0 && ARGR(contour) >= 0)
+ ret = pwc_ioctl_s_ctrl(pdev->contour, ARGR(contour));
break;
}
case VIDIOCPWCGCONTOUR:
{
ARG_DEF(int, contour)
-
- ret = pwc_get_contour(pdev, ARGA(contour));
+ if (v4l2_ctrl_g_ctrl(pdev->autocontour))
+ ARGR(contour) = -1;
+ else
+ ARGR(contour) = pwc_ioctl_g_ctrl(pdev->contour);
ARG_OUT(contour)
break;
}
@@ -1568,17 +1029,15 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
case VIDIOCPWCSBACKLIGHT:
{
ARG_DEF(int, backlight)
-
ARG_IN(backlight)
- ret = pwc_set_backlight(pdev, ARGR(backlight));
+ ret = v4l2_ctrl_s_ctrl(pdev->backlight, ARGR(backlight));
break;
}
case VIDIOCPWCGBACKLIGHT:
{
ARG_DEF(int, backlight)
-
- ret = pwc_get_backlight(pdev, ARGA(backlight));
+ ARGR(backlight) = v4l2_ctrl_g_ctrl(pdev->backlight);
ARG_OUT(backlight)
break;
}
@@ -1586,17 +1045,15 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
case VIDIOCPWCSFLICKER:
{
ARG_DEF(int, flicker)
-
ARG_IN(flicker)
- ret = pwc_set_flicker(pdev, ARGR(flicker));
+ ret = v4l2_ctrl_s_ctrl(pdev->flicker, ARGR(flicker));
break;
}
case VIDIOCPWCGFLICKER:
{
ARG_DEF(int, flicker)
-
- ret = pwc_get_flicker(pdev, ARGA(flicker));
+ ARGR(flicker) = v4l2_ctrl_g_ctrl(pdev->flicker);
ARG_OUT(flicker)
break;
}
@@ -1604,17 +1061,15 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
case VIDIOCPWCSDYNNOISE:
{
ARG_DEF(int, dynnoise)
-
ARG_IN(dynnoise)
- ret = pwc_set_dynamic_noise(pdev, ARGR(dynnoise));
+ ret = v4l2_ctrl_s_ctrl(pdev->noise_reduction, ARGR(dynnoise));
break;
}
case VIDIOCPWCGDYNNOISE:
{
ARG_DEF(int, dynnoise)
-
- ret = pwc_get_dynamic_noise(pdev, ARGA(dynnoise));
+ ARGR(dynnoise) = v4l2_ctrl_g_ctrl(pdev->noise_reduction);
ARG_OUT(dynnoise);
break;
}
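For reference, pwc_ioctl_g_ctrl() and pwc_ioctl_s_ctrl() introduced above are plain linear rescalings between the legacy 0-65535 ioctl range and the native range each v4l2 control was registered with. A small self-contained example of the arithmetic; the maximum of 63 is only a stand-in, since the real maxima come from the control definitions, which are outside this hunk:

    #include <stdio.h>

    /* Same integer math as the two wrappers above, outside the driver. */
    static int to_legacy(int v4l2_val, int maximum)	/* g_ctrl direction */
    {
    	return v4l2_val * 65535 / maximum;
    }

    static int to_v4l2(int legacy_val, int maximum)	/* s_ctrl direction */
    {
    	return legacy_val * maximum / 65535;
    }

    int main(void)
    {
    	int max = 63;	/* e.g. a 6-bit preset range */

    	printf("63 -> %d -> %d\n", to_legacy(63, max),
    	       to_v4l2(to_legacy(63, max), max));	/* 65535, back to 63 */
    	printf("31 -> %d -> %d\n", to_legacy(31, max),
    	       to_v4l2(to_legacy(31, max), max));	/* 32247, back to 30 */
    	return 0;
    }

The second print shows that the truncating integer division can lose one step of the native range, which only matters if a value is read back and re-written through the legacy interface.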
diff --git a/drivers/media/video/pwc/pwc-dec1.c b/drivers/media/video/pwc/pwc-dec1.c
index c29593f..be0e02c 100644
--- a/drivers/media/video/pwc/pwc-dec1.c
+++ b/drivers/media/video/pwc/pwc-dec1.c
@@ -22,29 +22,19 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
-
-
#include "pwc-dec1.h"
-
-void pwc_dec1_init(int type, int release, void *buffer, void *table)
+int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer)
{
+ struct pwc_dec1_private *pdec;
-}
-
-void pwc_dec1_exit(void)
-{
+ if (pwc->decompress_data == NULL) {
+ pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
+ if (pdec == NULL)
+ return -ENOMEM;
+ pwc->decompress_data = pdec;
+ }
+ pdec = pwc->decompress_data;
-
-
-}
-
-int pwc_dec1_alloc(struct pwc_device *pwc)
-{
- pwc->decompress_data = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
- if (pwc->decompress_data == NULL)
- return -ENOMEM;
return 0;
}
-
diff --git a/drivers/media/video/pwc/pwc-dec1.h b/drivers/media/video/pwc/pwc-dec1.h
index 8b62ddc..a57d860 100644
--- a/drivers/media/video/pwc/pwc-dec1.h
+++ b/drivers/media/video/pwc/pwc-dec1.h
@@ -22,8 +22,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
-
#ifndef PWC_DEC1_H
#define PWC_DEC1_H
@@ -32,12 +30,8 @@
struct pwc_dec1_private
{
int version;
-
};
-int pwc_dec1_alloc(struct pwc_device *pwc);
-void pwc_dec1_init(int type, int release, void *buffer, void *private_data);
-void pwc_dec1_exit(void);
+int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer);
#endif
-
diff --git a/drivers/media/video/pwc/pwc-dec23.c b/drivers/media/video/pwc/pwc-dec23.c
index 0c801b8..06a4e87 100644
--- a/drivers/media/video/pwc/pwc-dec23.c
+++ b/drivers/media/video/pwc/pwc-dec23.c
@@ -916,27 +916,5 @@ void pwc_dec23_decompress(const struct pwc_device *pwc,
pout_planar_v += pwc->view.x;
}
-
}
-
}
-
-void pwc_dec23_exit(void)
-{
- /* Do nothing */
-
-}
-
-/**
- * Allocate a private structure used by lookup table.
- * You must call kfree() to free the memory allocated.
- */
-int pwc_dec23_alloc(struct pwc_device *pwc)
-{
- pwc->decompress_data = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
- if (pwc->decompress_data == NULL)
- return -ENOMEM;
- return 0;
-}
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/drivers/media/video/pwc/pwc-dec23.h b/drivers/media/video/pwc/pwc-dec23.h
index 1c55298..a0ac4f3 100644
--- a/drivers/media/video/pwc/pwc-dec23.h
+++ b/drivers/media/video/pwc/pwc-dec23.h
@@ -49,19 +49,9 @@ struct pwc_dec23_private
};
-
-int pwc_dec23_alloc(struct pwc_device *pwc);
int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd);
-void pwc_dec23_exit(void);
void pwc_dec23_decompress(const struct pwc_device *pwc,
const void *src,
void *dst,
int flags);
-
-
-
#endif
-
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
-
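Across the pwc-dec1 and pwc-dec23 changes above, the separate *_alloc()/*_exit() entry points disappear: the private decompressor state is now allocated lazily inside the init call and freed once when the device is released (see the pwc-if.c changes below). A hedged sketch of what a call site looks like after this consolidation; the wrapper name and the NULL buffer argument are illustrative, not taken from this patch:

    /* Illustrative only: select and initialize the right decompressor in
     * one step; allocation now happens inside the init call on first use. */
    static int example_init_decoder(struct pwc_device *pdev, unsigned char *cmd)
    {
    	if (DEVICE_USE_CODEC1(pdev->type))
    		return pwc_dec1_init(pdev, pdev->type, pdev->release, NULL);

    	return pwc_dec23_init(pdev, pdev->type, cmd);
    }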
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index b0bde5a..51ca358 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -2,6 +2,7 @@
USB and Video4Linux interface part.
(C) 1999-2004 Nemosoft Unv.
(C) 2004-2006 Luc Saillard (luc@saillard.org)
+ (C) 2011 Hans de Goede <hdegoede@redhat.com>
NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
driver and thus may have bugs that are not present in the original version.
@@ -74,7 +75,6 @@
#include "pwc-timon.h"
#include "pwc-dec23.h"
#include "pwc-dec1.h"
-#include "pwc-uncompress.h"
/* Function prototypes and driver templates */
@@ -116,6 +116,7 @@ MODULE_DEVICE_TABLE(usb, pwc_device_table);
static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id *id);
static void usb_pwc_disconnect(struct usb_interface *intf);
+static void pwc_isoc_cleanup(struct pwc_device *pdev);
static struct usb_driver pwc_driver = {
.name = "Philips webcam", /* name */
@@ -127,14 +128,11 @@ static struct usb_driver pwc_driver = {
#define MAX_DEV_HINTS 20
#define MAX_ISOC_ERRORS 20
-static int default_size = PSZ_QCIF;
static int default_fps = 10;
-static int default_fbufs = 3; /* Default number of frame buffers */
- int pwc_mbufs = 2; /* Default number of mmap() buffers */
#ifdef CONFIG_USB_PWC_DEBUG
int pwc_trace = PWC_DEBUG_LEVEL;
#endif
-static int power_save;
+static int power_save = -1;
static int led_on = 100, led_off; /* defaults to LED that is on while in use */
static int pwc_preferred_compression = 1; /* 0..3 = uncompressed..high */
static struct {
@@ -173,389 +171,20 @@ static struct video_device pwc_template = {
/***************************************************************************/
/* Private functions */
-/* Here we want the physical address of the memory.
- * This is used when initializing the contents of the area.
- */
-
-
-
-static void *pwc_rvmalloc(unsigned long size)
-{
- void * mem;
- unsigned long adr;
-
- mem=vmalloc_32(size);
- if (!mem)
- return NULL;
-
- memset(mem, 0, size); /* Clear the ram out, no junk to the user */
- adr=(unsigned long) mem;
- while (size > 0)
- {
- SetPageReserved(vmalloc_to_page((void *)adr));
- adr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- return mem;
-}
-
-static void pwc_rvfree(void * mem, unsigned long size)
-{
- unsigned long adr;
-
- if (!mem)
- return;
-
- adr=(unsigned long) mem;
- while ((long) size > 0)
- {
- ClearPageReserved(vmalloc_to_page((void *)adr));
- adr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- vfree(mem);
-}
-
-
-
-
-static int pwc_allocate_buffers(struct pwc_device *pdev)
-{
- int i, err;
- void *kbuf;
-
- PWC_DEBUG_MEMORY(">> pwc_allocate_buffers(pdev = 0x%p)\n", pdev);
-
- if (pdev == NULL)
- return -ENXIO;
-
- /* Allocate Isochronuous pipe buffers */
- for (i = 0; i < MAX_ISO_BUFS; i++) {
- if (pdev->sbuf[i].data == NULL) {
- kbuf = kzalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
- if (kbuf == NULL) {
- PWC_ERROR("Failed to allocate iso buffer %d.\n", i);
- return -ENOMEM;
- }
- PWC_DEBUG_MEMORY("Allocated iso buffer at %p.\n", kbuf);
- pdev->sbuf[i].data = kbuf;
- }
- }
-
- /* Allocate frame buffer structure */
- if (pdev->fbuf == NULL) {
- kbuf = kzalloc(default_fbufs * sizeof(struct pwc_frame_buf), GFP_KERNEL);
- if (kbuf == NULL) {
- PWC_ERROR("Failed to allocate frame buffer structure.\n");
- return -ENOMEM;
- }
- PWC_DEBUG_MEMORY("Allocated frame buffer structure at %p.\n", kbuf);
- pdev->fbuf = kbuf;
- }
-
- /* create frame buffers, and make circular ring */
- for (i = 0; i < default_fbufs; i++) {
- if (pdev->fbuf[i].data == NULL) {
- kbuf = vzalloc(PWC_FRAME_SIZE); /* need vmalloc since frame buffer > 128K */
- if (kbuf == NULL) {
- PWC_ERROR("Failed to allocate frame buffer %d.\n", i);
- return -ENOMEM;
- }
- PWC_DEBUG_MEMORY("Allocated frame buffer %d at %p.\n", i, kbuf);
- pdev->fbuf[i].data = kbuf;
- }
- }
-
- /* Allocate decompressor table space */
- if (DEVICE_USE_CODEC1(pdev->type))
- err = pwc_dec1_alloc(pdev);
- else
- err = pwc_dec23_alloc(pdev);
-
- if (err) {
- PWC_ERROR("Failed to allocate decompress table.\n");
- return err;
- }
-
- /* Allocate image buffer; double buffer for mmap() */
- kbuf = pwc_rvmalloc(pwc_mbufs * pdev->len_per_image);
- if (kbuf == NULL) {
- PWC_ERROR("Failed to allocate image buffer(s). needed (%d)\n",
- pwc_mbufs * pdev->len_per_image);
- return -ENOMEM;
- }
- PWC_DEBUG_MEMORY("Allocated image buffer at %p.\n", kbuf);
- pdev->image_data = kbuf;
- for (i = 0; i < pwc_mbufs; i++) {
- pdev->images[i].offset = i * pdev->len_per_image;
- pdev->images[i].vma_use_count = 0;
- }
- for (; i < MAX_IMAGES; i++) {
- pdev->images[i].offset = 0;
- }
-
- kbuf = NULL;
-
- PWC_DEBUG_MEMORY("<< pwc_allocate_buffers()\n");
- return 0;
-}
-
-static void pwc_free_buffers(struct pwc_device *pdev)
-{
- int i;
-
- PWC_DEBUG_MEMORY("Entering free_buffers(%p).\n", pdev);
-
- if (pdev == NULL)
- return;
- /* Release Iso-pipe buffers */
- for (i = 0; i < MAX_ISO_BUFS; i++)
- if (pdev->sbuf[i].data != NULL) {
- PWC_DEBUG_MEMORY("Freeing ISO buffer at %p.\n", pdev->sbuf[i].data);
- kfree(pdev->sbuf[i].data);
- pdev->sbuf[i].data = NULL;
- }
-
- /* The same for frame buffers */
- if (pdev->fbuf != NULL) {
- for (i = 0; i < default_fbufs; i++) {
- if (pdev->fbuf[i].data != NULL) {
- PWC_DEBUG_MEMORY("Freeing frame buffer %d at %p.\n", i, pdev->fbuf[i].data);
- vfree(pdev->fbuf[i].data);
- pdev->fbuf[i].data = NULL;
- }
- }
- kfree(pdev->fbuf);
- pdev->fbuf = NULL;
- }
-
- /* Intermediate decompression buffer & tables */
- if (pdev->decompress_data != NULL) {
- PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n", pdev->decompress_data);
- kfree(pdev->decompress_data);
- pdev->decompress_data = NULL;
- }
-
- /* Release image buffers */
- if (pdev->image_data != NULL) {
- PWC_DEBUG_MEMORY("Freeing image buffer at %p.\n", pdev->image_data);
- pwc_rvfree(pdev->image_data, pwc_mbufs * pdev->len_per_image);
- }
- pdev->image_data = NULL;
-
- PWC_DEBUG_MEMORY("Leaving free_buffers().\n");
-}
-
-/* The frame & image buffer mess.
-
- Yes, this is a mess. Well, it used to be simple, but alas... In this
- module, 3 buffers schemes are used to get the data from the USB bus to
- the user program. The first scheme involves the ISO buffers (called thus
- since they transport ISO data from the USB controller), and not really
- interesting. Suffices to say the data from this buffer is quickly
- gathered in an interrupt handler (pwc_isoc_handler) and placed into the
- frame buffer.
-
- The frame buffer is the second scheme, and is the central element here.
- It collects the data from a single frame from the camera (hence, the
- name). Frames are delimited by the USB camera with a short USB packet,
- so that's easy to detect. The frame buffers form a list that is filled
- by the camera+USB controller and drained by the user process through
- either read() or mmap().
-
- The image buffer is the third scheme, in which frames are decompressed
- and converted into planar format. For mmap() there is more than
- one image buffer available.
-
- The frame buffers provide the image buffering. In case the user process
- is a bit slow, this introduces lag and some undesired side-effects.
- The problem arises when the frame buffer is full. I used to drop the last
- frame, which makes the data in the queue stale very quickly. But dropping
- the frame at the head of the queue proved to be a litte bit more difficult.
- I tried a circular linked scheme, but this introduced more problems than
- it solved.
-
- Because filling and draining are completely asynchronous processes, this
- requires some fiddling with pointers and mutexes.
-
- Eventually, I came up with a system with 2 lists: an 'empty' frame list
- and a 'full' frame list:
- * Initially, all frame buffers but one are on the 'empty' list; the one
- remaining buffer is our initial fill frame.
- * If a frame is needed for filling, we try to take it from the 'empty'
- list, unless that list is empty, in which case we take the buffer at
- the head of the 'full' list.
- * When our fill buffer has been filled, it is appended to the 'full'
- list.
- * If a frame is needed by read() or mmap(), it is taken from the head of
- the 'full' list, handled, and then appended to the 'empty' list. If no
- buffer is present on the 'full' list, we wait.
- The advantage is that the buffer that is currently being decompressed/
- converted, is on neither list, and thus not in our way (any other scheme
- I tried had the problem of old data lingering in the queue).
-
- Whatever strategy you choose, it always remains a tradeoff: with more
- frame buffers the chances of a missed frame are reduced. On the other
- hand, on slower machines it introduces lag because the queue will
- always be full.
- */
-
-/**
- \brief Find next frame buffer to fill. Take from empty or full list, whichever comes first.
- */
-static int pwc_next_fill_frame(struct pwc_device *pdev)
-{
- int ret;
- unsigned long flags;
-
- ret = 0;
- spin_lock_irqsave(&pdev->ptrlock, flags);
- if (pdev->fill_frame != NULL) {
- /* append to 'full' list */
- if (pdev->full_frames == NULL) {
- pdev->full_frames = pdev->fill_frame;
- pdev->full_frames_tail = pdev->full_frames;
- }
- else {
- pdev->full_frames_tail->next = pdev->fill_frame;
- pdev->full_frames_tail = pdev->fill_frame;
- }
- }
- if (pdev->empty_frames != NULL) {
- /* We have empty frames available. That's easy */
- pdev->fill_frame = pdev->empty_frames;
- pdev->empty_frames = pdev->empty_frames->next;
- }
- else {
- /* Hmm. Take it from the full list */
- /* sanity check */
- if (pdev->full_frames == NULL) {
- PWC_ERROR("Neither empty or full frames available!\n");
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
- return -EINVAL;
- }
- pdev->fill_frame = pdev->full_frames;
- pdev->full_frames = pdev->full_frames->next;
- ret = 1;
- }
- pdev->fill_frame->next = NULL;
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
- return ret;
-}
-
-
-/**
- \brief Reset all buffers, pointers and lists, except for the image_used[] buffer.
-
- If the image_used[] buffer is cleared too, mmap()/VIDIOCSYNC will run into trouble.
- */
-static void pwc_reset_buffers(struct pwc_device *pdev)
-{
- int i;
- unsigned long flags;
-
- PWC_DEBUG_MEMORY(">> %s __enter__\n", __func__);
-
- spin_lock_irqsave(&pdev->ptrlock, flags);
- pdev->full_frames = NULL;
- pdev->full_frames_tail = NULL;
- for (i = 0; i < default_fbufs; i++) {
- pdev->fbuf[i].filled = 0;
- if (i > 0)
- pdev->fbuf[i].next = &pdev->fbuf[i - 1];
- else
- pdev->fbuf->next = NULL;
- }
- pdev->empty_frames = &pdev->fbuf[default_fbufs - 1];
- pdev->empty_frames_tail = pdev->fbuf;
- pdev->read_frame = NULL;
- pdev->fill_frame = pdev->empty_frames;
- pdev->empty_frames = pdev->empty_frames->next;
-
- pdev->image_read_pos = 0;
- pdev->fill_image = 0;
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
-
- PWC_DEBUG_MEMORY("<< %s __leaving__\n", __func__);
-}
-
-
-/**
- \brief Do all the handling for getting one frame: get pointer, decompress, advance pointers.
- */
-int pwc_handle_frame(struct pwc_device *pdev)
+struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev)
{
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&pdev->ptrlock, flags);
- /* First grab our read_frame; this is removed from all lists, so
- we can release the lock after this without problems */
- if (pdev->read_frame != NULL) {
- /* This can't theoretically happen */
- PWC_ERROR("Huh? Read frame still in use?\n");
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
- return ret;
- }
-
-
- if (pdev->full_frames == NULL) {
- PWC_ERROR("Woops. No frames ready.\n");
- }
- else {
- pdev->read_frame = pdev->full_frames;
- pdev->full_frames = pdev->full_frames->next;
- pdev->read_frame->next = NULL;
- }
-
- if (pdev->read_frame != NULL) {
- /* Decompression is a lengthy process, so it's outside of the lock.
- This gives the isoc_handler the opportunity to fill more frames
- in the mean time.
- */
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
- ret = pwc_decompress(pdev);
- spin_lock_irqsave(&pdev->ptrlock, flags);
-
- /* We're done with read_buffer, tack it to the end of the empty buffer list */
- if (pdev->empty_frames == NULL) {
- pdev->empty_frames = pdev->read_frame;
- pdev->empty_frames_tail = pdev->empty_frames;
- }
- else {
- pdev->empty_frames_tail->next = pdev->read_frame;
- pdev->empty_frames_tail = pdev->read_frame;
- }
- pdev->read_frame = NULL;
- }
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
- return ret;
-}
-
-/**
- \brief Advance pointers of image buffer (after each user request)
-*/
-void pwc_next_image(struct pwc_device *pdev)
-{
- pdev->image_used[pdev->fill_image] = 0;
- pdev->fill_image = (pdev->fill_image + 1) % pwc_mbufs;
-}
-
-/**
- * Print debug information when a frame is discarded because all of our buffer
- * is full
- */
-static void pwc_frame_dumped(struct pwc_device *pdev)
-{
- pdev->vframes_dumped++;
- if (pdev->vframe_count < FRAME_LOWMARK)
- return;
-
- if (pdev->vframes_dumped < 20)
- PWC_DEBUG_FLOW("Dumping frame %d\n", pdev->vframe_count);
- else if (pdev->vframes_dumped == 20)
- PWC_DEBUG_FLOW("Dumping frame %d (last message)\n",
- pdev->vframe_count);
+ unsigned long flags = 0;
+ struct pwc_frame_buf *buf = NULL;
+
+ spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
+ if (list_empty(&pdev->queued_bufs))
+ goto leave;
+
+ buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf, list);
+ list_del(&buf->list);
+leave:
+ spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
+ return buf;
}
static void pwc_snapshot_button(struct pwc_device *pdev, int down)
@@ -575,9 +204,9 @@ static void pwc_snapshot_button(struct pwc_device *pdev, int down)
#endif
}
-static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_buf *fbuf)
+static void pwc_frame_complete(struct pwc_device *pdev)
{
- int awake = 0;
+ struct pwc_frame_buf *fbuf = pdev->fill_buf;
/* The ToUCam Fun CMOS sensor causes the firmware to send 2 or 3 bogus
	   frames on the USB wire after an exposure change. This condition is
@@ -589,7 +218,6 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
if (ptr[1] == 1 && ptr[0] & 0x10) {
PWC_TRACE("Hyundai CMOS sensor bug. Dropping frame.\n");
pdev->drop_frames += 2;
- pdev->vframes_error++;
}
if ((ptr[0] ^ pdev->vmirror) & 0x01) {
pwc_snapshot_button(pdev, ptr[0] & 0x01);
@@ -612,8 +240,7 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
*/
if (fbuf->filled == 4)
pdev->drop_frames++;
- }
- else if (pdev->type == 740 || pdev->type == 720) {
+ } else if (pdev->type == 740 || pdev->type == 720) {
unsigned char *ptr = (unsigned char *)fbuf->data;
if ((ptr[0] ^ pdev->vmirror) & 0x01) {
pwc_snapshot_button(pdev, ptr[0] & 0x01);
@@ -621,33 +248,23 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
pdev->vmirror = ptr[0] & 0x03;
}
- /* In case we were instructed to drop the frame, do so silently.
- The buffer pointers are not updated either (but the counters are reset below).
- */
- if (pdev->drop_frames > 0)
+ /* In case we were instructed to drop the frame, do so silently. */
+ if (pdev->drop_frames > 0) {
pdev->drop_frames--;
- else {
+ } else {
/* Check for underflow first */
if (fbuf->filled < pdev->frame_total_size) {
PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);"
" discarded.\n", fbuf->filled);
- pdev->vframes_error++;
- }
- else {
- /* Send only once per EOF */
- awake = 1; /* delay wake_ups */
-
- /* Find our next frame to fill. This will always succeed, since we
- * nick a frame from either empty or full list, but if we had to
- * take it from the full list, it means a frame got dropped.
- */
- if (pwc_next_fill_frame(pdev))
- pwc_frame_dumped(pdev);
-
+ } else {
+ fbuf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
+ fbuf->vb.v4l2_buf.sequence = pdev->vframe_count;
+ vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+ pdev->fill_buf = NULL;
+ pdev->vsync = 0;
}
} /* !drop_frames */
pdev->vframe_count++;
- return awake;
}
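The completion path above is what a V4L2 application ultimately observes: the sequence number and timestamp filled in before vb2_buffer_done() travel back through VIDIOC_DQBUF. A rough userspace-side illustration (standard V4L2 streaming I/O, not part of this patch):

    /* Illustrative userspace sketch: dequeue one filled buffer, print the
     * metadata set by the driver's completion path, and requeue it. */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static void dump_one_frame(int fd)
    {
    	struct v4l2_buffer buf = {
    		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
    		.memory = V4L2_MEMORY_MMAP,
    	};

    	if (ioctl(fd, VIDIOC_DQBUF, &buf) == 0) {
    		printf("frame %u, %u bytes, t=%ld.%06ld\n",
    		       buf.sequence, buf.bytesused,
    		       (long)buf.timestamp.tv_sec,
    		       (long)buf.timestamp.tv_usec);
    		ioctl(fd, VIDIOC_QBUF, &buf);	/* hand it back */
    	}
    }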
/* This gets called for the Isochronous pipe (video). This is done in
@@ -655,24 +272,20 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
*/
static void pwc_isoc_handler(struct urb *urb)
{
- struct pwc_device *pdev;
+ struct pwc_device *pdev = (struct pwc_device *)urb->context;
int i, fst, flen;
- int awake;
- struct pwc_frame_buf *fbuf;
- unsigned char *fillptr = NULL, *iso_buf = NULL;
+ unsigned char *iso_buf = NULL;
- awake = 0;
- pdev = (struct pwc_device *)urb->context;
- if (pdev == NULL) {
- PWC_ERROR("isoc_handler() called with NULL device?!\n");
- return;
- }
-
- if (urb->status == -ENOENT || urb->status == -ECONNRESET) {
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN) {
PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronuously.\n", urb, urb->status == -ENOENT ? "" : "a");
return;
}
- if (urb->status != -EINPROGRESS && urb->status != 0) {
+
+ if (pdev->fill_buf == NULL)
+ pdev->fill_buf = pwc_get_next_fill_buf(pdev);
+
+ if (urb->status != 0) {
const char *errmsg;
errmsg = "Unknown";
@@ -684,29 +297,21 @@ static void pwc_isoc_handler(struct urb *urb)
case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break;
case -ETIME: errmsg = "Device does not respond"; break;
}
- PWC_DEBUG_FLOW("pwc_isoc_handler() called with status %d [%s].\n", urb->status, errmsg);
- /* Give up after a number of contiguous errors on the USB bus.
- Appearantly something is wrong so we simulate an unplug event.
- */
+ PWC_ERROR("pwc_isoc_handler() called with status %d [%s].\n",
+ urb->status, errmsg);
+ /* Give up after a number of contiguous errors */
if (++pdev->visoc_errors > MAX_ISOC_ERRORS)
{
- PWC_INFO("Too many ISOC errors, bailing out.\n");
- pdev->error_status = EIO;
- awake = 1;
- wake_up_interruptible(&pdev->frameq);
+ PWC_ERROR("Too many ISOC errors, bailing out.\n");
+ if (pdev->fill_buf) {
+ vb2_buffer_done(&pdev->fill_buf->vb,
+ VB2_BUF_STATE_ERROR);
+ pdev->fill_buf = NULL;
+ }
}
- goto handler_end; // ugly, but practical
- }
-
- fbuf = pdev->fill_frame;
- if (fbuf == NULL) {
- PWC_ERROR("pwc_isoc_handler without valid fill frame.\n");
- awake = 1;
+ pdev->vsync = 0; /* Drop the current frame */
goto handler_end;
}
- else {
- fillptr = fbuf->data + fbuf->filled;
- }
/* Reset ISOC error counter. We did get here, after all. */
pdev->visoc_errors = 0;
@@ -720,89 +325,73 @@ static void pwc_isoc_handler(struct urb *urb)
fst = urb->iso_frame_desc[i].status;
flen = urb->iso_frame_desc[i].actual_length;
iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
- if (fst == 0) {
- if (flen > 0) { /* if valid data... */
- if (pdev->vsync > 0) { /* ...and we are not sync-hunting... */
- pdev->vsync = 2;
-
- /* ...copy data to frame buffer, if possible */
- if (flen + fbuf->filled > pdev->frame_total_size) {
- PWC_DEBUG_FLOW("Frame buffer overflow (flen = %d, frame_total_size = %d).\n", flen, pdev->frame_total_size);
- pdev->vsync = 0; /* Hmm, let's wait for an EOF (end-of-frame) */
- pdev->vframes_error++;
- }
- else {
- memmove(fillptr, iso_buf, flen);
- fillptr += flen;
- }
- }
+ if (fst != 0) {
+ PWC_ERROR("Iso frame %d has error %d\n", i, fst);
+ continue;
+ }
+ if (flen > 0 && pdev->vsync) {
+ struct pwc_frame_buf *fbuf = pdev->fill_buf;
+
+ if (pdev->vsync == 1) {
+ do_gettimeofday(&fbuf->vb.v4l2_buf.timestamp);
+ pdev->vsync = 2;
+ }
+
+ if (flen + fbuf->filled > pdev->frame_total_size) {
+ PWC_ERROR("Frame overflow (%d > %d)\n",
+ flen + fbuf->filled,
+ pdev->frame_total_size);
+ pdev->vsync = 0; /* Let's wait for an EOF */
+ } else {
+ memcpy(fbuf->data + fbuf->filled, iso_buf,
+ flen);
fbuf->filled += flen;
- } /* ..flen > 0 */
-
- if (flen < pdev->vlast_packet_size) {
- /* Shorter packet... We probably have the end of an image-frame;
- wake up read() process and let select()/poll() do something.
- Decompression is done in user time over there.
- */
- if (pdev->vsync == 2) {
- if (pwc_rcv_short_packet(pdev, fbuf)) {
- awake = 1;
- fbuf = pdev->fill_frame;
- }
- }
- fbuf->filled = 0;
- fillptr = fbuf->data;
+ }
+ }
+ if (flen < pdev->vlast_packet_size) {
+ /* Shorter packet... end of frame */
+ if (pdev->vsync == 2)
+ pwc_frame_complete(pdev);
+ if (pdev->fill_buf == NULL)
+ pdev->fill_buf = pwc_get_next_fill_buf(pdev);
+ if (pdev->fill_buf) {
+ pdev->fill_buf->filled = 0;
pdev->vsync = 1;
}
-
- pdev->vlast_packet_size = flen;
- } /* ..status == 0 */
- else {
- /* This is normally not interesting to the user, unless
- * you are really debugging something, default = 0 */
- static int iso_error;
- iso_error++;
- if (iso_error < 20)
- PWC_DEBUG_FLOW("Iso frame %d of USB has error %d\n", i, fst);
}
+ pdev->vlast_packet_size = flen;
}
handler_end:
- if (awake)
- wake_up_interruptible(&pdev->frameq);
-
- urb->dev = pdev->udev;
i = usb_submit_urb(urb, GFP_ATOMIC);
if (i != 0)
PWC_ERROR("Error (%d) re-submitting urb in pwc_isoc_handler.\n", i);
}
-
-int pwc_isoc_init(struct pwc_device *pdev)
+static int pwc_isoc_init(struct pwc_device *pdev)
{
struct usb_device *udev;
struct urb *urb;
int i, j, ret;
-
struct usb_interface *intf;
struct usb_host_interface *idesc = NULL;
- if (pdev == NULL)
- return -EFAULT;
if (pdev->iso_init)
return 0;
+
pdev->vsync = 0;
+ pdev->vlast_packet_size = 0;
+ pdev->fill_buf = NULL;
+ pdev->vframe_count = 0;
+ pdev->visoc_errors = 0;
udev = pdev->udev;
/* Get the current alternate interface, adjust packet size */
- if (!udev->actconfig)
- return -EFAULT;
intf = usb_ifnum_to_if(udev, 0);
if (intf)
idesc = usb_altnum_to_altsetting(intf, pdev->valternate);
-
if (!idesc)
- return -EFAULT;
+ return -EIO;
/* Search video endpoint */
pdev->vmax_packet_size = -1;
@@ -825,34 +414,32 @@ int pwc_isoc_init(struct pwc_device *pdev)
if (ret < 0)
return ret;
+	/* Allocate and init isochronous URBs */
for (i = 0; i < MAX_ISO_BUFS; i++) {
urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
if (urb == NULL) {
PWC_ERROR("Failed to allocate urb %d\n", i);
- ret = -ENOMEM;
- break;
+ pdev->iso_init = 1;
+ pwc_isoc_cleanup(pdev);
+ return -ENOMEM;
}
- pdev->sbuf[i].urb = urb;
+ pdev->urbs[i] = urb;
PWC_DEBUG_MEMORY("Allocated URB at 0x%p\n", urb);
- }
- if (ret) {
- /* De-allocate in reverse order */
- while (i--) {
- usb_free_urb(pdev->sbuf[i].urb);
- pdev->sbuf[i].urb = NULL;
- }
- return ret;
- }
-
- /* init URB structure */
- for (i = 0; i < MAX_ISO_BUFS; i++) {
- urb = pdev->sbuf[i].urb;
urb->interval = 1; // devik
urb->dev = udev;
urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = pdev->sbuf[i].data;
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_buffer = usb_alloc_coherent(udev,
+ ISO_BUFFER_SIZE,
+ GFP_KERNEL,
+ &urb->transfer_dma);
+ if (urb->transfer_buffer == NULL) {
+ PWC_ERROR("Failed to allocate urb buffer %d\n", i);
+ pdev->iso_init = 1;
+ pwc_isoc_cleanup(pdev);
+ return -ENOMEM;
+ }
urb->transfer_buffer_length = ISO_BUFFER_SIZE;
urb->complete = pwc_isoc_handler;
urb->context = pdev;
@@ -866,14 +453,14 @@ int pwc_isoc_init(struct pwc_device *pdev)
/* link */
for (i = 0; i < MAX_ISO_BUFS; i++) {
- ret = usb_submit_urb(pdev->sbuf[i].urb, GFP_KERNEL);
+ ret = usb_submit_urb(pdev->urbs[i], GFP_KERNEL);
if (ret) {
PWC_ERROR("isoc_init() submit_urb %d failed with error %d\n", i, ret);
pdev->iso_init = 1;
pwc_isoc_cleanup(pdev);
return ret;
}
- PWC_DEBUG_MEMORY("URB 0x%p submitted.\n", pdev->sbuf[i].urb);
+ PWC_DEBUG_MEMORY("URB 0x%p submitted.\n", pdev->urbs[i]);
}
/* All is done... */
@@ -888,12 +475,9 @@ static void pwc_iso_stop(struct pwc_device *pdev)
/* Unlinking ISOC buffers one by one */
for (i = 0; i < MAX_ISO_BUFS; i++) {
- struct urb *urb;
-
- urb = pdev->sbuf[i].urb;
- if (urb) {
- PWC_DEBUG_MEMORY("Unlinking URB %p\n", urb);
- usb_kill_urb(urb);
+ if (pdev->urbs[i]) {
+ PWC_DEBUG_MEMORY("Unlinking URB %p\n", pdev->urbs[i]);
+ usb_kill_urb(pdev->urbs[i]);
}
}
}
@@ -904,40 +488,51 @@ static void pwc_iso_free(struct pwc_device *pdev)
/* Freeing ISOC buffers one by one */
for (i = 0; i < MAX_ISO_BUFS; i++) {
- struct urb *urb;
-
- urb = pdev->sbuf[i].urb;
- if (urb) {
+ if (pdev->urbs[i]) {
PWC_DEBUG_MEMORY("Freeing URB\n");
- usb_free_urb(urb);
- pdev->sbuf[i].urb = NULL;
+ if (pdev->urbs[i]->transfer_buffer) {
+ usb_free_coherent(pdev->udev,
+ pdev->urbs[i]->transfer_buffer_length,
+ pdev->urbs[i]->transfer_buffer,
+ pdev->urbs[i]->transfer_dma);
+ }
+ usb_free_urb(pdev->urbs[i]);
+ pdev->urbs[i] = NULL;
}
}
}
-void pwc_isoc_cleanup(struct pwc_device *pdev)
+static void pwc_isoc_cleanup(struct pwc_device *pdev)
{
PWC_DEBUG_OPEN(">> pwc_isoc_cleanup()\n");
- if (pdev == NULL)
- return;
+
if (pdev->iso_init == 0)
return;
pwc_iso_stop(pdev);
pwc_iso_free(pdev);
-
- /* Stop camera, but only if we are sure the camera is still there (unplug
- is signalled by EPIPE)
- */
- if (pdev->error_status != EPIPE) {
- PWC_DEBUG_OPEN("Setting alternate interface 0.\n");
- usb_set_interface(pdev->udev, 0, 0);
- }
+ usb_set_interface(pdev->udev, 0, 0);
pdev->iso_init = 0;
PWC_DEBUG_OPEN("<< pwc_isoc_cleanup()\n");
}
+/*
+ * Release all queued buffers. No need to take queued_bufs_lock: all iso
+ * URBs have been killed before we are called, so pwc_isoc_handler won't run.
+ */
+static void pwc_cleanup_queued_bufs(struct pwc_device *pdev)
+{
+ while (!list_empty(&pdev->queued_bufs)) {
+ struct pwc_frame_buf *buf;
+
+ buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf,
+ list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+}
+
/*********
* sysfs
*********/
@@ -1051,98 +646,15 @@ static const char *pwc_sensor_type_to_string(unsigned int sensor_type)
static int pwc_video_open(struct file *file)
{
- int i, ret;
struct video_device *vdev = video_devdata(file);
struct pwc_device *pdev;
PWC_DEBUG_OPEN(">> video_open called(vdev = 0x%p).\n", vdev);
pdev = video_get_drvdata(vdev);
- BUG_ON(!pdev);
- if (pdev->vopen) {
- PWC_DEBUG_OPEN("I'm busy, someone is using the device.\n");
- return -EBUSY;
- }
-
- pwc_construct(pdev); /* set min/max sizes correct */
- if (!pdev->usb_init) {
- PWC_DEBUG_OPEN("Doing first time initialization.\n");
- pdev->usb_init = 1;
-
- /* Query sensor type */
- ret = pwc_get_cmos_sensor(pdev, &i);
- if (ret >= 0)
- {
- PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n",
- pdev->vdev.name,
- pwc_sensor_type_to_string(i), i);
- }
- }
-
- /* Turn on camera */
- if (power_save) {
- i = pwc_camera_power(pdev, 1);
- if (i < 0)
- PWC_DEBUG_OPEN("Failed to restore power to the camera! (%d)\n", i);
- }
- /* Set LED on/off time */
- if (pwc_set_leds(pdev, led_on, led_off) < 0)
- PWC_DEBUG_OPEN("Failed to set LED on/off time.\n");
-
-
- /* So far, so good. Allocate memory. */
- i = pwc_allocate_buffers(pdev);
- if (i < 0) {
- PWC_DEBUG_OPEN("Failed to allocate buffers memory.\n");
- pwc_free_buffers(pdev);
- return i;
- }
-
- /* Reset buffers & parameters */
- pwc_reset_buffers(pdev);
- for (i = 0; i < pwc_mbufs; i++)
- pdev->image_used[i] = 0;
- pdev->vframe_count = 0;
- pdev->vframes_dumped = 0;
- pdev->vframes_error = 0;
- pdev->visoc_errors = 0;
- pdev->error_status = 0;
- pwc_construct(pdev); /* set min/max sizes correct */
-
- /* Set some defaults */
- pdev->vsnapshot = 0;
-
- /* Set video size, first try the last used video size
- (or the default one); if that fails try QCIF/10 or QSIF/10;
- it that fails too, give up.
- */
- i = pwc_set_video_mode(pdev, pwc_image_sizes[pdev->vsize].x, pwc_image_sizes[pdev->vsize].y, pdev->vframes, pdev->vcompression, 0);
- if (i) {
- unsigned int default_resolution;
- PWC_DEBUG_OPEN("First attempt at set_video_mode failed.\n");
- if (pdev->type>= 730)
- default_resolution = PSZ_QSIF;
- else
- default_resolution = PSZ_QCIF;
-
- i = pwc_set_video_mode(pdev,
- pwc_image_sizes[default_resolution].x,
- pwc_image_sizes[default_resolution].y,
- 10,
- pdev->vcompression,
- 0);
- }
- if (i) {
- PWC_DEBUG_OPEN("Second attempt at set_video_mode failed.\n");
- pwc_free_buffers(pdev);
- return i;
- }
-
- /* Initialize the webcam to sane value */
- pwc_set_brightness(pdev, 0x7fff);
- pwc_set_agc(pdev, 1, 0);
+ if (!pdev->udev)
+ return -ENODEV;
- pdev->vopen++;
file->private_data = vdev;
PWC_DEBUG_OPEN("<< video_open() returns 0.\n");
return 0;
@@ -1158,239 +670,211 @@ static void pwc_video_release(struct video_device *vfd)
if (device_hint[hint].pdev == pdev)
device_hint[hint].pdev = NULL;
+ /* Free intermediate decompression buffer & tables */
+ if (pdev->decompress_data != NULL) {
+ PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
+ pdev->decompress_data);
+ kfree(pdev->decompress_data);
+ pdev->decompress_data = NULL;
+ }
+
+ v4l2_ctrl_handler_free(&pdev->ctrl_handler);
+
kfree(pdev);
}
-/* Note that all cleanup is done in the reverse order as in _open */
static int pwc_video_close(struct file *file)
{
struct video_device *vdev = file->private_data;
struct pwc_device *pdev;
- int i;
PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
pdev = video_get_drvdata(vdev);
- if (pdev->vopen == 0)
- PWC_DEBUG_MODULE("video_close() called on closed device?\n");
-
- /* Dump statistics, but only if a reasonable amount of frames were
- processed (to prevent endless log-entries in case of snap-shot
- programs)
- */
- if (pdev->vframe_count > 20)
- PWC_DEBUG_MODULE("Closing video device: %d frames received, dumped %d frames, %d frames with errors.\n", pdev->vframe_count, pdev->vframes_dumped, pdev->vframes_error);
-
- if (DEVICE_USE_CODEC1(pdev->type))
- pwc_dec1_exit();
- else
- pwc_dec23_exit();
-
- pwc_isoc_cleanup(pdev);
- pwc_free_buffers(pdev);
-
- /* Turn off LEDS and power down camera, but only when not unplugged */
- if (!pdev->unplugged) {
- /* Turn LEDs off */
- if (pwc_set_leds(pdev, 0, 0) < 0)
- PWC_DEBUG_MODULE("Failed to set LED on/off time.\n");
- if (power_save) {
- i = pwc_camera_power(pdev, 0);
- if (i < 0)
- PWC_ERROR("Failed to power down camera (%d)\n", i);
- }
- pdev->vopen--;
- PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen);
+ if (pdev->capt_file == file) {
+ vb2_queue_release(&pdev->vb_queue);
+ pdev->capt_file = NULL;
}
+ PWC_DEBUG_OPEN("<< video_close()\n");
return 0;
}
-/*
- * FIXME: what about two parallel reads ????
- * ANSWER: Not supported. You can't open the device more than once,
- despite what the V4L1 interface says. First, I don't see
- the need, second there's no mechanism of alerting the
- 2nd/3rd/... process of events like changing image size.
- And I don't see the point of blocking that for the
- 2nd/3rd/... process.
- In multi-threaded environments reading parallel from any
- device is tricky anyhow.
- */
-
static ssize_t pwc_video_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct video_device *vdev = file->private_data;
- struct pwc_device *pdev;
- int noblock = file->f_flags & O_NONBLOCK;
- DECLARE_WAITQUEUE(wait, current);
- int bytes_to_read, rv = 0;
- void *image_buffer_addr;
-
- PWC_DEBUG_READ("pwc_video_read(vdev=0x%p, buf=%p, count=%zd) called.\n",
- vdev, buf, count);
- if (vdev == NULL)
- return -EFAULT;
- pdev = video_get_drvdata(vdev);
- if (pdev == NULL)
- return -EFAULT;
+ struct pwc_device *pdev = video_get_drvdata(vdev);
- if (pdev->error_status) {
- rv = -pdev->error_status; /* Something happened, report what. */
- goto err_out;
- }
+ if (!pdev->udev)
+ return -ENODEV;
- /* Start the stream (if not already started) */
- rv = pwc_isoc_init(pdev);
- if (rv)
- goto err_out;
-
- /* In case we're doing partial reads, we don't have to wait for a frame */
- if (pdev->image_read_pos == 0) {
- /* Do wait queueing according to the (doc)book */
- add_wait_queue(&pdev->frameq, &wait);
- while (pdev->full_frames == NULL) {
- /* Check for unplugged/etc. here */
- if (pdev->error_status) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- rv = -pdev->error_status ;
- goto err_out;
- }
- if (noblock) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- rv = -EWOULDBLOCK;
- goto err_out;
- }
- if (signal_pending(current)) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- rv = -ERESTARTSYS;
- goto err_out;
- }
- mutex_unlock(&pdev->modlock);
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&pdev->modlock);
- }
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
+ if (pdev->capt_file != NULL &&
+ pdev->capt_file != file)
+ return -EBUSY;
- /* Decompress and release frame */
- if (pwc_handle_frame(pdev)) {
- rv = -EFAULT;
- goto err_out;
- }
- }
+ pdev->capt_file = file;
- PWC_DEBUG_READ("Copying data to user space.\n");
- if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
- bytes_to_read = pdev->frame_size + sizeof(struct pwc_raw_frame);
- else
- bytes_to_read = pdev->view.size;
-
- /* copy bytes to user space; we allow for partial reads */
- if (count + pdev->image_read_pos > bytes_to_read)
- count = bytes_to_read - pdev->image_read_pos;
- image_buffer_addr = pdev->image_data;
- image_buffer_addr += pdev->images[pdev->fill_image].offset;
- image_buffer_addr += pdev->image_read_pos;
- if (copy_to_user(buf, image_buffer_addr, count)) {
- rv = -EFAULT;
- goto err_out;
- }
- pdev->image_read_pos += count;
- if (pdev->image_read_pos >= bytes_to_read) { /* All data has been read */
- pdev->image_read_pos = 0;
- pwc_next_image(pdev);
- }
- return count;
-err_out:
- return rv;
+ return vb2_read(&pdev->vb_queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
}
static unsigned int pwc_video_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = file->private_data;
- struct pwc_device *pdev;
- int ret;
+ struct pwc_device *pdev = video_get_drvdata(vdev);
- if (vdev == NULL)
- return -EFAULT;
- pdev = video_get_drvdata(vdev);
- if (pdev == NULL)
- return -EFAULT;
+ if (!pdev->udev)
+ return POLL_ERR;
- /* Start the stream (if not already started) */
- ret = pwc_isoc_init(pdev);
- if (ret)
- return ret;
+ return vb2_poll(&pdev->vb_queue, file, wait);
+}
+
+static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = file->private_data;
+ struct pwc_device *pdev = video_get_drvdata(vdev);
+
+ if (pdev->capt_file != file)
+ return -EBUSY;
+
+ return vb2_mmap(&pdev->vb_queue, vma);
+}
+
+/***************************************************************************/
+/* Videobuf2 operations */
+
+static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+
+ if (*nbuffers < MIN_FRAMES)
+ *nbuffers = MIN_FRAMES;
+ else if (*nbuffers > MAX_FRAMES)
+ *nbuffers = MAX_FRAMES;
+
+ *nplanes = 1;
- poll_wait(file, &pdev->frameq, wait);
- if (pdev->error_status)
- return POLLERR;
- if (pdev->full_frames != NULL) /* we have frames waiting */
- return (POLLIN | POLLRDNORM);
+ sizes[0] = PAGE_ALIGN((pdev->abs_max.x * pdev->abs_max.y * 3) / 2);
return 0;
}
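queue_setup() above sizes each buffer for the largest YUV 4:2:0 image the camera can deliver (12 bits per pixel), rounded up to a whole page. A self-contained worked example of that computation, with 640x480 standing in for pdev->abs_max (the real maximum depends on the camera type and is not shown in this hunk):

    #include <stdio.h>

    #define EX_PAGE_SIZE	4096UL
    #define EX_PAGE_ALIGN(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

    int main(void)
    {
    	unsigned long x = 640, y = 480;
    	unsigned long raw = x * y * 3 / 2;	/* 460800 bytes of YUV420 */

    	/* 460800 rounds up to 113 pages = 462848 bytes */
    	printf("%lu -> %lu\n", raw, EX_PAGE_ALIGN(raw));
    	return 0;
    }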
-static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
+static int buffer_init(struct vb2_buffer *vb)
{
- struct video_device *vdev = file->private_data;
- struct pwc_device *pdev;
- unsigned long start;
- unsigned long size;
- unsigned long page, pos = 0;
- int index;
+ struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
- PWC_DEBUG_MEMORY(">> %s\n", __func__);
- pdev = video_get_drvdata(vdev);
- size = vma->vm_end - vma->vm_start;
- start = vma->vm_start;
+ /* need vmalloc since frame buffer > 128K */
+ buf->data = vzalloc(PWC_FRAME_SIZE);
+ if (buf->data == NULL)
+ return -ENOMEM;
- /* Find the idx buffer for this mapping */
- for (index = 0; index < pwc_mbufs; index++) {
- pos = pdev->images[index].offset;
- if ((pos>>PAGE_SHIFT) == vma->vm_pgoff)
- break;
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
+
+	/* Don't allow queueing new buffers after device disconnection */
+ if (!pdev->udev)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int buffer_finish(struct vb2_buffer *vb)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+
+ /*
+ * Application has called dqbuf and is getting back a buffer we've
+ * filled, take the pwc data we've stored in buf->data and decompress
+ * it into a usable format, storing the result in the vb2_buffer
+ */
+ return pwc_decompress(pdev, buf);
+}
+
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+ struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+
+ vfree(buf->data);
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
+ list_add_tail(&buf->list, &pdev->queued_bufs);
+ spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+
+ if (!pdev->udev)
+ return -ENODEV;
+
+ /* Turn on camera and set LEDS on */
+ pwc_camera_power(pdev, 1);
+ if (pdev->power_save) {
+ /* Restore video mode */
+ pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y,
+ pdev->vframes, pdev->vcompression,
+ pdev->vsnapshot);
}
- if (index == MAX_IMAGES)
- return -EINVAL;
- if (index == 0) {
- /*
- * Special case for v4l1. In v4l1, we map only one big buffer,
- * but in v4l2 each buffer is mapped
- */
- unsigned long total_size;
- total_size = pwc_mbufs * pdev->len_per_image;
- if (size != pdev->len_per_image && size != total_size) {
- PWC_ERROR("Wrong size (%lu) needed to be len_per_image=%d or total_size=%lu\n",
- size, pdev->len_per_image, total_size);
- return -EINVAL;
- }
- } else if (size > pdev->len_per_image)
- return -EINVAL;
-
- vma->vm_flags |= VM_IO; /* from 2.6.9-acX */
-
- pos += (unsigned long)pdev->image_data;
- while (size > 0) {
- page = vmalloc_to_pfn((void *)pos);
- if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
- return -EAGAIN;
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
- if (size > PAGE_SIZE)
- size -= PAGE_SIZE;
- else
- size = 0;
+ pwc_set_leds(pdev, led_on, led_off);
+
+ return pwc_isoc_init(pdev);
+}
+
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+
+ if (pdev->udev) {
+ pwc_set_leds(pdev, 0, 0);
+ pwc_camera_power(pdev, 0);
+ pwc_isoc_cleanup(pdev);
}
+ pwc_cleanup_queued_bufs(pdev);
+
return 0;
}
+static void pwc_lock(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ mutex_lock(&pdev->modlock);
+}
+
+static void pwc_unlock(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ mutex_unlock(&pdev->modlock);
+}
+
+static struct vb2_ops pwc_vb_queue_ops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_cleanup = buffer_cleanup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = pwc_unlock,
+ .wait_finish = pwc_lock,
+};
+
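Editor's note: for reference, a minimal user-space sketch (illustrative only, not part of this patch) of the mmap streaming sequence now serviced by the vb2 queue and the ops table above; the device node, buffer count and frame count are assumptions, and error handling is omitted:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	void *mem[4];
	unsigned int i, frames;

	memset(&req, 0, sizeof(req));
	req.count = 4;				/* queue_setup() may clamp this */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_REQBUFS, &req);	/* -> vb2_reqbufs() */

	for (i = 0; i < req.count && i < 4; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		ioctl(fd, VIDIOC_QUERYBUF, &buf);
		mem[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			      MAP_SHARED, fd, buf.m.offset);
		ioctl(fd, VIDIOC_QBUF, &buf);	/* -> buffer_queue() */
	}
	ioctl(fd, VIDIOC_STREAMON, &type);	/* -> start_streaming() */

	for (frames = 0; frames < 100; frames++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		ioctl(fd, VIDIOC_DQBUF, &buf);	/* buffer_finish() decompresses */
		/* ... consume buf.bytesused bytes at mem[buf.index] ... */
		ioctl(fd, VIDIOC_QBUF, &buf);
	}
	ioctl(fd, VIDIOC_STREAMOFF, &type);	/* -> stop_streaming() */
	return 0;
}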
/***************************************************************************/
/* USB functions */
@@ -1406,6 +890,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
int hint, rc;
int features = 0;
int video_nr = -1; /* default: use next available device */
+ int my_power_save = power_save;
char serial_number[30], *name;
vendor_id = le16_to_cpu(udev->descriptor.idVendor);
@@ -1513,6 +998,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
PWC_INFO("Logitech QuickCam 4000 Pro USB webcam detected.\n");
name = "Logitech QuickCam Pro 4000";
type_id = 740; /* CCD sensor */
+ if (my_power_save == -1)
+ my_power_save = 1;
break;
case 0x08b3:
PWC_INFO("Logitech QuickCam Zoom USB webcam detected.\n");
@@ -1523,12 +1010,15 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
PWC_INFO("Logitech QuickCam Zoom (new model) USB webcam detected.\n");
name = "Logitech QuickCam Zoom";
type_id = 740; /* CCD sensor */
- power_save = 1;
+ if (my_power_save == -1)
+ my_power_save = 1;
break;
case 0x08b5:
PWC_INFO("Logitech QuickCam Orbit/Sphere USB webcam detected.\n");
name = "Logitech QuickCam Orbit";
type_id = 740; /* CCD sensor */
+ if (my_power_save == -1)
+ my_power_save = 1;
features |= FEATURE_MOTOR_PANTILT;
break;
case 0x08b6:
@@ -1583,6 +1073,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
PWC_INFO("Creative Labs Webcam 5 detected.\n");
name = "Creative Labs Webcam 5";
type_id = 730;
+ if (my_power_save == -1)
+ my_power_save = 1;
break;
case 0x4011:
PWC_INFO("Creative Labs Webcam Pro Ex detected.\n");
@@ -1640,6 +1132,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
else
		return -ENODEV; /* Not any of the known types; but the list keeps growing. */
+ if (my_power_save == -1)
+ my_power_save = 0;
+
memset(serial_number, 0, 30);
usb_string(udev, udev->descriptor.iSerialNumber, serial_number, 29);
PWC_DEBUG_PROBE("Device serial number is %s\n", serial_number);
@@ -1654,7 +1149,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
return -ENOMEM;
}
pdev->type = type_id;
- pdev->vsize = default_size;
pdev->vframes = default_fps;
strcpy(pdev->serial, serial_number);
pdev->features = features;
@@ -1668,13 +1162,26 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->angle_range.tilt_min = -3000;
pdev->angle_range.tilt_max = 2500;
}
+ pwc_construct(pdev); /* set min/max sizes correct */
mutex_init(&pdev->modlock);
- spin_lock_init(&pdev->ptrlock);
+ mutex_init(&pdev->udevlock);
+ spin_lock_init(&pdev->queued_bufs_lock);
+ INIT_LIST_HEAD(&pdev->queued_bufs);
pdev->udev = udev;
- init_waitqueue_head(&pdev->frameq);
pdev->vcompression = pwc_preferred_compression;
+ pdev->power_save = my_power_save;
+
+ /* Init videobuf2 queue structure */
+ memset(&pdev->vb_queue, 0, sizeof(pdev->vb_queue));
+ pdev->vb_queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ pdev->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ pdev->vb_queue.drv_priv = pdev;
+ pdev->vb_queue.buf_struct_size = sizeof(struct pwc_frame_buf);
+ pdev->vb_queue.ops = &pwc_vb_queue_ops;
+ pdev->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ vb2_queue_init(&pdev->vb_queue);
/* Init video_device structure */
memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
@@ -1707,14 +1214,40 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
PWC_DEBUG_PROBE("probe() function returning struct at 0x%p.\n", pdev);
usb_set_intfdata(intf, pdev);
+#ifdef CONFIG_USB_PWC_DEBUG
+ /* Query sensor type */
+ if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
+ PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n",
+ pdev->vdev.name,
+ pwc_sensor_type_to_string(rc), rc);
+ }
+#endif
+
/* Set the leds off */
pwc_set_leds(pdev, 0, 0);
+
+	/* Set up initial video mode */
+ rc = pwc_set_video_mode(pdev, pdev->view_max.x, pdev->view_max.y,
+ pdev->vframes, pdev->vcompression, 0);
+ if (rc)
+ goto err_free_mem;
+
+	/* Register controls (and read default values from camera) */
+ rc = pwc_init_controls(pdev);
+ if (rc) {
+ PWC_ERROR("Failed to register v4l2 controls (%d).\n", rc);
+ goto err_free_mem;
+ }
+
+ pdev->vdev.ctrl_handler = &pdev->ctrl_handler;
+
+ /* And powerdown the camera until streaming starts */
pwc_camera_power(pdev, 0);
rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
if (rc < 0) {
PWC_ERROR("Failed to register as video device (%d).\n", rc);
- goto err_free_mem;
+ goto err_free_controls;
}
rc = pwc_create_sysfs_files(pdev);
if (rc)
@@ -1757,7 +1290,10 @@ err_video_unreg:
if (hint < MAX_DEV_HINTS)
device_hint[hint].pdev = NULL;
video_unregister_device(&pdev->vdev);
+err_free_controls:
+ v4l2_ctrl_handler_free(&pdev->ctrl_handler);
err_free_mem:
+ usb_set_intfdata(intf, NULL);
kfree(pdev);
return rc;
}
@@ -1767,33 +1303,17 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
{
struct pwc_device *pdev = usb_get_intfdata(intf);
+ mutex_lock(&pdev->udevlock);
mutex_lock(&pdev->modlock);
- usb_set_intfdata (intf, NULL);
- if (pdev == NULL) {
- PWC_ERROR("pwc_disconnect() Called without private pointer.\n");
- goto disconnect_out;
- }
- if (pdev->udev == NULL) {
- PWC_ERROR("pwc_disconnect() already called for %p\n", pdev);
- goto disconnect_out;
- }
- if (pdev->udev != interface_to_usbdev(intf)) {
- PWC_ERROR("pwc_disconnect() Woops: pointer mismatch udev/pdev.\n");
- goto disconnect_out;
- }
-
- /* We got unplugged; this is signalled by an EPIPE error code */
- pdev->error_status = EPIPE;
- pdev->unplugged = 1;
-
- /* Alert waiting processes */
- wake_up_interruptible(&pdev->frameq);
+ usb_set_intfdata(intf, NULL);
/* No need to keep the urbs around after disconnection */
pwc_isoc_cleanup(pdev);
+ pwc_cleanup_queued_bufs(pdev);
+ pdev->udev = NULL;
-disconnect_out:
mutex_unlock(&pdev->modlock);
+ mutex_unlock(&pdev->udevlock);
pwc_remove_sysfs_files(pdev);
video_unregister_device(&pdev->vdev);
@@ -1809,36 +1329,27 @@ disconnect_out:
* Initialization code & module stuff
*/
-static char *size;
static int fps;
-static int fbufs;
-static int mbufs;
static int compression = -1;
static int leds[2] = { -1, -1 };
static unsigned int leds_nargs;
static char *dev_hint[MAX_DEV_HINTS];
static unsigned int dev_hint_nargs;
-module_param(size, charp, 0444);
module_param(fps, int, 0444);
-module_param(fbufs, int, 0444);
-module_param(mbufs, int, 0444);
#ifdef CONFIG_USB_PWC_DEBUG
module_param_named(trace, pwc_trace, int, 0644);
#endif
-module_param(power_save, int, 0444);
+module_param(power_save, int, 0644);
module_param(compression, int, 0444);
module_param_array(leds, int, &leds_nargs, 0444);
module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
-MODULE_PARM_DESC(size, "Initial image size. One of sqcif, qsif, qcif, sif, cif, vga");
MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
-MODULE_PARM_DESC(fbufs, "Number of internal frame buffers to reserve");
-MODULE_PARM_DESC(mbufs, "Number of external (mmap()ed) image buffers");
#ifdef CONFIG_USB_PWC_DEBUG
MODULE_PARM_DESC(trace, "For debugging purposes");
#endif
-MODULE_PARM_DESC(power_save, "Turn power save feature in camera on or off");
+MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
MODULE_PARM_DESC(compression, "Preferred compression quality. Range 0 (uncompressed) to 3 (high compression)");
MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
MODULE_PARM_DESC(dev_hint, "Device node hints");
@@ -1851,14 +1362,19 @@ MODULE_VERSION( PWC_VERSION );
static int __init usb_pwc_init(void)
{
- int i, sz;
- char *sizenames[PSZ_MAX] = { "sqcif", "qsif", "qcif", "sif", "cif", "vga" };
+ int i;
+#ifdef CONFIG_USB_PWC_DEBUG
PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
+ if (pwc_trace >= 0) {
+ PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
+ }
+#endif
+
if (fps) {
if (fps < 4 || fps > 30) {
PWC_ERROR("Framerate out of bounds (4-30).\n");
@@ -1868,41 +1384,6 @@ static int __init usb_pwc_init(void)
PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
}
- if (size) {
- /* string; try matching with array */
- for (sz = 0; sz < PSZ_MAX; sz++) {
- if (!strcmp(sizenames[sz], size)) { /* Found! */
- default_size = sz;
- break;
- }
- }
- if (sz == PSZ_MAX) {
- PWC_ERROR("Size not recognized; try size=[sqcif | qsif | qcif | sif | cif | vga].\n");
- return -EINVAL;
- }
- PWC_DEBUG_MODULE("Default image size set to %s [%dx%d].\n", sizenames[default_size], pwc_image_sizes[default_size].x, pwc_image_sizes[default_size].y);
- }
- if (mbufs) {
- if (mbufs < 1 || mbufs > MAX_IMAGES) {
- PWC_ERROR("Illegal number of mmap() buffers; use a number between 1 and %d.\n", MAX_IMAGES);
- return -EINVAL;
- }
- pwc_mbufs = mbufs;
- PWC_DEBUG_MODULE("Number of image buffers set to %d.\n", pwc_mbufs);
- }
- if (fbufs) {
- if (fbufs < 2 || fbufs > MAX_FRAMES) {
- PWC_ERROR("Illegal number of frame buffers; use a number between 2 and %d.\n", MAX_FRAMES);
- return -EINVAL;
- }
- default_fbufs = fbufs;
- PWC_DEBUG_MODULE("Number of frame buffers set to %d.\n", default_fbufs);
- }
-#ifdef CONFIG_USB_PWC_DEBUG
- if (pwc_trace >= 0) {
- PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
- }
-#endif
if (compression >= 0) {
if (compression > 3) {
PWC_ERROR("Invalid compression setting; use a number between 0 (uncompressed) and 3 (high).\n");
@@ -1911,8 +1392,6 @@ static int __init usb_pwc_init(void)
pwc_preferred_compression = compression;
PWC_DEBUG_MODULE("Preferred compression set to %d.\n", pwc_preferred_compression);
}
- if (power_save)
- PWC_DEBUG_MODULE("Enabling power save on open/close.\n");
if (leds[0] >= 0)
led_on = leds[0];
if (leds[1] >= 0)
diff --git a/drivers/media/video/pwc/pwc-ioctl.h b/drivers/media/video/pwc/pwc-ioctl.h
deleted file mode 100644
index 8c0cae7..0000000
--- a/drivers/media/video/pwc/pwc-ioctl.h
+++ /dev/null
@@ -1,323 +0,0 @@
-#ifndef PWC_IOCTL_H
-#define PWC_IOCTL_H
-
-/* (C) 2001-2004 Nemosoft Unv.
- (C) 2004-2006 Luc Saillard (luc@saillard.org)
-
- NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
- driver and thus may have bugs that are not present in the original version.
- Please send bug reports and support requests to <luc@saillard.org>.
- The decompression routines have been implemented by reverse-engineering the
- Nemosoft binary pwcx module. Caveat emptor.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-/* This is pwc-ioctl.h belonging to PWC 10.0.10
- It contains structures and defines to communicate from user space
- directly to the driver.
- */
-
-/*
- Changes
- 2001/08/03 Alvarado Added ioctl constants to access methods for
- changing white balance and red/blue gains
- 2002/12/15 G. H. Fernandez-Toribio VIDIOCGREALSIZE
- 2003/12/13 Nemosft Unv. Some modifications to make interfacing to
- PWCX easier
- */
-
-/* These are private ioctl() commands, specific for the Philips webcams.
- They contain functions not found in other webcams, and settings not
- specified in the Video4Linux API.
-
- The #define names are built up like follows:
- VIDIOC VIDeo IOCtl prefix
- PWC Philps WebCam
- G optional: Get
- S optional: Set
- ... the function
- */
-
-#include <linux/types.h>
-#include <linux/version.h>
-
- /* Enumeration of image sizes */
-#define PSZ_SQCIF 0x00
-#define PSZ_QSIF 0x01
-#define PSZ_QCIF 0x02
-#define PSZ_SIF 0x03
-#define PSZ_CIF 0x04
-#define PSZ_VGA 0x05
-#define PSZ_MAX 6
-
-
-/* The frame rate is encoded in the video_window.flags parameter using
- the upper 16 bits, since some flags are defined nowadays. The following
- defines provide a mask and shift to filter out this value.
- This value can also be passing using the private flag when using v4l2 and
- VIDIOC_S_FMT ioctl.
-
- In 'Snapshot' mode the camera freezes its automatic exposure and colour
- balance controls.
- */
-#define PWC_FPS_SHIFT 16
-#define PWC_FPS_MASK 0x00FF0000
-#define PWC_FPS_FRMASK 0x003F0000
-#define PWC_FPS_SNAPSHOT 0x00400000
-#define PWC_QLT_MASK 0x03000000
-#define PWC_QLT_SHIFT 24
-
-
-/* structure for transferring x & y coordinates */
-struct pwc_coord
-{
- int x, y; /* guess what */
- int size; /* size, or offset */
-};
-
-
-/* Used with VIDIOCPWCPROBE */
-struct pwc_probe
-{
- char name[32];
- int type;
-};
-
-struct pwc_serial
-{
- char serial[30]; /* String with serial number. Contains terminating 0 */
-};
-
-/* pwc_whitebalance.mode values */
-#define PWC_WB_INDOOR 0
-#define PWC_WB_OUTDOOR 1
-#define PWC_WB_FL 2
-#define PWC_WB_MANUAL 3
-#define PWC_WB_AUTO 4
-
-/* Used with VIDIOCPWC[SG]AWB (Auto White Balance).
- Set mode to one of the PWC_WB_* values above.
- *red and *blue are the respective gains of these colour components inside
- the camera; range 0..65535
- When 'mode' == PWC_WB_MANUAL, 'manual_red' and 'manual_blue' are set or read;
- otherwise undefined.
- 'read_red' and 'read_blue' are read-only.
-*/
-struct pwc_whitebalance
-{
- int mode;
- int manual_red, manual_blue; /* R/W */
- int read_red, read_blue; /* R/O */
-};
-
-/*
- 'control_speed' and 'control_delay' are used in automatic whitebalance mode,
- and tell the camera how fast it should react to changes in lighting, and
- with how much delay. Valid values are 0..65535.
-*/
-struct pwc_wb_speed
-{
- int control_speed;
- int control_delay;
-
-};
-
-/* Used with VIDIOCPWC[SG]LED */
-struct pwc_leds
-{
- int led_on; /* Led on-time; range = 0..25000 */
- int led_off; /* Led off-time; range = 0..25000 */
-};
-
-/* Image size (used with GREALSIZE) */
-struct pwc_imagesize
-{
- int width;
- int height;
-};
-
-/* Defines and structures for Motorized Pan & Tilt */
-#define PWC_MPT_PAN 0x01
-#define PWC_MPT_TILT 0x02
-#define PWC_MPT_TIMEOUT 0x04 /* for status */
-
-/* Set angles; when absolute != 0, the angle is absolute and the
- driver calculates the relative offset for you. This can only
- be used with VIDIOCPWCSANGLE; VIDIOCPWCGANGLE always returns
- absolute angles.
- */
-struct pwc_mpt_angles
-{
- int absolute; /* write-only */
- int pan; /* degrees * 100 */
- int tilt; /* degress * 100 */
-};
-
-/* Range of angles of the camera, both horizontally and vertically.
- */
-struct pwc_mpt_range
-{
- int pan_min, pan_max; /* degrees * 100 */
- int tilt_min, tilt_max;
-};
-
-struct pwc_mpt_status
-{
- int status;
- int time_pan;
- int time_tilt;
-};
-
-
-/* This is used for out-of-kernel decompression. With it, you can get
- all the necessary information to initialize and use the decompressor
- routines in standalone applications.
- */
-struct pwc_video_command
-{
- int type; /* camera type (645, 675, 730, etc.) */
- int release; /* release number */
-
- int size; /* one of PSZ_* */
- int alternate;
- int command_len; /* length of USB video command */
- unsigned char command_buf[13]; /* Actual USB video command */
- int bandlength; /* >0 = compressed */
- int frame_size; /* Size of one (un)compressed frame */
-};
-
-/* Flags for PWCX subroutines. Not all modules honour all flags. */
-#define PWCX_FLAG_PLANAR 0x0001
-#define PWCX_FLAG_BAYER 0x0008
-
-
-/* IOCTL definitions */
-
- /* Restore user settings */
-#define VIDIOCPWCRUSER _IO('v', 192)
- /* Save user settings */
-#define VIDIOCPWCSUSER _IO('v', 193)
- /* Restore factory settings */
-#define VIDIOCPWCFACTORY _IO('v', 194)
-
- /* You can manipulate the compression factor. A compression preference of 0
- means use uncompressed modes when available; 1 is low compression, 2 is
- medium and 3 is high compression preferred. Of course, the higher the
- compression, the lower the bandwidth used but more chance of artefacts
- in the image. The driver automatically chooses a higher compression when
- the preferred mode is not available.
- */
- /* Set preferred compression quality (0 = uncompressed, 3 = highest compression) */
-#define VIDIOCPWCSCQUAL _IOW('v', 195, int)
- /* Get preferred compression quality */
-#define VIDIOCPWCGCQUAL _IOR('v', 195, int)
-
-
-/* Retrieve serial number of camera */
-#define VIDIOCPWCGSERIAL _IOR('v', 198, struct pwc_serial)
-
- /* This is a probe function; since so many devices are supported, it
- becomes difficult to include all the names in programs that want to
- check for the enhanced Philips stuff. So in stead, try this PROBE;
- it returns a structure with the original name, and the corresponding
- Philips type.
- To use, fill the structure with zeroes, call PROBE and if that succeeds,
- compare the name with that returned from VIDIOCGCAP; they should be the
- same. If so, you can be assured it is a Philips (OEM) cam and the type
- is valid.
- */
-#define VIDIOCPWCPROBE _IOR('v', 199, struct pwc_probe)
-
- /* Set AGC (Automatic Gain Control); int < 0 = auto, 0..65535 = fixed */
-#define VIDIOCPWCSAGC _IOW('v', 200, int)
- /* Get AGC; int < 0 = auto; >= 0 = fixed, range 0..65535 */
-#define VIDIOCPWCGAGC _IOR('v', 200, int)
- /* Set shutter speed; int < 0 = auto; >= 0 = fixed, range 0..65535 */
-#define VIDIOCPWCSSHUTTER _IOW('v', 201, int)
-
- /* Color compensation (Auto White Balance) */
-#define VIDIOCPWCSAWB _IOW('v', 202, struct pwc_whitebalance)
-#define VIDIOCPWCGAWB _IOR('v', 202, struct pwc_whitebalance)
-
- /* Auto WB speed */
-#define VIDIOCPWCSAWBSPEED _IOW('v', 203, struct pwc_wb_speed)
-#define VIDIOCPWCGAWBSPEED _IOR('v', 203, struct pwc_wb_speed)
-
- /* LEDs on/off/blink; int range 0..65535 */
-#define VIDIOCPWCSLED _IOW('v', 205, struct pwc_leds)
-#define VIDIOCPWCGLED _IOR('v', 205, struct pwc_leds)
-
- /* Contour (sharpness); int < 0 = auto, 0..65536 = fixed */
-#define VIDIOCPWCSCONTOUR _IOW('v', 206, int)
-#define VIDIOCPWCGCONTOUR _IOR('v', 206, int)
-
- /* Backlight compensation; 0 = off, otherwise on */
-#define VIDIOCPWCSBACKLIGHT _IOW('v', 207, int)
-#define VIDIOCPWCGBACKLIGHT _IOR('v', 207, int)
-
- /* Flickerless mode; = 0 off, otherwise on */
-#define VIDIOCPWCSFLICKER _IOW('v', 208, int)
-#define VIDIOCPWCGFLICKER _IOR('v', 208, int)
-
- /* Dynamic noise reduction; 0 off, 3 = high noise reduction */
-#define VIDIOCPWCSDYNNOISE _IOW('v', 209, int)
-#define VIDIOCPWCGDYNNOISE _IOR('v', 209, int)
-
- /* Real image size as used by the camera; tells you whether or not there's a gray border around the image */
-#define VIDIOCPWCGREALSIZE _IOR('v', 210, struct pwc_imagesize)
-
- /* Motorized pan & tilt functions */
-#define VIDIOCPWCMPTRESET _IOW('v', 211, int)
-#define VIDIOCPWCMPTGRANGE _IOR('v', 211, struct pwc_mpt_range)
-#define VIDIOCPWCMPTSANGLE _IOW('v', 212, struct pwc_mpt_angles)
-#define VIDIOCPWCMPTGANGLE _IOR('v', 212, struct pwc_mpt_angles)
-#define VIDIOCPWCMPTSTATUS _IOR('v', 213, struct pwc_mpt_status)
-
- /* Get the USB set-video command; needed for initializing libpwcx */
-#define VIDIOCPWCGVIDCMD _IOR('v', 215, struct pwc_video_command)
-struct pwc_table_init_buffer {
- int len;
- char *buffer;
-
-};
-#define VIDIOCPWCGVIDTABLE _IOR('v', 216, struct pwc_table_init_buffer)
-
-/*
- * This is private command used when communicating with v4l2.
- * In the future all private ioctl will be remove/replace to
- * use interface offer by v4l2.
- */
-
-#define V4L2_CID_PRIVATE_SAVE_USER (V4L2_CID_PRIVATE_BASE + 0)
-#define V4L2_CID_PRIVATE_RESTORE_USER (V4L2_CID_PRIVATE_BASE + 1)
-#define V4L2_CID_PRIVATE_RESTORE_FACTORY (V4L2_CID_PRIVATE_BASE + 2)
-#define V4L2_CID_PRIVATE_COLOUR_MODE (V4L2_CID_PRIVATE_BASE + 3)
-#define V4L2_CID_PRIVATE_AUTOCONTOUR (V4L2_CID_PRIVATE_BASE + 4)
-#define V4L2_CID_PRIVATE_CONTOUR (V4L2_CID_PRIVATE_BASE + 5)
-#define V4L2_CID_PRIVATE_BACKLIGHT (V4L2_CID_PRIVATE_BASE + 6)
-#define V4L2_CID_PRIVATE_FLICKERLESS (V4L2_CID_PRIVATE_BASE + 7)
-#define V4L2_CID_PRIVATE_NOISE_REDUCTION (V4L2_CID_PRIVATE_BASE + 8)
-
-struct pwc_raw_frame {
- __le16 type; /* type of the webcam */
- __le16 vbandlength; /* Size of 4lines compressed (used by the decompressor) */
- __u8 cmd[4]; /* the four byte of the command (in case of nala,
- only the first 3 bytes is filled) */
- __u8 rawframe[0]; /* frame_size = H/4*vbandlength */
-} __attribute__ ((packed));
-
-
-#endif
diff --git a/drivers/media/video/pwc/pwc-kiara.c b/drivers/media/video/pwc/pwc-kiara.c
index f4ae83c..e5f4fd8 100644
--- a/drivers/media/video/pwc/pwc-kiara.c
+++ b/drivers/media/video/pwc/pwc-kiara.c
@@ -40,7 +40,6 @@
#include "pwc-kiara.h"
-#include "pwc-uncompress.h"
const unsigned int Kiara_fps_vector[PWC_FPS_MAX_KIARA] = { 5, 10, 15, 20, 25, 30 };
diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c
index 6af5bb5..0b03133 100644
--- a/drivers/media/video/pwc/pwc-misc.c
+++ b/drivers/media/video/pwc/pwc-misc.c
@@ -126,8 +126,4 @@ void pwc_construct(struct pwc_device *pdev)
pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
pdev->view_min.size = pdev->view_min.x * pdev->view_min.y;
pdev->view_max.size = pdev->view_max.x * pdev->view_max.y;
- /* length of image, in YUV format; always allocate enough memory. */
- pdev->len_per_image = PAGE_ALIGN((pdev->abs_max.x * pdev->abs_max.y * 3) / 2);
}
-
-
diff --git a/drivers/media/video/pwc/pwc-uncompress.c b/drivers/media/video/pwc/pwc-uncompress.c
index 3b73f29..5126509 100644
--- a/drivers/media/video/pwc/pwc-uncompress.c
+++ b/drivers/media/video/pwc/pwc-uncompress.c
@@ -30,26 +30,17 @@
#include <asm/types.h>
#include "pwc.h"
-#include "pwc-uncompress.h"
#include "pwc-dec1.h"
#include "pwc-dec23.h"
-int pwc_decompress(struct pwc_device *pdev)
+int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
{
- struct pwc_frame_buf *fbuf;
int n, line, col, stride;
void *yuv, *image;
u16 *src;
u16 *dsty, *dstu, *dstv;
- if (pdev == NULL)
- return -EFAULT;
-
- fbuf = pdev->read_frame;
- if (fbuf == NULL)
- return -EFAULT;
- image = pdev->image_data;
- image += pdev->images[pdev->fill_image].offset;
+ image = vb2_plane_vaddr(&fbuf->vb, 0);
yuv = fbuf->data + pdev->frame_header_size; /* Skip header */
@@ -64,9 +55,13 @@ int pwc_decompress(struct pwc_device *pdev)
* determine this using the type of the webcam */
memcpy(raw_frame->cmd, pdev->cmd_buf, 4);
memcpy(raw_frame+1, yuv, pdev->frame_size);
+ vb2_set_plane_payload(&fbuf->vb, 0,
+ pdev->frame_size + sizeof(struct pwc_raw_frame));
return 0;
}
+ vb2_set_plane_payload(&fbuf->vb, 0, pdev->view.size);
+
if (pdev->vbandlength == 0) {
/* Uncompressed mode.
* We copy the data into the output buffer, using the viewport
diff --git a/drivers/media/video/pwc/pwc-uncompress.h b/drivers/media/video/pwc/pwc-uncompress.h
deleted file mode 100644
index 43028e7..0000000
--- a/drivers/media/video/pwc/pwc-uncompress.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* (C) 1999-2003 Nemosoft Unv.
- (C) 2004-2006 Luc Saillard (luc@saillard.org)
-
- NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
- driver and thus may have bugs that are not present in the original version.
- Please send bug reports and support requests to <luc@saillard.org>.
- The decompression routines have been implemented by reverse-engineering the
- Nemosoft binary pwcx module. Caveat emptor.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-/* This file is the bridge between the kernel module and the plugin; it
- describes the structures and datatypes used in both modules. Any
- significant change should be reflected by increasing the
- pwc_decompressor_version major number.
- */
-#ifndef PWC_UNCOMPRESS_H
-#define PWC_UNCOMPRESS_H
-
-
-#include <media/pwc-ioctl.h>
-
-/* from pwc-dec.h */
-#define PWCX_FLAG_PLANAR 0x0001
-/* */
-
-#endif
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index f85c512..8c70e64 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -2,6 +2,7 @@
USB and Video4Linux interface part.
(C) 1999-2004 Nemosoft Unv.
(C) 2004-2006 Luc Saillard (luc@saillard.org)
+ (C) 2011 Hans de Goede <hdegoede@redhat.com>
NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
driver and thus may have bugs that are not present in the original version.
@@ -31,184 +32,330 @@
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
#include <asm/io.h>
#include "pwc.h"
-static struct v4l2_queryctrl pwc_controls[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 128,
- .step = 1,
- .default_value = 64,
- },
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 64,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = -100,
- .maximum = 100,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 0,
- .maximum = 32,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Gain",
- .minimum = 0,
- .maximum = 256,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Gain",
- .minimum = 0,
- .maximum = 256,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto White Balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Shutter Speed (Exposure)",
- .minimum = 0,
- .maximum = 256,
- .step = 1,
- .default_value = 200,
- },
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain Enabled",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain Level",
- .minimum = 0,
- .maximum = 256,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_SAVE_USER,
- .type = V4L2_CTRL_TYPE_BUTTON,
- .name = "Save User Settings",
- .minimum = 0,
- .maximum = 0,
- .step = 0,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_RESTORE_USER,
- .type = V4L2_CTRL_TYPE_BUTTON,
- .name = "Restore User Settings",
- .minimum = 0,
- .maximum = 0,
- .step = 0,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_RESTORE_FACTORY,
- .type = V4L2_CTRL_TYPE_BUTTON,
- .name = "Restore Factory Settings",
- .minimum = 0,
- .maximum = 0,
- .step = 0,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_COLOUR_MODE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Colour mode",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_AUTOCONTOUR,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto contour",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_CONTOUR,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contour",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_BACKLIGHT,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Backlight compensation",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_FLICKERLESS,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flickerless",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_NOISE_REDUCTION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Noise reduction",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 0,
- },
+#define PWC_CID_CUSTOM(ctrl) ((V4L2_CID_USER_BASE | 0xf000) + custom_ ## ctrl)
+
+static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl);
+static int pwc_s_ctrl(struct v4l2_ctrl *ctrl);
+
+static const struct v4l2_ctrl_ops pwc_ctrl_ops = {
+ .g_volatile_ctrl = pwc_g_volatile_ctrl,
+ .s_ctrl = pwc_s_ctrl,
+};
+
+enum { awb_indoor, awb_outdoor, awb_fl, awb_manual, awb_auto };
+enum { custom_autocontour, custom_contour, custom_noise_reduction,
+ custom_save_user, custom_restore_user, custom_restore_factory };
+
+const char * const pwc_auto_whitebal_qmenu[] = {
+ "Indoor (Incandescant Lighting) Mode",
+ "Outdoor (Sunlight) Mode",
+ "Indoor (Fluorescent Lighting) Mode",
+ "Manual Mode",
+ "Auto Mode",
+ NULL
+};
+
+static const struct v4l2_ctrl_config pwc_auto_white_balance_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = awb_auto,
+ .qmenu = pwc_auto_whitebal_qmenu,
+};
+
+static const struct v4l2_ctrl_config pwc_autocontour_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(autocontour),
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto contour",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_contour_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(contour),
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contour",
+ .min = 0,
+ .max = 63,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_backlight_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = V4L2_CID_BACKLIGHT_COMPENSATION,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_flicker_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = V4L2_CID_BAND_STOP_FILTER,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_noise_reduction_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(noise_reduction),
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Dynamic Noise Reduction",
+ .min = 0,
+ .max = 3,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_save_user_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(save_user),
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .name = "Save User Settings",
};
+static const struct v4l2_ctrl_config pwc_restore_user_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(restore_user),
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .name = "Restore User Settings",
+};
+
+static const struct v4l2_ctrl_config pwc_restore_factory_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(restore_factory),
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .name = "Restore Factory Settings",
+};
+
+int pwc_init_controls(struct pwc_device *pdev)
+{
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_config cfg;
+ int r, def;
+
+ hdl = &pdev->ctrl_handler;
+ r = v4l2_ctrl_handler_init(hdl, 20);
+ if (r)
+ return r;
+
+ /* Brightness, contrast, saturation, gamma */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, BRIGHTNESS_FORMATTER, &def);
+ if (r || def > 127)
+ def = 63;
+ pdev->brightness = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 127, 1, def);
+
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, CONTRAST_FORMATTER, &def);
+ if (r || def > 63)
+ def = 31;
+ pdev->contrast = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 63, 1, def);
+
+ if (pdev->type >= 675) {
+ if (pdev->type < 730)
+ pdev->saturation_fmt = SATURATION_MODE_FORMATTER2;
+ else
+ pdev->saturation_fmt = SATURATION_MODE_FORMATTER1;
+ r = pwc_get_s8_ctrl(pdev, GET_CHROM_CTL, pdev->saturation_fmt,
+ &def);
+ if (r || def < -100 || def > 100)
+ def = 0;
+ pdev->saturation = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_SATURATION, -100, 100, 1, def);
+ }
+
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, GAMMA_FORMATTER, &def);
+ if (r || def > 31)
+ def = 15;
+ pdev->gamma = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 31, 1, def);
+
+ /* auto white balance, red gain, blue gain */
+ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, WB_MODE_FORMATTER, &def);
+ if (r || def > awb_auto)
+ def = awb_auto;
+ cfg = pwc_auto_white_balance_cfg;
+ cfg.name = v4l2_ctrl_get_name(cfg.id);
+ cfg.def = def;
+ pdev->auto_white_balance = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+ /* check auto controls to avoid NULL deref in v4l2_ctrl_auto_cluster */
+ if (!pdev->auto_white_balance)
+ return hdl->error;
+
+ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL,
+ PRESET_MANUAL_RED_GAIN_FORMATTER, &def);
+ if (r)
+ def = 127;
+ pdev->red_balance = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 255, 1, def);
+
+ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL,
+ PRESET_MANUAL_BLUE_GAIN_FORMATTER, &def);
+ if (r)
+ def = 127;
+ pdev->blue_balance = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 255, 1, def);
+
+ v4l2_ctrl_auto_cluster(3, &pdev->auto_white_balance, awb_manual,
+ pdev->auto_white_balance->cur.val == awb_auto);
+
+ /* autogain, gain */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, AGC_MODE_FORMATTER, &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0;
+	/* Note: a register value of 0 means auto gain is on */
+ pdev->autogain = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, def == 0);
+ if (!pdev->autogain)
+ return hdl->error;
+
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, PRESET_AGC_FORMATTER, &def);
+ if (r || def > 63)
+ def = 31;
+ pdev->gain = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_GAIN, 0, 63, 1, def);
+
+ /* auto exposure, exposure */
+ if (DEVICE_USE_CODEC2(pdev->type)) {
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, SHUTTER_MODE_FORMATTER,
+ &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0;
+ /*
+ * def = 0 auto, def = ff manual
+ * menu idx 0 = auto, idx 1 = manual
+ */
+ pdev->exposure_auto = v4l2_ctrl_new_std_menu(hdl,
+ &pwc_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ 1, 0, def != 0);
+ if (!pdev->exposure_auto)
+ return hdl->error;
+
+ /* GET_LUM_CTL, PRESET_SHUTTER_FORMATTER is unreliable */
+ r = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
+ READ_SHUTTER_FORMATTER, &def);
+ if (r || def > 655)
+ def = 655;
+ pdev->exposure = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 655, 1, def);
+ /* CODEC2: separate auto gain & auto exposure */
+ v4l2_ctrl_auto_cluster(2, &pdev->autogain, 0, true);
+ v4l2_ctrl_auto_cluster(2, &pdev->exposure_auto,
+ V4L2_EXPOSURE_MANUAL, true);
+ } else if (DEVICE_USE_CODEC3(pdev->type)) {
+ /* GET_LUM_CTL, PRESET_SHUTTER_FORMATTER is unreliable */
+ r = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
+ READ_SHUTTER_FORMATTER, &def);
+ if (r || def > 255)
+ def = 255;
+ pdev->exposure = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 255, 1, def);
+ /* CODEC3: both gain and exposure controlled by autogain */
+ pdev->autogain_expo_cluster[0] = pdev->autogain;
+ pdev->autogain_expo_cluster[1] = pdev->gain;
+ pdev->autogain_expo_cluster[2] = pdev->exposure;
+ v4l2_ctrl_auto_cluster(3, pdev->autogain_expo_cluster,
+ 0, true);
+ }
+
+ /* color / bw setting */
+ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, COLOUR_MODE_FORMATTER,
+ &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0xff;
+ /* def = 0 bw, def = ff color, menu idx 0 = color, idx 1 = bw */
+ pdev->colorfx = v4l2_ctrl_new_std_menu(hdl, &pwc_ctrl_ops,
+ V4L2_CID_COLORFX, 1, 0, def == 0);
+
+ /* autocontour, contour */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, AUTO_CONTOUR_FORMATTER, &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0;
+ cfg = pwc_autocontour_cfg;
+ cfg.def = def == 0;
+ pdev->autocontour = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+ if (!pdev->autocontour)
+ return hdl->error;
+
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, PRESET_CONTOUR_FORMATTER, &def);
+ if (r || def > 63)
+ def = 31;
+ cfg = pwc_contour_cfg;
+ cfg.def = def;
+ pdev->contour = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+
+ v4l2_ctrl_auto_cluster(2, &pdev->autocontour, 0, false);
+
+ /* backlight */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL,
+ BACK_LIGHT_COMPENSATION_FORMATTER, &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0;
+ cfg = pwc_backlight_cfg;
+ cfg.name = v4l2_ctrl_get_name(cfg.id);
+ cfg.def = def == 0;
+ pdev->backlight = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+
+	/* flicker reduction */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL,
+ FLICKERLESS_MODE_FORMATTER, &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0;
+ cfg = pwc_flicker_cfg;
+ cfg.name = v4l2_ctrl_get_name(cfg.id);
+ cfg.def = def == 0;
+ pdev->flicker = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+
+ /* Dynamic noise reduction */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL,
+ DYNAMIC_NOISE_CONTROL_FORMATTER, &def);
+ if (r || def > 3)
+ def = 2;
+ cfg = pwc_noise_reduction_cfg;
+ cfg.def = def;
+ pdev->noise_reduction = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+
+ /* Save / Restore User / Factory Settings */
+ pdev->save_user = v4l2_ctrl_new_custom(hdl, &pwc_save_user_cfg, NULL);
+ pdev->restore_user = v4l2_ctrl_new_custom(hdl, &pwc_restore_user_cfg,
+ NULL);
+ if (pdev->restore_user)
+ pdev->restore_user->flags = V4L2_CTRL_FLAG_UPDATE;
+ pdev->restore_factory = v4l2_ctrl_new_custom(hdl,
+ &pwc_restore_factory_cfg,
+ NULL);
+ if (pdev->restore_factory)
+ pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;
+
+ if (!(pdev->features & FEATURE_MOTOR_PANTILT))
+ return hdl->error;
+
+ /* Motor pan / tilt / reset */
+ pdev->motor_pan = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_PAN_RELATIVE, -4480, 4480, 64, 0);
+ if (!pdev->motor_pan)
+ return hdl->error;
+ pdev->motor_tilt = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_TILT_RELATIVE, -1920, 1920, 64, 0);
+ pdev->motor_pan_reset = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_PAN_RESET, 0, 0, 0, 0);
+ pdev->motor_tilt_reset = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_TILT_RESET, 0, 0, 0, 0);
+ v4l2_ctrl_cluster(4, &pdev->motor_pan);
+
+ return hdl->error;
+}
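Editor's note: a short user-space sketch (illustrative only, not part of this patch) of how the controls registered above become reachable through the standard control ioctls once vdev.ctrl_handler is set in probe; /dev/video0 is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */
	struct v4l2_queryctrl qc = { .id = V4L2_CID_BRIGHTNESS };
	struct v4l2_control c = { .id = V4L2_CID_BRIGHTNESS };

	/* Range and default now come from the v4l2_ctrl_handler */
	if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0)
		printf("%s: %d..%d (default %d)\n", qc.name,
		       qc.minimum, qc.maximum, qc.default_value);

	c.value = qc.default_value;
	ioctl(fd, VIDIOC_S_CTRL, &c);	/* routed to pwc_s_ctrl() */
	return 0;
}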
static void pwc_vidioc_fill_fmt(const struct pwc_device *pdev, struct v4l2_format *f)
{
@@ -284,10 +431,21 @@ static int pwc_vidioc_try_fmt(struct pwc_device *pdev, struct v4l2_format *f)
}
/* ioctl(VIDIOC_SET_FMT) */
-static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
+
+static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
{
+ struct pwc_device *pdev = video_drvdata(file);
int ret, fps, snapshot, compression, pixelformat;
+ if (!pdev->udev)
+ return -ENODEV;
+
+ if (pdev->capt_file != NULL &&
+ pdev->capt_file != file)
+ return -EBUSY;
+
+ pdev->capt_file = file;
+
ret = pwc_vidioc_try_fmt(pdev, f);
if (ret<0)
return ret;
@@ -309,7 +467,7 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
pixelformat != V4L2_PIX_FMT_PWC2)
return -EINVAL;
- if (pdev->iso_init)
+ if (vb2_is_streaming(&pdev->vb_queue))
return -EBUSY;
PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d "
@@ -343,13 +501,14 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
struct pwc_device *pdev = video_drvdata(file);
+ if (!pdev->udev)
+ return -ENODEV;
+
strcpy(cap->driver, PWC_NAME);
- strlcpy(cap->card, vdev->name, sizeof(cap->card));
+ strlcpy(cap->card, pdev->vdev.name, sizeof(cap->card));
usb_make_path(pdev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = PWC_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
@@ -377,255 +536,396 @@ static int pwc_s_input(struct file *file, void *fh, unsigned int i)
return i ? -EINVAL : 0;
}
-static int pwc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *c)
+static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
- int i, idx;
- u32 id;
-
- id = c->id;
- if (id & V4L2_CTRL_FLAG_NEXT_CTRL) {
- id &= V4L2_CTRL_ID_MASK;
- id++;
- idx = -1;
- for (i = 0; i < ARRAY_SIZE(pwc_controls); i++) {
- if (pwc_controls[i].id < id)
- continue;
- if (idx >= 0
- && pwc_controls[i].id > pwc_controls[idx].id)
- continue;
- idx = i;
+ struct pwc_device *pdev =
+ container_of(ctrl->handler, struct pwc_device, ctrl_handler);
+ int ret = 0;
+
+ /*
+	 * Sometimes it can take quite a while for the pwc to complete USB
+	 * control transfers, so release the modlock to give another process /
+	 * thread streaming from the camera a chance to continue with a dqbuf.
+ */
+ mutex_unlock(&pdev->modlock);
+
+ /*
+ * Take the udev-lock to protect against the disconnect handler
+ * completing and setting dev->udev to NULL underneath us. Other code
+ * does not need to do this since it is protected by the modlock.
+ */
+ mutex_lock(&pdev->udevlock);
+
+ if (!pdev->udev) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ if (pdev->color_bal_valid && time_before(jiffies,
+ pdev->last_color_bal_update + HZ / 4)) {
+ pdev->red_balance->val = pdev->last_red_balance;
+ pdev->blue_balance->val = pdev->last_blue_balance;
+ break;
}
- if (idx < 0)
- return -EINVAL;
- memcpy(c, &pwc_controls[idx], sizeof pwc_controls[0]);
- return 0;
+ ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_RED_GAIN_FORMATTER,
+ &pdev->red_balance->val);
+ if (ret)
+ break;
+ ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_BLUE_GAIN_FORMATTER,
+ &pdev->blue_balance->val);
+ if (ret)
+ break;
+ pdev->last_red_balance = pdev->red_balance->val;
+ pdev->last_blue_balance = pdev->blue_balance->val;
+ pdev->last_color_bal_update = jiffies;
+ pdev->color_bal_valid = true;
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (pdev->gain_valid && time_before(jiffies,
+ pdev->last_gain_update + HZ / 4)) {
+ pdev->gain->val = pdev->last_gain;
+ break;
+ }
+ ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_AGC_FORMATTER, &pdev->gain->val);
+ if (ret)
+ break;
+ pdev->last_gain = pdev->gain->val;
+ pdev->last_gain_update = jiffies;
+ pdev->gain_valid = true;
+ if (!DEVICE_USE_CODEC3(pdev->type))
+ break;
+ /* Fall through for CODEC3 where autogain also controls expo */
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (pdev->exposure_valid && time_before(jiffies,
+ pdev->last_exposure_update + HZ / 4)) {
+ pdev->exposure->val = pdev->last_exposure;
+ break;
+ }
+ ret = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
+ READ_SHUTTER_FORMATTER,
+ &pdev->exposure->val);
+ if (ret)
+ break;
+ pdev->last_exposure = pdev->exposure->val;
+ pdev->last_exposure_update = jiffies;
+ pdev->exposure_valid = true;
+ break;
+ default:
+ ret = -EINVAL;
}
- for (i = 0; i < sizeof(pwc_controls) / sizeof(struct v4l2_queryctrl); i++) {
- if (pwc_controls[i].id == c->id) {
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYCTRL) found\n");
- memcpy(c, &pwc_controls[i], sizeof(struct v4l2_queryctrl));
- return 0;
+
+ if (ret)
+ PWC_ERROR("g_ctrl %s error %d\n", ctrl->name, ret);
+
+leave:
+ mutex_unlock(&pdev->udevlock);
+ mutex_lock(&pdev->modlock);
+ return ret;
+}
+
+static int pwc_set_awb(struct pwc_device *pdev)
+{
+ int ret = 0;
+
+ if (pdev->auto_white_balance->is_new) {
+ ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
+ WB_MODE_FORMATTER,
+ pdev->auto_white_balance->val);
+ if (ret)
+ return ret;
+
+ /* Update val when coming from auto or going to a preset */
+ if (pdev->red_balance->is_volatile ||
+ pdev->auto_white_balance->val == awb_indoor ||
+ pdev->auto_white_balance->val == awb_outdoor ||
+ pdev->auto_white_balance->val == awb_fl) {
+ if (!pdev->red_balance->is_new)
+ pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_RED_GAIN_FORMATTER,
+ &pdev->red_balance->val);
+ if (!pdev->blue_balance->is_new)
+ pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_BLUE_GAIN_FORMATTER,
+ &pdev->blue_balance->val);
+ }
+ if (pdev->auto_white_balance->val == awb_auto) {
+ pdev->red_balance->is_volatile = true;
+ pdev->blue_balance->is_volatile = true;
+ pdev->color_bal_valid = false; /* Force cache update */
+ } else {
+ pdev->red_balance->is_volatile = false;
+ pdev->blue_balance->is_volatile = false;
}
}
- return -EINVAL;
+
+ if (ret == 0 && pdev->red_balance->is_new) {
+ if (pdev->auto_white_balance->val != awb_manual)
+ return -EBUSY;
+ ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
+ PRESET_MANUAL_RED_GAIN_FORMATTER,
+ pdev->red_balance->val);
+ }
+
+ if (ret == 0 && pdev->blue_balance->is_new) {
+ if (pdev->auto_white_balance->val != awb_manual)
+ return -EBUSY;
+ ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
+ PRESET_MANUAL_BLUE_GAIN_FORMATTER,
+ pdev->blue_balance->val);
+ }
+ return ret;
}
-static int pwc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
+/* For CODEC2 models which have separate autogain and auto exposure */
+static int pwc_set_autogain(struct pwc_device *pdev)
{
- struct pwc_device *pdev = video_drvdata(file);
- int ret;
+ int ret = 0;
+
+ if (pdev->autogain->is_new) {
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ AGC_MODE_FORMATTER,
+ pdev->autogain->val ? 0 : 0xff);
+ if (ret)
+ return ret;
+ if (pdev->autogain->val)
+ pdev->gain_valid = false; /* Force cache update */
+ else if (!pdev->gain->is_new)
+ pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_AGC_FORMATTER,
+ &pdev->gain->val);
+ }
+ if (ret == 0 && pdev->gain->is_new) {
+ if (pdev->autogain->val)
+ return -EBUSY;
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ PRESET_AGC_FORMATTER,
+ pdev->gain->val);
+ }
+ return ret;
+}
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- c->value = pwc_get_brightness(pdev);
- if (c->value < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_CONTRAST:
- c->value = pwc_get_contrast(pdev);
- if (c->value < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_SATURATION:
- ret = pwc_get_saturation(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_GAMMA:
- c->value = pwc_get_gamma(pdev);
- if (c->value < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_RED_BALANCE:
- ret = pwc_get_red_gain(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value >>= 8;
- return 0;
- case V4L2_CID_BLUE_BALANCE:
- ret = pwc_get_blue_gain(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value >>= 8;
- return 0;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = pwc_get_awb(pdev);
- if (ret < 0)
- return -EINVAL;
- c->value = (ret == PWC_WB_MANUAL) ? 0 : 1;
- return 0;
- case V4L2_CID_GAIN:
- ret = pwc_get_agc(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value >>= 8;
- return 0;
- case V4L2_CID_AUTOGAIN:
- ret = pwc_get_agc(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value = (c->value < 0) ? 1 : 0;
- return 0;
- case V4L2_CID_EXPOSURE:
- ret = pwc_get_shutter_speed(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_COLOUR_MODE:
- ret = pwc_get_colour_mode(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_AUTOCONTOUR:
- ret = pwc_get_contour(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value = (c->value == -1 ? 1 : 0);
- return 0;
- case V4L2_CID_PRIVATE_CONTOUR:
- ret = pwc_get_contour(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value >>= 10;
- return 0;
- case V4L2_CID_PRIVATE_BACKLIGHT:
- ret = pwc_get_backlight(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_FLICKERLESS:
- ret = pwc_get_flicker(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value = (c->value ? 1 : 0);
- return 0;
- case V4L2_CID_PRIVATE_NOISE_REDUCTION:
- ret = pwc_get_dynamic_noise(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
+/* For CODEC2 models which have separate autogain and auto exposure */
+static int pwc_set_exposure_auto(struct pwc_device *pdev)
+{
+ int ret = 0;
+ int is_auto = pdev->exposure_auto->val == V4L2_EXPOSURE_AUTO;
+
+ if (pdev->exposure_auto->is_new) {
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ SHUTTER_MODE_FORMATTER,
+ is_auto ? 0 : 0xff);
+ if (ret)
+ return ret;
+ if (is_auto)
+ pdev->exposure_valid = false; /* Force cache update */
+ else if (!pdev->exposure->is_new)
+ pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
+ READ_SHUTTER_FORMATTER,
+ &pdev->exposure->val);
+ }
+ if (ret == 0 && pdev->exposure->is_new) {
+ if (is_auto)
+ return -EBUSY;
+ ret = pwc_set_u16_ctrl(pdev, SET_LUM_CTL,
+ PRESET_SHUTTER_FORMATTER,
+ pdev->exposure->val);
+ }
+ return ret;
+}
- case V4L2_CID_PRIVATE_SAVE_USER:
- case V4L2_CID_PRIVATE_RESTORE_USER:
- case V4L2_CID_PRIVATE_RESTORE_FACTORY:
- return -EINVAL;
+/* For CODEC3 models which have autogain controlling both gain and exposure */
+static int pwc_set_autogain_expo(struct pwc_device *pdev)
+{
+ int ret = 0;
+
+ if (pdev->autogain->is_new) {
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ AGC_MODE_FORMATTER,
+ pdev->autogain->val ? 0 : 0xff);
+ if (ret)
+ return ret;
+ if (pdev->autogain->val) {
+ pdev->gain_valid = false; /* Force cache update */
+ pdev->exposure_valid = false; /* Force cache update */
+ } else {
+ if (!pdev->gain->is_new)
+ pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_AGC_FORMATTER,
+ &pdev->gain->val);
+ if (!pdev->exposure->is_new)
+ pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
+ READ_SHUTTER_FORMATTER,
+ &pdev->exposure->val);
+ }
}
- return -EINVAL;
+ if (ret == 0 && pdev->gain->is_new) {
+ if (pdev->autogain->val)
+ return -EBUSY;
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ PRESET_AGC_FORMATTER,
+ pdev->gain->val);
+ }
+ if (ret == 0 && pdev->exposure->is_new) {
+ if (pdev->autogain->val)
+ return -EBUSY;
+ ret = pwc_set_u16_ctrl(pdev, SET_LUM_CTL,
+ PRESET_SHUTTER_FORMATTER,
+ pdev->exposure->val);
+ }
+ return ret;
}
-static int pwc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
+static int pwc_set_motor(struct pwc_device *pdev)
{
- struct pwc_device *pdev = video_drvdata(file);
int ret;
+ u8 buf[4];
+
+ buf[0] = 0;
+ if (pdev->motor_pan_reset->is_new)
+ buf[0] |= 0x01;
+ if (pdev->motor_tilt_reset->is_new)
+ buf[0] |= 0x02;
+ if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
+ ret = send_control_msg(pdev, SET_MPT_CTL,
+ PT_RESET_CONTROL_FORMATTER, buf, 1);
+ if (ret < 0)
+ return ret;
+ }
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- c->value <<= 9;
- ret = pwc_set_brightness(pdev, c->value);
+ memset(buf, 0, sizeof(buf));
+ if (pdev->motor_pan->is_new) {
+ buf[0] = pdev->motor_pan->val & 0xFF;
+ buf[1] = (pdev->motor_pan->val >> 8);
+ }
+ if (pdev->motor_tilt->is_new) {
+ buf[2] = pdev->motor_tilt->val & 0xFF;
+ buf[3] = (pdev->motor_tilt->val >> 8);
+ }
+ if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
+ ret = send_control_msg(pdev, SET_MPT_CTL,
+ PT_RELATIVE_CONTROL_FORMATTER,
+ buf, sizeof(buf));
if (ret < 0)
- return -EINVAL;
- return 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct pwc_device *pdev =
+ container_of(ctrl->handler, struct pwc_device, ctrl_handler);
+ int ret = 0;
+
+ /* See the comments on locking in pwc_g_volatile_ctrl */
+ mutex_unlock(&pdev->modlock);
+ mutex_lock(&pdev->udevlock);
+
+ if (!pdev->udev) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ BRIGHTNESS_FORMATTER, ctrl->val);
+ break;
case V4L2_CID_CONTRAST:
- c->value <<= 10;
- ret = pwc_set_contrast(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ CONTRAST_FORMATTER, ctrl->val);
+ break;
case V4L2_CID_SATURATION:
- ret = pwc_set_saturation(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
+ ret = pwc_set_s8_ctrl(pdev, SET_CHROM_CTL,
+ pdev->saturation_fmt, ctrl->val);
+ break;
case V4L2_CID_GAMMA:
- c->value <<= 11;
- ret = pwc_set_gamma(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_RED_BALANCE:
- c->value <<= 8;
- ret = pwc_set_red_gain(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_BLUE_BALANCE:
- c->value <<= 8;
- ret = pwc_set_blue_gain(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ GAMMA_FORMATTER, ctrl->val);
+ break;
case V4L2_CID_AUTO_WHITE_BALANCE:
- c->value = (c->value == 0) ? PWC_WB_MANUAL : PWC_WB_AUTO;
- ret = pwc_set_awb(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_EXPOSURE:
- c->value <<= 8;
- ret = pwc_set_shutter_speed(pdev, c->value ? 0 : 1, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
+ ret = pwc_set_awb(pdev);
+ break;
case V4L2_CID_AUTOGAIN:
- /* autogain off means nothing without a gain */
- if (c->value == 0)
- return 0;
- ret = pwc_set_agc(pdev, c->value, 0);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_GAIN:
- c->value <<= 8;
- ret = pwc_set_agc(pdev, 0, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_SAVE_USER:
- if (pwc_save_user(pdev))
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_RESTORE_USER:
- if (pwc_restore_user(pdev))
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_RESTORE_FACTORY:
- if (pwc_restore_factory(pdev))
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_COLOUR_MODE:
- ret = pwc_set_colour_mode(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_AUTOCONTOUR:
- c->value = (c->value == 1) ? -1 : 0;
- ret = pwc_set_contour(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_CONTOUR:
- c->value <<= 10;
- ret = pwc_set_contour(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_BACKLIGHT:
- ret = pwc_set_backlight(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_FLICKERLESS:
- ret = pwc_set_flicker(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- case V4L2_CID_PRIVATE_NOISE_REDUCTION:
- ret = pwc_set_dynamic_noise(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
-
+ if (DEVICE_USE_CODEC2(pdev->type))
+ ret = pwc_set_autogain(pdev);
+ else if (DEVICE_USE_CODEC3(pdev->type))
+ ret = pwc_set_autogain_expo(pdev);
+ else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (DEVICE_USE_CODEC2(pdev->type))
+ ret = pwc_set_exposure_auto(pdev);
+ else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_COLORFX:
+ ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
+ COLOUR_MODE_FORMATTER,
+ ctrl->val ? 0 : 0xff);
+ break;
+ case PWC_CID_CUSTOM(autocontour):
+ if (pdev->autocontour->is_new) {
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ AUTO_CONTOUR_FORMATTER,
+ pdev->autocontour->val ? 0 : 0xff);
+ }
+ if (ret == 0 && pdev->contour->is_new) {
+ if (pdev->autocontour->val) {
+ ret = -EBUSY;
+ break;
+ }
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ PRESET_CONTOUR_FORMATTER,
+ pdev->contour->val);
+ }
+ break;
+ case V4L2_CID_BACKLIGHT_COMPENSATION:
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ BACK_LIGHT_COMPENSATION_FORMATTER,
+ ctrl->val ? 0 : 0xff);
+ break;
+ case V4L2_CID_BAND_STOP_FILTER:
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ FLICKERLESS_MODE_FORMATTER,
+ ctrl->val ? 0 : 0xff);
+ break;
+ case PWC_CID_CUSTOM(noise_reduction):
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ DYNAMIC_NOISE_CONTROL_FORMATTER,
+ ctrl->val);
+ break;
+ case PWC_CID_CUSTOM(save_user):
+ ret = pwc_button_ctrl(pdev, SAVE_USER_DEFAULTS_FORMATTER);
+ break;
+ case PWC_CID_CUSTOM(restore_user):
+ ret = pwc_button_ctrl(pdev, RESTORE_USER_DEFAULTS_FORMATTER);
+ break;
+ case PWC_CID_CUSTOM(restore_factory):
+ ret = pwc_button_ctrl(pdev,
+ RESTORE_FACTORY_DEFAULTS_FORMATTER);
+ break;
+ case V4L2_CID_PAN_RELATIVE:
+ ret = pwc_set_motor(pdev);
+ break;
+ default:
+ ret = -EINVAL;
}
- return -EINVAL;
+
+ if (ret)
+ PWC_ERROR("s_ctrl %s error %d\n", ctrl->name, ret);
+
+leave:
+ mutex_unlock(&pdev->udevlock);
+ mutex_lock(&pdev->modlock);
+ return ret;
}
static int pwc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
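The control dispatch above is the driver-side half of the v4l2 control framework: the per-control vidioc callbacks go away and a single s_ctrl op, owned by a v4l2_ctrl_handler, is invoked for whichever control changed. A minimal sketch of how such a handler is typically set up (the ops name, control ranges and defaults below are illustrative assumptions, not values taken from this patch):

	/* Illustrative sketch only -- names, ranges and defaults are assumed. */
	static const struct v4l2_ctrl_ops pwc_ctrl_ops = {
		.s_ctrl = pwc_s_ctrl,	/* the dispatch shown in the hunk above */
	};

	int pwc_init_controls(struct pwc_device *pdev)
	{
		struct v4l2_ctrl_handler *hdl = &pdev->ctrl_handler;

		v4l2_ctrl_handler_init(hdl, 20);
		pdev->contrast = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
				V4L2_CID_CONTRAST, 0, 63, 1, 32);
		pdev->gamma = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
				V4L2_CID_GAMMA, 0, 31, 1, 16);
		if (hdl->error)
			return hdl->error;
		pdev->vdev.ctrl_handler = hdl;	/* framework now services the ctrl ioctls */
		return 0;
	}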
@@ -667,157 +967,77 @@ static int pwc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *
return pwc_vidioc_try_fmt(pdev, f);
}
-static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
+static int pwc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *rb)
{
struct pwc_device *pdev = video_drvdata(file);
- return pwc_vidioc_set_fmt(pdev, f);
-}
-
-static int pwc_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
-{
- int nbuffers;
+ if (pdev->capt_file != NULL &&
+ pdev->capt_file != file)
+ return -EBUSY;
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_REQBUFS) count=%d\n", rb->count);
- if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (rb->memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
+ pdev->capt_file = file;
- nbuffers = rb->count;
- if (nbuffers < 2)
- nbuffers = 2;
- else if (nbuffers > pwc_mbufs)
- nbuffers = pwc_mbufs;
- /* Force to use our # of buffers */
- rb->count = pwc_mbufs;
- return 0;
+ return vb2_reqbufs(&pdev->vb_queue, rb);
}
static int pwc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct pwc_device *pdev = video_drvdata(file);
- int index;
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) index=%d\n", buf->index);
- if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) Bad type\n");
- return -EINVAL;
- }
- index = buf->index;
- if (index < 0 || index >= pwc_mbufs) {
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) Bad index %d\n", buf->index);
- return -EINVAL;
- }
-
- buf->m.offset = index * pdev->len_per_image;
- if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
- buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
- else
- buf->bytesused = pdev->view.size;
- buf->field = V4L2_FIELD_NONE;
- buf->memory = V4L2_MEMORY_MMAP;
- /*buf->flags = V4L2_BUF_FLAG_MAPPED;*/
- buf->length = pdev->len_per_image;
-
- PWC_DEBUG_READ("VIDIOC_QUERYBUF: index=%d\n", buf->index);
- PWC_DEBUG_READ("VIDIOC_QUERYBUF: m.offset=%d\n", buf->m.offset);
- PWC_DEBUG_READ("VIDIOC_QUERYBUF: bytesused=%d\n", buf->bytesused);
-
- return 0;
+ return vb2_querybuf(&pdev->vb_queue, buf);
}
static int pwc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_QBUF) index=%d\n", buf->index);
- if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (buf->memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
- if (buf->index >= pwc_mbufs)
- return -EINVAL;
+ struct pwc_device *pdev = video_drvdata(file);
- buf->flags |= V4L2_BUF_FLAG_QUEUED;
- buf->flags &= ~V4L2_BUF_FLAG_DONE;
+ if (!pdev->udev)
+ return -ENODEV;
- return 0;
+ if (pdev->capt_file != file)
+ return -EBUSY;
+
+ return vb2_qbuf(&pdev->vb_queue, buf);
}
static int pwc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
- DECLARE_WAITQUEUE(wait, current);
struct pwc_device *pdev = video_drvdata(file);
- int ret;
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_DQBUF)\n");
+ if (!pdev->udev)
+ return -ENODEV;
- if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- add_wait_queue(&pdev->frameq, &wait);
- while (pdev->full_frames == NULL) {
- if (pdev->error_status) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- return -pdev->error_status;
- }
-
- if (signal_pending(current)) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- return -ERESTARTSYS;
- }
- mutex_unlock(&pdev->modlock);
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&pdev->modlock);
- }
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
-
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: frame ready.\n");
- /* Decompress data in pdev->images[pdev->fill_image] */
- ret = pwc_handle_frame(pdev);
- if (ret)
- return -EFAULT;
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: after pwc_handle_frame\n");
-
- buf->index = pdev->fill_image;
- if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
- buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
- else
- buf->bytesused = pdev->view.size;
- buf->flags = V4L2_BUF_FLAG_MAPPED;
- buf->field = V4L2_FIELD_NONE;
- do_gettimeofday(&buf->timestamp);
- buf->sequence = 0;
- buf->memory = V4L2_MEMORY_MMAP;
- buf->m.offset = pdev->fill_image * pdev->len_per_image;
- buf->length = pdev->len_per_image;
- pwc_next_image(pdev);
-
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: buf->index=%d\n", buf->index);
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: buf->length=%d\n", buf->length);
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: m.offset=%d\n", buf->m.offset);
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: bytesused=%d\n", buf->bytesused);
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: leaving\n");
- return 0;
+ if (pdev->capt_file != file)
+ return -EBUSY;
+ return vb2_dqbuf(&pdev->vb_queue, buf, file->f_flags & O_NONBLOCK);
}
static int pwc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
{
struct pwc_device *pdev = video_drvdata(file);
- return pwc_isoc_init(pdev);
+ if (!pdev->udev)
+ return -ENODEV;
+
+ if (pdev->capt_file != file)
+ return -EBUSY;
+
+ return vb2_streamon(&pdev->vb_queue, i);
}
static int pwc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
struct pwc_device *pdev = video_drvdata(file);
- pwc_isoc_cleanup(pdev);
- return 0;
+ if (!pdev->udev)
+ return -ENODEV;
+
+ if (pdev->capt_file != file)
+ return -EBUSY;
+
+ return vb2_streamoff(&pdev->vb_queue, i);
}
static int pwc_enum_framesizes(struct file *file, void *fh,
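With the reqbufs/querybuf/qbuf/dqbuf/streamon/streamoff handlers above reduced to thin wrappers, all buffer management is delegated to videobuf2. A rough sketch of the queue setup this implies, using only names visible in this patch plus a hypothetical pwc_vb_queue_ops (the real initialisation lives in pwc-if.c, which is not part of this excerpt):

	/* Illustrative sketch only -- pwc_vb_queue_ops is a placeholder name. */
	static int pwc_vb_queue_setup_sketch(struct pwc_device *pdev)
	{
		struct vb2_queue *q = &pdev->vb_queue;

		memset(q, 0, sizeof(*q));
		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
		q->drv_priv = pdev;
		q->buf_struct_size = sizeof(struct pwc_frame_buf);
		q->ops = &pwc_vb_queue_ops;		/* queue_setup/buf_queue/start_streaming/... */
		q->mem_ops = &vb2_vmalloc_memops;	/* matches the videobuf2-vmalloc.h include */
		return vb2_queue_init(q);
	}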
@@ -896,9 +1116,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
.vidioc_g_fmt_vid_cap = pwc_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = pwc_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = pwc_try_fmt_vid_cap,
- .vidioc_queryctrl = pwc_queryctrl,
- .vidioc_g_ctrl = pwc_g_ctrl,
- .vidioc_s_ctrl = pwc_s_ctrl,
.vidioc_reqbufs = pwc_reqbufs,
.vidioc_querybuf = pwc_querybuf,
.vidioc_qbuf = pwc_qbuf,
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index 083f8b1..0e4e2d7 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -29,7 +29,6 @@
#include <linux/usb.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -37,19 +36,16 @@
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-vmalloc.h>
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
#include <linux/input.h>
#endif
-#include "pwc-uncompress.h"
#include <media/pwc-ioctl.h>
/* Version block */
-#define PWC_MAJOR 10
-#define PWC_MINOR 0
-#define PWC_EXTRAMINOR 12
-#define PWC_VERSION_CODE KERNEL_VERSION(PWC_MAJOR,PWC_MINOR,PWC_EXTRAMINOR)
-#define PWC_VERSION "10.0.14"
+#define PWC_VERSION "10.0.15"
#define PWC_NAME "pwc"
#define PFX PWC_NAME ": "
@@ -81,9 +77,9 @@
#define PWC_DEBUG_LEVEL (PWC_DEBUG_LEVEL_MODULE)
#define PWC_DEBUG(level, fmt, args...) do {\
- if ((PWC_DEBUG_LEVEL_ ##level) & pwc_trace) \
- printk(KERN_DEBUG PFX fmt, ##args); \
- } while(0)
+ if ((PWC_DEBUG_LEVEL_ ##level) & pwc_trace) \
+ printk(KERN_DEBUG PFX fmt, ##args); \
+ } while (0)
#define PWC_ERROR(fmt, args...) printk(KERN_ERR PFX fmt, ##args)
#define PWC_WARNING(fmt, args...) printk(KERN_WARNING PFX fmt, ##args)
@@ -110,25 +106,21 @@
#define FEATURE_CODEC1 0x0002
#define FEATURE_CODEC2 0x0004
-/* Turn certain features on/off */
-#define PWC_INT_PIPE 0
-
/* Ignore errors in the first N frames, to allow for startup delays */
#define FRAME_LOWMARK 5
/* Size and number of buffers for the ISO pipe. */
-#define MAX_ISO_BUFS 2
+#define MAX_ISO_BUFS 3
#define ISO_FRAMES_PER_DESC 10
#define ISO_MAX_FRAME_SIZE 960
#define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE)
-/* Frame buffers: contains compressed or uncompressed video data. */
-#define MAX_FRAMES 5
/* Maximum size after decompression is 640x480 YUV data, 1.5 * 640 * 480 */
#define PWC_FRAME_SIZE (460800 + TOUCAM_HEADER_SIZE + TOUCAM_TRAILER_SIZE)
-/* Absolute maximum number of buffers available for mmap() */
-#define MAX_IMAGES 10
+/* Absolute minimum and maximum number of buffers available for mmap() */
+#define MIN_FRAMES 2
+#define MAX_FRAMES 16
/* Some macros to quickly find the type of a webcam */
#define DEVICE_USE_CODEC1(x) ((x)<675)
@@ -136,149 +128,221 @@
#define DEVICE_USE_CODEC3(x) ((x)>=700)
#define DEVICE_USE_CODEC23(x) ((x)>=675)
-/* The following structures were based on cpia.h. Why reinvent the wheel? :-) */
-struct pwc_iso_buf
-{
- void *data;
- int length;
- int read;
- struct urb *urb;
-};
+/* from pwc-dec.h */
+#define PWCX_FLAG_PLANAR 0x0001
+
+/* Request types: video */
+#define SET_LUM_CTL 0x01
+#define GET_LUM_CTL 0x02
+#define SET_CHROM_CTL 0x03
+#define GET_CHROM_CTL 0x04
+#define SET_STATUS_CTL 0x05
+#define GET_STATUS_CTL 0x06
+#define SET_EP_STREAM_CTL 0x07
+#define GET_EP_STREAM_CTL 0x08
+#define GET_XX_CTL 0x09
+#define SET_XX_CTL 0x0A
+#define GET_XY_CTL 0x0B
+#define SET_XY_CTL 0x0C
+#define SET_MPT_CTL 0x0D
+#define GET_MPT_CTL 0x0E
+
+/* Selectors for the Luminance controls [GS]ET_LUM_CTL */
+#define AGC_MODE_FORMATTER 0x2000
+#define PRESET_AGC_FORMATTER 0x2100
+#define SHUTTER_MODE_FORMATTER 0x2200
+#define PRESET_SHUTTER_FORMATTER 0x2300
+#define PRESET_CONTOUR_FORMATTER 0x2400
+#define AUTO_CONTOUR_FORMATTER 0x2500
+#define BACK_LIGHT_COMPENSATION_FORMATTER 0x2600
+#define CONTRAST_FORMATTER 0x2700
+#define DYNAMIC_NOISE_CONTROL_FORMATTER 0x2800
+#define FLICKERLESS_MODE_FORMATTER 0x2900
+#define AE_CONTROL_SPEED 0x2A00
+#define BRIGHTNESS_FORMATTER 0x2B00
+#define GAMMA_FORMATTER 0x2C00
+
+/* Selectors for the Chrominance controls [GS]ET_CHROM_CTL */
+#define WB_MODE_FORMATTER 0x1000
+#define AWB_CONTROL_SPEED_FORMATTER 0x1100
+#define AWB_CONTROL_DELAY_FORMATTER 0x1200
+#define PRESET_MANUAL_RED_GAIN_FORMATTER 0x1300
+#define PRESET_MANUAL_BLUE_GAIN_FORMATTER 0x1400
+#define COLOUR_MODE_FORMATTER 0x1500
+#define SATURATION_MODE_FORMATTER1 0x1600
+#define SATURATION_MODE_FORMATTER2 0x1700
+
+/* Selectors for the Status controls [GS]ET_STATUS_CTL */
+#define SAVE_USER_DEFAULTS_FORMATTER 0x0200
+#define RESTORE_USER_DEFAULTS_FORMATTER 0x0300
+#define RESTORE_FACTORY_DEFAULTS_FORMATTER 0x0400
+#define READ_AGC_FORMATTER 0x0500
+#define READ_SHUTTER_FORMATTER 0x0600
+#define READ_RED_GAIN_FORMATTER 0x0700
+#define READ_BLUE_GAIN_FORMATTER 0x0800
+
+/* Formatters for the motorized pan & tilt [GS]ET_MPT_CTL */
+#define PT_RELATIVE_CONTROL_FORMATTER 0x01
+#define PT_RESET_CONTROL_FORMATTER 0x02
+#define PT_STATUS_FORMATTER 0x03
/* intermediate buffers with raw data from the USB cam */
struct pwc_frame_buf
{
- void *data;
- volatile int filled; /* number of bytes filled */
- struct pwc_frame_buf *next; /* list */
-};
-
-/* additionnal informations used when dealing image between kernel and userland */
-struct pwc_imgbuf
-{
- unsigned long offset; /* offset of this buffer in the big array of image_data */
- int vma_use_count; /* count the number of time this memory is mapped */
+ struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+ struct list_head list;
+ void *data;
+ int filled; /* number of bytes filled */
};
struct pwc_device
{
struct video_device vdev;
-
- /* Pointer to our usb_device, may be NULL after unplug */
- struct usb_device *udev;
-
- int type; /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
- int release; /* release number */
- int features; /* feature bits */
- char serial[30]; /* serial number (string) */
- int error_status; /* set when something goes wrong with the cam (unplugged, USB errors) */
- int usb_init; /* set when the cam has been initialized over USB */
-
- /*** Video data ***/
- int vopen; /* flag */
- int vendpoint; /* video isoc endpoint */
- int vcinterface; /* video control interface */
- int valternate; /* alternate interface needed */
- int vframes, vsize; /* frames-per-second & size (see PSZ_*) */
- int pixfmt; /* pixelformat: V4L2_PIX_FMT_YUV420 or raw: _PWC1, _PWC2 */
- int vframe_count; /* received frames */
- int vframes_dumped; /* counter for dumped frames */
- int vframes_error; /* frames received in error */
- int vmax_packet_size; /* USB maxpacket size */
- int vlast_packet_size; /* for frame synchronisation */
- int visoc_errors; /* number of contiguous ISOC errors */
- int vcompression; /* desired compression factor */
- int vbandlength; /* compressed band length; 0 is uncompressed */
- char vsnapshot; /* snapshot mode */
- char vsync; /* used by isoc handler */
- char vmirror; /* for ToUCaM series */
- char unplugged;
-
- int cmd_len;
- unsigned char cmd_buf[13];
-
- /* The image acquisition requires 3 to 4 steps:
- 1. data is gathered in short packets from the USB controller
- 2. data is synchronized and packed into a frame buffer
- 3a. in case data is compressed, decompress it directly into image buffer
- 3b. in case data is uncompressed, copy into image buffer with viewport
- 4. data is transferred to the user process
-
- Note that MAX_ISO_BUFS != MAX_FRAMES != MAX_IMAGES....
- We have in effect a back-to-back-double-buffer system.
- */
- /* 1: isoc */
- struct pwc_iso_buf sbuf[MAX_ISO_BUFS];
- char iso_init;
-
- /* 2: frame */
- struct pwc_frame_buf *fbuf; /* all frames */
- struct pwc_frame_buf *empty_frames, *empty_frames_tail; /* all empty frames */
- struct pwc_frame_buf *full_frames, *full_frames_tail; /* all filled frames */
- struct pwc_frame_buf *fill_frame; /* frame currently being filled */
- struct pwc_frame_buf *read_frame; /* frame currently read by user process */
- int frame_header_size, frame_trailer_size;
- int frame_size;
- int frame_total_size; /* including header & trailer */
- int drop_frames;
-
- /* 3: decompression */
- void *decompress_data; /* private data for decompression engine */
-
- /* 4: image */
- /* We have an 'image' and a 'view', where 'image' is the fixed-size image
- as delivered by the camera, and 'view' is the size requested by the
- program. The camera image is centered in this viewport, laced with
- a gray or black border. view_min <= image <= view <= view_max;
- */
- int image_mask; /* bitmask of supported sizes */
- struct pwc_coord view_min, view_max; /* minimum and maximum viewable sizes */
- struct pwc_coord abs_max; /* maximum supported size with compression */
- struct pwc_coord image, view; /* image and viewport size */
- struct pwc_coord offset; /* offset within the viewport */
-
- void *image_data; /* total buffer, which is subdivided into ... */
- struct pwc_imgbuf images[MAX_IMAGES];/* ...several images... */
- int fill_image; /* ...which are rotated. */
- int len_per_image; /* length per image */
- int image_read_pos; /* In case we read data in pieces, keep track of were we are in the imagebuffer */
- int image_used[MAX_IMAGES]; /* For MCAPTURE and SYNC */
-
- struct mutex modlock; /* to prevent races in video_open(), etc */
- spinlock_t ptrlock; /* for manipulating the buffer pointers */
-
- /*** motorized pan/tilt feature */
- struct pwc_mpt_range angle_range;
- int pan_angle; /* in degrees * 100 */
- int tilt_angle; /* absolute angle; 0,0 is home position */
- int snapshot_button_status; /* set to 1 when the user push the button, reset to 0 when this value is read */
+ struct mutex modlock;
+
+ /* Pointer to our usb_device, may be NULL after unplug */
+ struct usb_device *udev;
+ /* Protects the setting of udev to NULL by our disconnect handler */
+ struct mutex udevlock;
+
+ /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
+ int type;
+ int release; /* release number */
+ int features; /* feature bits */
+ char serial[30]; /* serial number (string) */
+
+ /*** Video data ***/
+ struct file *capt_file; /* file doing video capture */
+ int vendpoint; /* video isoc endpoint */
+ int vcinterface; /* video control interface */
+ int valternate; /* alternate interface needed */
+ int vframes, vsize; /* frames-per-second & size (see PSZ_*) */
+ int pixfmt; /* pixelformat: V4L2_PIX_FMT_YUV420 or _PWCX */
+ int vframe_count; /* received frames */
+ int vmax_packet_size; /* USB maxpacket size */
+ int vlast_packet_size; /* for frame synchronisation */
+ int visoc_errors; /* number of contiguous ISOC errors */
+ int vcompression; /* desired compression factor */
+ int vbandlength; /* compressed band length; 0 is uncompressed */
+ char vsnapshot; /* snapshot mode */
+ char vsync; /* used by isoc handler */
+ char vmirror; /* for ToUCaM series */
+ char power_save; /* Do powersaving for this cam */
+
+ int cmd_len;
+ unsigned char cmd_buf[13];
+
+ struct urb *urbs[MAX_ISO_BUFS];
+ char iso_init;
+
+ /* videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock;
+
+ /*
+ * Frame currently being filled, this only gets touched by the
+ * isoc urb complete handler, and by stream start / stop since
+	 * start / stop touch it before / after starting / killing the urbs,
+	 * so no locking is needed around this
+ */
+ struct pwc_frame_buf *fill_buf;
+
+ int frame_header_size, frame_trailer_size;
+ int frame_size;
+ int frame_total_size; /* including header & trailer */
+ int drop_frames;
+
+ void *decompress_data; /* private data for decompression engine */
+
+ /*
+ * We have an 'image' and a 'view', where 'image' is the fixed-size img
+ * as delivered by the camera, and 'view' is the size requested by the
+ * program. The camera image is centered in this viewport, laced with
+ * a gray or black border. view_min <= image <= view <= view_max;
+ */
+ int image_mask; /* supported sizes */
+ struct pwc_coord view_min, view_max; /* minimum and maximum view */
+ struct pwc_coord abs_max; /* maximum supported size */
+ struct pwc_coord image, view; /* image and viewport size */
+ struct pwc_coord offset; /* offset of the viewport */
+
+ /*** motorized pan/tilt feature */
+ struct pwc_mpt_range angle_range;
+ int pan_angle; /* in degrees * 100 */
+ int tilt_angle; /* absolute angle; 0,0 is home */
+
+ /*
+	 * Set to 1 when the user pushes the button, reset to 0
+ * when this value is read from sysfs.
+ */
+ int snapshot_button_status;
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
- struct input_dev *button_dev; /* webcam snapshot button input */
- char button_phys[64];
+ struct input_dev *button_dev; /* webcam snapshot button input */
+ char button_phys[64];
#endif
- /*** Misc. data ***/
- wait_queue_head_t frameq; /* When waiting for a frame to finish... */
-#if PWC_INT_PIPE
- void *usb_int_handler; /* for the interrupt endpoint */
-#endif
+ /* controls */
+ struct v4l2_ctrl_handler ctrl_handler;
+ u16 saturation_fmt;
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *gamma;
+ struct {
+ /* awb / red-blue balance cluster */
+ struct v4l2_ctrl *auto_white_balance;
+ struct v4l2_ctrl *red_balance;
+ struct v4l2_ctrl *blue_balance;
+ /* usb ctrl transfers are slow, so we cache things */
+ int color_bal_valid;
+ unsigned long last_color_bal_update; /* In jiffies */
+ s32 last_red_balance;
+ s32 last_blue_balance;
+ };
+ struct {
+ /* autogain / gain cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ int gain_valid;
+ unsigned long last_gain_update; /* In jiffies */
+ s32 last_gain;
+ };
+ struct {
+ /* exposure_auto / exposure cluster */
+ struct v4l2_ctrl *exposure_auto;
+ struct v4l2_ctrl *exposure;
+ int exposure_valid;
+ unsigned long last_exposure_update; /* In jiffies */
+ s32 last_exposure;
+ };
+ struct v4l2_ctrl *colorfx;
+ struct {
+ /* autocontour/contour cluster */
+ struct v4l2_ctrl *autocontour;
+ struct v4l2_ctrl *contour;
+ };
+ struct v4l2_ctrl *backlight;
+ struct v4l2_ctrl *flicker;
+ struct v4l2_ctrl *noise_reduction;
+ struct v4l2_ctrl *save_user;
+ struct v4l2_ctrl *restore_user;
+ struct v4l2_ctrl *restore_factory;
+ struct {
+ /* motor control cluster */
+ struct v4l2_ctrl *motor_pan;
+ struct v4l2_ctrl *motor_tilt;
+ struct v4l2_ctrl *motor_pan_reset;
+ struct v4l2_ctrl *motor_tilt_reset;
+ };
+ /* CODEC3 models have both gain and exposure controlled by autogain */
+ struct v4l2_ctrl *autogain_expo_cluster[3];
};
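The *_valid / last_* fields in the clusters above implement a small read cache: USB control transfers are slow, so a control read only goes out to the camera when the cached value has expired. A sketch of that pattern under assumed names and an assumed one-second validity window (the real logic is in pwc-v4l.c, not shown here):

	/* Illustrative cache-aware read; function name and HZ window are assumptions. */
	static int pwc_get_gain_cached(struct pwc_device *pdev, s32 *gain)
	{
		int ret, value;

		if (pdev->gain_valid &&
		    time_before(jiffies, pdev->last_gain_update + HZ)) {
			*gain = pdev->last_gain;
			return 0;
		}
		ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL, READ_AGC_FORMATTER, &value);
		if (ret < 0)
			return ret;
		pdev->last_gain = value;
		pdev->last_gain_update = jiffies;
		pdev->gain_valid = 1;
		*gain = value;
		return 0;
	}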
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/* Global variables */
#ifdef CONFIG_USB_PWC_DEBUG
extern int pwc_trace;
#endif
-extern int pwc_mbufs;
-
-/** functions in pwc-if.c */
-int pwc_handle_frame(struct pwc_device *pdev);
-void pwc_next_image(struct pwc_device *pdev);
-int pwc_isoc_init(struct pwc_device *pdev);
-void pwc_isoc_cleanup(struct pwc_device *pdev);
/** Functions in pwc-misc.c */
/* sizes in pixels */
@@ -291,50 +355,25 @@ void pwc_construct(struct pwc_device *pdev);
/* Request a certain video mode. Returns < 0 if not possible */
extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot);
extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
-/* Calculate the number of bytes per image (not frame) */
extern int pwc_mpt_reset(struct pwc_device *pdev, int flags);
extern int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt);
-
-/* Various controls; should be obvious. Value 0..65535, or < 0 on error */
-extern int pwc_get_brightness(struct pwc_device *pdev);
-extern int pwc_set_brightness(struct pwc_device *pdev, int value);
-extern int pwc_get_contrast(struct pwc_device *pdev);
-extern int pwc_set_contrast(struct pwc_device *pdev, int value);
-extern int pwc_get_gamma(struct pwc_device *pdev);
-extern int pwc_set_gamma(struct pwc_device *pdev, int value);
-extern int pwc_get_saturation(struct pwc_device *pdev, int *value);
-extern int pwc_set_saturation(struct pwc_device *pdev, int value);
extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
-extern int pwc_restore_user(struct pwc_device *pdev);
-extern int pwc_save_user(struct pwc_device *pdev);
-extern int pwc_restore_factory(struct pwc_device *pdev);
-
-/* exported for use by v4l2 controls */
-extern int pwc_get_red_gain(struct pwc_device *pdev, int *value);
-extern int pwc_set_red_gain(struct pwc_device *pdev, int value);
-extern int pwc_get_blue_gain(struct pwc_device *pdev, int *value);
-extern int pwc_set_blue_gain(struct pwc_device *pdev, int value);
-extern int pwc_get_awb(struct pwc_device *pdev);
-extern int pwc_set_awb(struct pwc_device *pdev, int mode);
-extern int pwc_set_agc(struct pwc_device *pdev, int mode, int value);
-extern int pwc_get_agc(struct pwc_device *pdev, int *value);
-extern int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value);
-extern int pwc_get_shutter_speed(struct pwc_device *pdev, int *value);
-
-extern int pwc_set_colour_mode(struct pwc_device *pdev, int colour);
-extern int pwc_get_colour_mode(struct pwc_device *pdev, int *colour);
-extern int pwc_set_contour(struct pwc_device *pdev, int contour);
-extern int pwc_get_contour(struct pwc_device *pdev, int *contour);
-extern int pwc_set_backlight(struct pwc_device *pdev, int backlight);
-extern int pwc_get_backlight(struct pwc_device *pdev, int *backlight);
-extern int pwc_set_flicker(struct pwc_device *pdev, int flicker);
-extern int pwc_get_flicker(struct pwc_device *pdev, int *flicker);
-extern int pwc_set_dynamic_noise(struct pwc_device *pdev, int noise);
-extern int pwc_get_dynamic_noise(struct pwc_device *pdev, int *noise);
+extern int send_control_msg(struct pwc_device *pdev,
+ u8 request, u16 value, void *buf, int buflen);
+
+/* Control get / set helpers */
+int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data);
+int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data);
+int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data);
+#define pwc_set_s8_ctrl pwc_set_u8_ctrl
+int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *dat);
+int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data);
+int pwc_button_ctrl(struct pwc_device *pdev, u16 value);
+int pwc_init_controls(struct pwc_device *pdev);
/* Power down or up the camera; not supported by all models */
-extern int pwc_camera_power(struct pwc_device *pdev, int power);
+extern void pwc_camera_power(struct pwc_device *pdev, int power);
/* Private ioctl()s; see pwc-ioctl.h */
extern long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg);
@@ -343,12 +382,6 @@ extern const struct v4l2_ioctl_ops pwc_ioctl_ops;
/** pwc-uncompress.c */
/* Expand frame to image, possibly including decompression. Uses read_frame and fill_image */
-extern int pwc_decompress(struct pwc_device *pdev);
-
-#ifdef __cplusplus
-}
-#endif
-
+int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf);
#endif
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
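The helper prototypes added above funnel every camera knob through one USB control transfer keyed by a (request, formatter) pair. A plausible shape for the u8 set path, built only on the send_control_msg() prototype declared in this header (a sketch, not the literal pwc-ctrl.c code):

	/* Sketch only; a production driver may need a kmalloc'd, DMA-safe buffer here. */
	int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
	{
		int ret;

		ret = send_control_msg(pdev, request, value, &data, sizeof(data));
		if (ret < 0)
			return ret;
		return 0;
	}

The V4L2_CID_CONTRAST case earlier then reduces to pwc_set_u8_ctrl(pdev, SET_LUM_CTL, CONTRAST_FORMATTER, ctrl->val).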
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index b42bfa5..d07df22 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -22,7 +22,6 @@
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -40,7 +39,7 @@
#include <mach/dma.h>
#include <mach/camera.h>
-#define PXA_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5)
+#define PXA_CAM_VERSION "0.0.6"
#define PXA_CAM_DRV_NAME "pxa27x-camera"
/* Camera Interface */
@@ -247,7 +246,7 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (bytes_per_line < 0)
return bytes_per_line;
- dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
+ dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
*size = bytes_per_line * icd->user_height;
@@ -262,13 +261,13 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
int i;
BUG_ON(in_interrupt());
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
&buf->vb, buf->vb.baddr, buf->vb.bsize);
/*
@@ -429,7 +428,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
struct videobuf_buffer *vb, enum v4l2_field field)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
struct device *dev = pcdev->soc_host.v4l2_dev.dev;
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
@@ -636,11 +635,11 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n",
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n",
__func__, vb, vb->baddr, vb->bsize, pcdev->active);
list_add_tail(&vb->queue, &pcdev->capture);
@@ -658,7 +657,7 @@ static void pxa_videobuf_release(struct videobuf_queue *vq,
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
#ifdef DEBUG
struct soc_camera_device *icd = vq->priv_data;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -843,7 +842,7 @@ static struct videobuf_queue_ops pxa_videobuf_ops = {
static void pxa_camera_init_videobuf(struct videobuf_queue *q,
struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
/*
@@ -972,7 +971,7 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
*/
static int pxa_camera_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
if (pcdev->icd)
@@ -982,7 +981,7 @@ static int pxa_camera_add_device(struct soc_camera_device *icd)
pcdev->icd = icd;
- dev_info(icd->dev.parent, "PXA Camera driver attached to camera %d\n",
+ dev_info(icd->parent, "PXA Camera driver attached to camera %d\n",
icd->devnum);
return 0;
@@ -991,12 +990,12 @@ static int pxa_camera_add_device(struct soc_camera_device *icd)
/* Called with .video_lock held */
static void pxa_camera_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
BUG_ON(icd != pcdev->icd);
- dev_info(icd->dev.parent, "PXA Camera driver detached from camera %d\n",
+ dev_info(icd->parent, "PXA Camera driver detached from camera %d\n",
icd->devnum);
/* disable capture, disable interrupts */
@@ -1057,7 +1056,7 @@ static int test_platform_param(struct pxa_camera_dev *pcdev,
static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
unsigned long flags, __u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
unsigned long dw, bpp;
@@ -1152,7 +1151,7 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
unsigned long bus_flags, camera_flags, common_flags;
int ret;
@@ -1210,7 +1209,7 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
unsigned char buswidth)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
unsigned long bus_flags, camera_flags;
int ret = test_platform_param(pcdev, buswidth, &bus_flags);
@@ -1247,7 +1246,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
struct soc_camera_format_xlate *xlate)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
int formats = 0, ret;
struct pxa_cam *cam;
enum v4l2_mbus_pixelcode code;
@@ -1335,9 +1334,9 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
struct v4l2_crop *a)
{
struct v4l2_rect *rect = &a->c;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct pxa_camera_dev *pcdev = ici->priv;
- struct device *dev = icd->dev.parent;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_sense sense = {
.master_clock = pcdev->mclk,
@@ -1379,7 +1378,7 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
return ret;
if (pxa_camera_check_frame(mf.width, mf.height)) {
- dev_warn(icd->dev.parent,
+ dev_warn(icd->parent,
"Inconsistent state. Use S_FMT to repair\n");
return -EINVAL;
}
@@ -1406,9 +1405,9 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
static int pxa_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct pxa_camera_dev *pcdev = ici->priv;
- struct device *dev = icd->dev.parent;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate = NULL;
struct soc_camera_sense sense = {
@@ -1485,7 +1484,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -1499,16 +1498,11 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
&pix->height, 32, 2048, 0,
pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
- pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- if (pix->bytesperline < 0)
- return pix->bytesperline;
- pix->sizeimage = pix->height * pix->bytesperline;
-
/* limit to sensor capabilities */
mf.width = pix->width;
mf.height = pix->height;
- mf.field = pix->field;
+ /* Only progressive video supported so far */
+ mf.field = V4L2_FIELD_NONE;
mf.colorspace = pix->colorspace;
mf.code = xlate->code;
@@ -1527,7 +1521,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
break;
default:
/* TODO: support interlaced at least in pass-through mode */
- dev_err(icd->dev.parent, "Field type %d unsupported.\n",
+ dev_err(icd->parent, "Field type %d unsupported.\n",
mf.field);
return -EINVAL;
}
@@ -1578,15 +1572,14 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller :-) */
strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
- cap->version = PXA_CAM_VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
}
-static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state)
+static int pxa_camera_suspend(struct device *dev)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct pxa_camera_dev *pcdev = ici->priv;
int i = 0, ret = 0;
@@ -1596,15 +1589,19 @@ static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state)
pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3);
pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4);
- if ((pcdev->icd) && (pcdev->icd->ops->suspend))
- ret = pcdev->icd->ops->suspend(pcdev->icd, state);
+ if (pcdev->icd) {
+ struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->icd);
+ ret = v4l2_subdev_call(sd, core, s_power, 0);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ }
return ret;
}
-static int pxa_camera_resume(struct soc_camera_device *icd)
+static int pxa_camera_resume(struct device *dev)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct pxa_camera_dev *pcdev = ici->priv;
int i = 0, ret = 0;
@@ -1618,8 +1615,12 @@ static int pxa_camera_resume(struct soc_camera_device *icd)
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3);
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4);
- if ((pcdev->icd) && (pcdev->icd->ops->resume))
- ret = pcdev->icd->ops->resume(pcdev->icd);
+ if (pcdev->icd) {
+ struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->icd);
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ }
/* Restart frame capture if active buffer exists */
if (!ret && pcdev->active)
@@ -1632,8 +1633,6 @@ static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
.owner = THIS_MODULE,
.add = pxa_camera_add_device,
.remove = pxa_camera_remove_device,
- .suspend = pxa_camera_suspend,
- .resume = pxa_camera_resume,
.set_crop = pxa_camera_set_crop,
.get_formats = pxa_camera_get_formats,
.put_formats = pxa_camera_put_formats,
@@ -1818,9 +1817,15 @@ static int __devexit pxa_camera_remove(struct platform_device *pdev)
return 0;
}
+static struct dev_pm_ops pxa_camera_pm = {
+ .suspend = pxa_camera_suspend,
+ .resume = pxa_camera_resume,
+};
+
static struct platform_driver pxa_camera_driver = {
.driver = {
.name = PXA_CAM_DRV_NAME,
+ .pm = &pxa_camera_pm,
},
.probe = pxa_camera_probe,
.remove = __devexit_p(pxa_camera_remove),
@@ -1843,4 +1848,5 @@ module_exit(pxa_camera_exit);
MODULE_DESCRIPTION("PXA27x SoC Camera Host driver");
MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
MODULE_LICENSE("GPL");
+MODULE_VERSION(PXA_CAM_VERSION);
MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME);
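A side note on the suspend/resume conversion in this file: now that the handlers have the standard dev_pm_ops signatures, the table could also be generated with the SIMPLE_DEV_PM_OPS helper, which is roughly equivalent (it additionally wires the same callbacks into the hibernation slots and makes the table const):

	/* Roughly equivalent alternative to the open-coded pxa_camera_pm above. */
	static SIMPLE_DEV_PM_OPS(pxa_camera_pm, pxa_camera_suspend, pxa_camera_resume);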
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 57e11b6..847ccc0 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -1364,10 +1364,9 @@ static int rj54n1_video_probe(struct soc_camera_device *icd,
int data1, data2;
int ret;
- /* This could be a BUG_ON() or a WARN_ON(), or remove it completely */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/* Read out the chip version register */
data1 = reg_read(client, RJ54N1_DEV_CODE);
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 5b9dce8..803c9c8 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -16,15 +16,10 @@
* Example maximum bandwidth utilization:
*
* -full size, color mode YUYV or YUV422P: 2 channels at once
- *
* -full or half size Grey scale: all 4 channels at once
- *
* -half size, color mode YUYV or YUV422P: all 4 channels at once
- *
* -full size, color mode YUYV or YUV422P 1/2 frame rate: all 4 channels
* at once.
- * (TODO: Incorporate videodev2 frame rate(FR) enumeration,
- * which is currently experimental.)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -47,7 +42,6 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-common.h>
@@ -56,12 +50,7 @@
#include <linux/vmalloc.h>
#include <linux/usb.h>
-#define S2255_MAJOR_VERSION 1
-#define S2255_MINOR_VERSION 21
-#define S2255_RELEASE 0
-#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
- S2255_MINOR_VERSION, \
- S2255_RELEASE)
+#define S2255_VERSION "1.22.1"
#define FIRMWARE_FILE_NAME "f2255usb.bin"
/* default JPEG quality */
@@ -126,7 +115,7 @@
#define MASK_COLOR 0x000000ff
#define MASK_JPG_QUALITY 0x0000ff00
#define MASK_INPUT_TYPE 0x000f0000
-/* frame decimation. Not implemented by V4L yet(experimental in V4L) */
+/* frame decimation. */
#define FDEC_1 1 /* capture every frame. default */
#define FDEC_2 2 /* capture every 2nd frame */
#define FDEC_3 3 /* capture every 3rd frame */
@@ -312,9 +301,9 @@ struct s2255_fh {
};
/* current cypress EEPROM firmware version */
-#define S2255_CUR_USB_FWVER ((3 << 8) | 11)
+#define S2255_CUR_USB_FWVER ((3 << 8) | 12)
/* current DSP FW version */
-#define S2255_CUR_DSP_FWVER 10102
+#define S2255_CUR_DSP_FWVER 10104
/* Need DSP version 5+ for video status feature */
#define S2255_MIN_DSP_STATUS 5
#define S2255_MIN_DSP_COLORFILTER 8
@@ -502,7 +491,7 @@ static void planar422p_to_yuv_packed(const unsigned char *in,
static void s2255_reset_dsppower(struct s2255_dev *dev)
{
- s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1);
+ s2255_vendor_req(dev, 0x40, 0x0000, 0x0001, NULL, 0, 1);
msleep(10);
s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
msleep(600);
@@ -856,7 +845,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->driver, "s2255", sizeof(cap->driver));
strlcpy(cap->card, "s2255", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = S2255_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
}
@@ -1984,9 +1972,8 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
video_device_node_name(&channel->vdev));
}
- printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %d.%d\n",
- S2255_MAJOR_VERSION,
- S2255_MINOR_VERSION);
+ printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %s\n",
+ S2255_VERSION);
/* if no channels registered, return error and probe will fail*/
if (atomic_read(&dev->num_channels) == 0) {
v4l2_device_unregister(&dev->v4l2_dev);
@@ -2302,15 +2289,12 @@ static int s2255_board_init(struct s2255_dev *dev)
/* query the firmware */
fw_ver = s2255_get_fx2fw(dev);
- printk(KERN_INFO "2255 usb firmware version %d.%d\n",
+ printk(KERN_INFO "s2255: usb firmware version %d.%d\n",
(fw_ver >> 8) & 0xff,
fw_ver & 0xff);
if (fw_ver < S2255_CUR_USB_FWVER)
- dev_err(&dev->udev->dev,
- "usb firmware not up to date %d.%d\n",
- (fw_ver >> 8) & 0xff,
- fw_ver & 0xff);
+ printk(KERN_INFO "s2255: newer USB firmware available\n");
for (j = 0; j < MAX_CHANNELS; j++) {
struct s2255_channel *channel = &dev->channel[j];
@@ -2721,3 +2705,4 @@ module_exit(usb_s2255_exit);
MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver");
MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)");
MODULE_LICENSE("GPL");
+MODULE_VERSION(S2255_VERSION);
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 81b4a82..0d730e5 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
@@ -451,7 +450,6 @@ static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->version = KERNEL_VERSION(1, 0, 0);
cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VIDEO_CAPTURE_MPLANE;
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index bdf19ad..aa55066 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
@@ -774,7 +773,6 @@ static int fimc_m2m_querycap(struct file *file, void *priv,
strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->version = KERNEL_VERSION(1, 0, 0);
cap->capabilities = V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
@@ -1937,3 +1935,4 @@ module_exit(fimc_exit);
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.1");
diff --git a/drivers/media/video/s5p-mfc/Makefile b/drivers/media/video/s5p-mfc/Makefile
new file mode 100644
index 0000000..d066340
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o
+s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o s5p_mfc_opr.o
+s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
+s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_cmd.o
+s5p-mfc-y += s5p_mfc_pm.o s5p_mfc_shm.o
diff --git a/drivers/media/video/s5p-mfc/regs-mfc.h b/drivers/media/video/s5p-mfc/regs-mfc.h
new file mode 100644
index 0000000..053a8a8
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/regs-mfc.h
@@ -0,0 +1,413 @@
+/*
+ * Register definition file for Samsung MFC V5.1 Interface (FIMV) driver
+ *
+ * Kamil Debski, Copyright (c) 2010 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef _REGS_FIMV_H
+#define _REGS_FIMV_H
+
+#define S5P_FIMV_REG_SIZE (S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR)
+#define S5P_FIMV_REG_COUNT ((S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR) / 4)
+
+/* Number of bits that the buffer address should be shifted for particular
+ * MFC buffers. */
+#define S5P_FIMV_START_ADDR 0x0000
+#define S5P_FIMV_END_ADDR 0xe008
+
+#define S5P_FIMV_SW_RESET 0x0000
+#define S5P_FIMV_RISC_HOST_INT 0x0008
+
+/* Command from HOST to RISC */
+#define S5P_FIMV_HOST2RISC_CMD 0x0030
+#define S5P_FIMV_HOST2RISC_ARG1 0x0034
+#define S5P_FIMV_HOST2RISC_ARG2 0x0038
+#define S5P_FIMV_HOST2RISC_ARG3 0x003c
+#define S5P_FIMV_HOST2RISC_ARG4 0x0040
+
+/* Command from RISC to HOST */
+#define S5P_FIMV_RISC2HOST_CMD 0x0044
+#define S5P_FIMV_RISC2HOST_CMD_MASK 0x1FFFF
+#define S5P_FIMV_RISC2HOST_ARG1 0x0048
+#define S5P_FIMV_RISC2HOST_ARG2 0x004c
+#define S5P_FIMV_RISC2HOST_ARG3 0x0050
+#define S5P_FIMV_RISC2HOST_ARG4 0x0054
+
+#define S5P_FIMV_FW_VERSION 0x0058
+#define S5P_FIMV_SYS_MEM_SZ 0x005c
+#define S5P_FIMV_FW_STATUS 0x0080
+
+/* Memory controller register */
+#define S5P_FIMV_MC_DRAMBASE_ADR_A 0x0508
+#define S5P_FIMV_MC_DRAMBASE_ADR_B 0x050c
+#define S5P_FIMV_MC_STATUS 0x0510
+
+/* Common register */
+#define S5P_FIMV_COMMON_BASE_A 0x0600
+#define S5P_FIMV_COMMON_BASE_B 0x0700
+
+/* Decoder */
+#define S5P_FIMV_DEC_CHROMA_ADR (S5P_FIMV_COMMON_BASE_A)
+#define S5P_FIMV_DEC_LUMA_ADR (S5P_FIMV_COMMON_BASE_B)
+
+/* H.264 decoding */
+#define S5P_FIMV_H264_VERT_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+ /* vertical neighbor motion vector */
+#define S5P_FIMV_H264_NB_IP_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+ /* neighbor pixels for intra pred */
+#define S5P_FIMV_H264_MV_ADR (S5P_FIMV_COMMON_BASE_B + 0x80)
+ /* H264 motion vector */
+
+/* MPEG4 decoding */
+#define S5P_FIMV_MPEG4_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+ /* neighbor AC/DC coeff. */
+#define S5P_FIMV_MPEG4_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+ /* upper neighbor motion vector */
+#define S5P_FIMV_MPEG4_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
+ /* subseq. anchor motion vector */
+#define S5P_FIMV_MPEG4_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
+ /* overlap transform line */
+#define S5P_FIMV_MPEG4_SP_ADR (S5P_FIMV_COMMON_BASE_A + 0xa8)
+ /* syntax parser */
+
+/* H.263 decoding */
+#define S5P_FIMV_H263_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+#define S5P_FIMV_H263_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+#define S5P_FIMV_H263_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
+#define S5P_FIMV_H263_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
+
+/* VC-1 decoding */
+#define S5P_FIMV_VC1_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+#define S5P_FIMV_VC1_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+#define S5P_FIMV_VC1_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
+#define S5P_FIMV_VC1_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
+#define S5P_FIMV_VC1_BITPLANE3_ADR (S5P_FIMV_COMMON_BASE_A + 0x9c)
+ /* bitplane3 */
+#define S5P_FIMV_VC1_BITPLANE2_ADR (S5P_FIMV_COMMON_BASE_A + 0xa0)
+ /* bitplane2 */
+#define S5P_FIMV_VC1_BITPLANE1_ADR (S5P_FIMV_COMMON_BASE_A + 0xa4)
+ /* bitplane1 */
+
+/* Encoder */
+#define S5P_FIMV_ENC_REF0_LUMA_ADR (S5P_FIMV_COMMON_BASE_A + 0x1c)
+#define S5P_FIMV_ENC_REF1_LUMA_ADR (S5P_FIMV_COMMON_BASE_A + 0x20)
+ /* reconstructed luma */
+#define S5P_FIMV_ENC_REF0_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B)
+#define S5P_FIMV_ENC_REF1_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x04)
+ /* reconstructed chroma */
+#define S5P_FIMV_ENC_REF2_LUMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x10)
+#define S5P_FIMV_ENC_REF2_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x08)
+#define S5P_FIMV_ENC_REF3_LUMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x14)
+#define S5P_FIMV_ENC_REF3_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x0c)
+
+/* H.264 encoding */
+#define S5P_FIMV_H264_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
+ /* upper motion vector */
+#define S5P_FIMV_H264_NBOR_INFO_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
+ /* entropy engine's neighbor info. */
+#define S5P_FIMV_H264_UP_INTRA_MD_ADR (S5P_FIMV_COMMON_BASE_A + 0x08)
+ /* upper intra MD */
+#define S5P_FIMV_H264_COZERO_FLAG_ADR (S5P_FIMV_COMMON_BASE_A + 0x10)
+ /* direct cozero flag */
+#define S5P_FIMV_H264_UP_INTRA_PRED_ADR (S5P_FIMV_COMMON_BASE_B + 0x40)
+ /* upper intra PRED */
+
+/* H.263 encoding */
+#define S5P_FIMV_H263_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
+ /* upper motion vector */
+#define S5P_FIMV_H263_ACDC_COEF_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
+ /* upper Q coeff. */
+
+/* MPEG4 encoding */
+#define S5P_FIMV_MPEG4_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
+ /* upper motion vector */
+#define S5P_FIMV_MPEG4_ACDC_COEF_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
+ /* upper Q coeff. */
+#define S5P_FIMV_MPEG4_COZERO_FLAG_ADR (S5P_FIMV_COMMON_BASE_A + 0x10)
+ /* direct cozero flag */
+
+#define S5P_FIMV_ENC_REF_B_LUMA_ADR 0x062c /* ref B Luma addr */
+#define S5P_FIMV_ENC_REF_B_CHROMA_ADR 0x0630 /* ref B Chroma addr */
+
+#define S5P_FIMV_ENC_CUR_LUMA_ADR 0x0718 /* current Luma addr */
+#define S5P_FIMV_ENC_CUR_CHROMA_ADR 0x071C /* current Chroma addr */
+
+/* Codec common register */
+#define S5P_FIMV_ENC_HSIZE_PX 0x0818 /* frame width at encoder */
+#define S5P_FIMV_ENC_VSIZE_PX 0x081c /* frame height at encoder */
+#define S5P_FIMV_ENC_PROFILE 0x0830 /* profile register */
+#define S5P_FIMV_ENC_PROFILE_H264_MAIN 0
+#define S5P_FIMV_ENC_PROFILE_H264_HIGH 1
+#define S5P_FIMV_ENC_PROFILE_H264_BASELINE 2
+#define S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE 0
+#define S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE 1
+#define S5P_FIMV_ENC_PIC_STRUCT 0x083c /* picture field/frame flag */
+#define S5P_FIMV_ENC_LF_CTRL 0x0848 /* loop filter control */
+#define S5P_FIMV_ENC_ALPHA_OFF 0x084c /* loop filter alpha offset */
+#define S5P_FIMV_ENC_BETA_OFF 0x0850 /* loop filter beta offset */
+#define S5P_FIMV_MR_BUSIF_CTRL 0x0854 /* hidden, bus interface ctrl */
+#define S5P_FIMV_ENC_PXL_CACHE_CTRL 0x0a00 /* pixel cache control */
+
+/* Channel & stream interface register */
+#define S5P_FIMV_SI_RTN_CHID 0x2000 /* Return CH inst ID register */
+#define S5P_FIMV_SI_CH0_INST_ID 0x2040 /* codec instance ID */
+#define S5P_FIMV_SI_CH1_INST_ID 0x2080 /* codec instance ID */
+/* Decoder */
+#define S5P_FIMV_SI_VRESOL 0x2004 /* vertical res of decoder */
+#define S5P_FIMV_SI_HRESOL 0x2008 /* horizontal res of decoder */
+#define S5P_FIMV_SI_BUF_NUMBER 0x200c /* number of frames in the
+ decoded pic */
+#define S5P_FIMV_SI_DISPLAY_Y_ADR 0x2010 /* luma addr of displayed pic */
+#define S5P_FIMV_SI_DISPLAY_C_ADR	0x2014 /* chroma addr of displayed pic */
+#define S5P_FIMV_SI_CONSUMED_BYTES 0x2018 /* Consumed number of bytes to
+ decode a frame */
+#define S5P_FIMV_SI_DISPLAY_STATUS 0x201c /* status of decoded picture */
+
+#define S5P_FIMV_SI_CH0_SB_ST_ADR 0x2044 /* start addr of stream buf */
+#define S5P_FIMV_SI_CH0_SB_FRM_SIZE 0x2048 /* size of stream buf */
+#define S5P_FIMV_SI_CH0_DESC_ADR 0x204c /* addr of descriptor buf */
+#define S5P_FIMV_SI_CH0_CPB_SIZE 0x2058 /* max size of coded pic. buf */
+#define S5P_FIMV_SI_CH0_DESC_SIZE 0x205c /* max size of descriptor buf */
+
+#define S5P_FIMV_SI_CH1_SB_ST_ADR 0x2084 /* start addr of stream buf */
+#define S5P_FIMV_SI_CH1_SB_FRM_SIZE 0x2088 /* size of stream buf */
+#define S5P_FIMV_SI_CH1_DESC_ADR 0x208c /* addr of descriptor buf */
+#define S5P_FIMV_SI_CH1_CPB_SIZE 0x2098 /* max size of coded pic. buf */
+#define S5P_FIMV_SI_CH1_DESC_SIZE 0x209c /* max size of descriptor buf */
+
+#define S5P_FIMV_CRC_LUMA0 0x2030 /* luma crc data per frame
+ (top field) */
+#define S5P_FIMV_CRC_CHROMA0 0x2034 /* chroma crc data per frame
+ (top field) */
+#define S5P_FIMV_CRC_LUMA1 0x2038 /* luma crc data per bottom
+ field */
+#define S5P_FIMV_CRC_CHROMA1 0x203c /* chroma crc data per bottom
+ field */
+
+/* Display status */
+#define S5P_FIMV_DEC_STATUS_DECODING_ONLY 0
+#define S5P_FIMV_DEC_STATUS_DECODING_DISPLAY 1
+#define S5P_FIMV_DEC_STATUS_DISPLAY_ONLY 2
+#define S5P_FIMV_DEC_STATUS_DECODING_EMPTY 3
+#define S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK 7
+#define S5P_FIMV_DEC_STATUS_PROGRESSIVE (0<<3)
+#define S5P_FIMV_DEC_STATUS_INTERLACE (1<<3)
+#define S5P_FIMV_DEC_STATUS_INTERLACE_MASK (1<<3)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_TWO (0<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_FOUR (1<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_MASK (1<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_GENERATED (1<<5)
+#define S5P_FIMV_DEC_STATUS_CRC_NOT_GENERATED (0<<5)
+#define S5P_FIMV_DEC_STATUS_CRC_MASK (1<<5)
+
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_MASK (3<<4)
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_INC (1<<4)
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_DEC (2<<4)
+
+/* Decode frame address */
+#define S5P_FIMV_DECODE_Y_ADR 0x2024
+#define S5P_FIMV_DECODE_C_ADR 0x2028
+
+/* Decoded frame type */
+#define S5P_FIMV_DECODE_FRAME_TYPE 0x2020
+#define S5P_FIMV_DECODE_FRAME_MASK 7
+
+#define S5P_FIMV_DECODE_FRAME_SKIPPED 0
+#define S5P_FIMV_DECODE_FRAME_I_FRAME 1
+#define S5P_FIMV_DECODE_FRAME_P_FRAME 2
+#define S5P_FIMV_DECODE_FRAME_B_FRAME 3
+#define S5P_FIMV_DECODE_FRAME_OTHER_FRAME 4
+
+/* Sizes of buffers required for decoding */
+#define S5P_FIMV_DEC_NB_IP_SIZE (32 * 1024)
+#define S5P_FIMV_DEC_VERT_NB_MV_SIZE (16 * 1024)
+#define S5P_FIMV_DEC_NB_DCAC_SIZE (16 * 1024)
+#define S5P_FIMV_DEC_UPNB_MV_SIZE (68 * 1024)
+#define S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE (136 * 1024)
+#define S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE (32 * 1024)
+#define S5P_FIMV_DEC_VC1_BITPLANE_SIZE (2 * 1024)
+#define S5P_FIMV_DEC_STX_PARSER_SIZE (68 * 1024)
+
+#define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
+#define S5P_FIMV_ENC_BUF_ALIGN (8 * 1024)
+#define S5P_FIMV_NV12M_HALIGN 16
+#define S5P_FIMV_NV12M_LVALIGN 16
+#define S5P_FIMV_NV12M_CVALIGN 8
+#define S5P_FIMV_NV12MT_HALIGN 128
+#define S5P_FIMV_NV12MT_VALIGN 32
+#define S5P_FIMV_NV12M_SALIGN 2048
+#define S5P_FIMV_NV12MT_SALIGN 8192
+
+/* Sizes of buffers required for encoding */
+#define S5P_FIMV_ENC_UPMV_SIZE 0x10000
+#define S5P_FIMV_ENC_COLFLG_SIZE 0x10000
+#define S5P_FIMV_ENC_INTRAMD_SIZE 0x10000
+#define S5P_FIMV_ENC_INTRAPRED_SIZE 0x4000
+#define S5P_FIMV_ENC_NBORINFO_SIZE 0x10000
+#define S5P_FIMV_ENC_ACDCCOEF_SIZE 0x10000
+
+/* Encoder */
+#define S5P_FIMV_ENC_SI_STRM_SIZE 0x2004 /* stream size */
+#define S5P_FIMV_ENC_SI_PIC_CNT 0x2008 /* picture count */
+#define S5P_FIMV_ENC_SI_WRITE_PTR 0x200c /* write pointer */
+#define S5P_FIMV_ENC_SI_SLICE_TYPE 0x2010 /* slice type(I/P/B/IDR) */
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_NON_CODED 0
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_I 1
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_P 2
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_B 3
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_SKIPPED 4
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_OTHERS 5
+#define S5P_FIMV_ENCODED_Y_ADDR 0x2014 /* the addr of the encoded
+ luma pic */
+#define S5P_FIMV_ENCODED_C_ADDR 0x2018 /* the addr of the encoded
+ chroma pic */
+
+#define S5P_FIMV_ENC_SI_CH0_SB_ADR 0x2044 /* addr of stream buf */
+#define S5P_FIMV_ENC_SI_CH0_SB_SIZE 0x204c /* size of stream buf */
+#define S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR 0x2050 /* current Luma addr */
+#define S5P_FIMV_ENC_SI_CH0_CUR_C_ADR 0x2054 /* current Chroma addr */
+#define S5P_FIMV_ENC_SI_CH0_FRAME_INS 0x2058 /* frame insertion */
+
+#define S5P_FIMV_ENC_SI_CH1_SB_ADR 0x2084 /* addr of stream buf */
+#define S5P_FIMV_ENC_SI_CH1_SB_SIZE 0x208c /* size of stream buf */
+#define S5P_FIMV_ENC_SI_CH1_CUR_Y_ADR 0x2090 /* current Luma addr */
+#define S5P_FIMV_ENC_SI_CH1_CUR_C_ADR 0x2094 /* current Chroma addr */
+#define S5P_FIMV_ENC_SI_CH1_FRAME_INS 0x2098 /* frame insertion */
+
+#define S5P_FIMV_ENC_PIC_TYPE_CTRL 0xc504 /* pic type level control */
+#define S5P_FIMV_ENC_B_RECON_WRITE_ON 0xc508 /* B frame recon write ctrl */
+#define S5P_FIMV_ENC_MSLICE_CTRL 0xc50c /* multi slice control */
+#define S5P_FIMV_ENC_MSLICE_MB 0xc510 /* MB number in the one slice */
+#define S5P_FIMV_ENC_MSLICE_BIT 0xc514 /* bit count for one slice */
+#define S5P_FIMV_ENC_CIR_CTRL 0xc518 /* number of intra refresh MB */
+#define S5P_FIMV_ENC_MAP_FOR_CUR 0xc51c /* linear or tiled mode */
+#define S5P_FIMV_ENC_PADDING_CTRL 0xc520 /* padding control */
+
+#define S5P_FIMV_ENC_RC_CONFIG 0xc5a0 /* RC config */
+#define S5P_FIMV_ENC_RC_BIT_RATE 0xc5a8 /* bit rate */
+#define S5P_FIMV_ENC_RC_QBOUND 0xc5ac /* max/min QP */
+#define S5P_FIMV_ENC_RC_RPARA 0xc5b0 /* rate control reaction coeff */
+#define S5P_FIMV_ENC_RC_MB_CTRL 0xc5b4 /* MB adaptive scaling */
+
+/* Encoder for H264 only */
+#define S5P_FIMV_ENC_H264_ENTROPY_MODE 0xd004 /* CAVLC or CABAC */
+#define S5P_FIMV_ENC_H264_ALPHA_OFF 0xd008 /* loop filter alpha offset */
+#define S5P_FIMV_ENC_H264_BETA_OFF 0xd00c /* loop filter beta offset */
+#define S5P_FIMV_ENC_H264_NUM_OF_REF 0xd010 /* number of reference for P/B */
+#define S5P_FIMV_ENC_H264_TRANS_FLAG 0xd034 /* 8x8 transform flag in PPS &
+ high profile */
+
+#define S5P_FIMV_ENC_RC_FRAME_RATE 0xd0d0 /* frame rate */
+
+/* Encoder for MPEG4 only */
+#define S5P_FIMV_ENC_MPEG4_QUART_PXL 0xe008 /* qpel interpolation ctrl */
+
+/* Additional */
+#define S5P_FIMV_SI_CH0_DPB_CONF_CTRL 0x2068 /* DPB Config Control Register */
+#define S5P_FIMV_SLICE_INT_MASK 1
+#define S5P_FIMV_SLICE_INT_SHIFT 31
+#define S5P_FIMV_DDELAY_ENA_SHIFT 30
+#define S5P_FIMV_DDELAY_VAL_MASK 0xff
+#define S5P_FIMV_DDELAY_VAL_SHIFT 16
+#define S5P_FIMV_DPB_COUNT_MASK 0xffff
+#define S5P_FIMV_DPB_FLUSH_MASK 1
+#define S5P_FIMV_DPB_FLUSH_SHIFT 14
+
+
+#define S5P_FIMV_SI_CH0_RELEASE_BUF 0x2060 /* DPB release buffer register */
+#define S5P_FIMV_SI_CH0_HOST_WR_ADR 0x2064 /* address of shared memory */
+
+/* Codec numbers */
+#define S5P_FIMV_CODEC_NONE -1
+
+#define S5P_FIMV_CODEC_H264_DEC 0
+#define S5P_FIMV_CODEC_VC1_DEC 1
+#define S5P_FIMV_CODEC_MPEG4_DEC 2
+#define S5P_FIMV_CODEC_MPEG2_DEC 3
+#define S5P_FIMV_CODEC_H263_DEC 4
+#define S5P_FIMV_CODEC_VC1RCV_DEC 5
+
+#define S5P_FIMV_CODEC_H264_ENC 16
+#define S5P_FIMV_CODEC_MPEG4_ENC 17
+#define S5P_FIMV_CODEC_H263_ENC 18
+
+/* Channel Control Register */
+#define S5P_FIMV_CH_SEQ_HEADER 1
+#define S5P_FIMV_CH_FRAME_START 2
+#define S5P_FIMV_CH_LAST_FRAME 3
+#define S5P_FIMV_CH_INIT_BUFS 4
+#define S5P_FIMV_CH_FRAME_START_REALLOC 5
+#define S5P_FIMV_CH_MASK 7
+#define S5P_FIMV_CH_SHIFT 16
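+/* A channel command is typically packed as
+ * ((cmd & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) before being written,
+ * together with the instance number, to the channel/instance register. */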
+
+
+/* Host to RISC command */
+#define S5P_FIMV_H2R_CMD_EMPTY 0
+#define S5P_FIMV_H2R_CMD_OPEN_INSTANCE 1
+#define S5P_FIMV_H2R_CMD_CLOSE_INSTANCE 2
+#define S5P_FIMV_H2R_CMD_SYS_INIT 3
+#define S5P_FIMV_H2R_CMD_FLUSH 4
+#define S5P_FIMV_H2R_CMD_SLEEP 5
+#define S5P_FIMV_H2R_CMD_WAKEUP 6
+
+#define S5P_FIMV_R2H_CMD_EMPTY 0
+#define S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET 1
+#define S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET 2
+#define S5P_FIMV_R2H_CMD_RSV_RET 3
+#define S5P_FIMV_R2H_CMD_SEQ_DONE_RET 4
+#define S5P_FIMV_R2H_CMD_FRAME_DONE_RET 5
+#define S5P_FIMV_R2H_CMD_SLICE_DONE_RET 6
+#define S5P_FIMV_R2H_CMD_ENC_COMPLETE_RET 7
+#define S5P_FIMV_R2H_CMD_SYS_INIT_RET 8
+#define S5P_FIMV_R2H_CMD_FW_STATUS_RET 9
+#define S5P_FIMV_R2H_CMD_SLEEP_RET 10
+#define S5P_FIMV_R2H_CMD_WAKEUP_RET 11
+#define S5P_FIMV_R2H_CMD_FLUSH_RET 12
+#define S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET 15
+#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16
+#define S5P_FIMV_R2H_CMD_ERR_RET 32
+
+/* Error handling defines */
+#define S5P_FIMV_ERR_WARNINGS_START 145
+#define S5P_FIMV_ERR_DEC_MASK 0xFFFF
+#define S5P_FIMV_ERR_DEC_SHIFT 0
+#define S5P_FIMV_ERR_DSPL_MASK 0xFFFF0000
+#define S5P_FIMV_ERR_DSPL_SHIFT 16
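+/* The error word reported by the firmware can be split with these masks,
+ * e.g. dec_err = (err & S5P_FIMV_ERR_DEC_MASK) >> S5P_FIMV_ERR_DEC_SHIFT and
+ * dspl_err = (err & S5P_FIMV_ERR_DSPL_MASK) >> S5P_FIMV_ERR_DSPL_SHIFT;
+ * decoder errors at or above S5P_FIMV_ERR_WARNINGS_START are treated as
+ * warnings (see s5p_mfc_irq() in s5p_mfc.c). */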
+
+/* Shared memory registers' offsets */
+
+/* An offset of the start position in the stream when
+ * the start position is not aligned */
+#define S5P_FIMV_SHARED_CROP_INFO_H 0x0020
+#define S5P_FIMV_SHARED_CROP_LEFT_MASK 0xFFFF
+#define S5P_FIMV_SHARED_CROP_LEFT_SHIFT 0
+#define S5P_FIMV_SHARED_CROP_RIGHT_MASK 0xFFFF0000
+#define S5P_FIMV_SHARED_CROP_RIGHT_SHIFT 16
+#define S5P_FIMV_SHARED_CROP_INFO_V 0x0024
+#define S5P_FIMV_SHARED_CROP_TOP_MASK 0xFFFF
+#define S5P_FIMV_SHARED_CROP_TOP_SHIFT 0
+#define S5P_FIMV_SHARED_CROP_BOTTOM_MASK 0xFFFF0000
+#define S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT 16
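+/* e.g. crop_left = (crop_info_h & S5P_FIMV_SHARED_CROP_LEFT_MASK) >>
+ * S5P_FIMV_SHARED_CROP_LEFT_SHIFT, and similarly for the other fields. */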
+#define S5P_FIMV_SHARED_SET_FRAME_TAG 0x0004
+#define S5P_FIMV_SHARED_GET_FRAME_TAG_TOP 0x0008
+#define S5P_FIMV_SHARED_GET_FRAME_TAG_BOT 0x000C
+#define S5P_FIMV_SHARED_START_BYTE_NUM 0x0018
+#define S5P_FIMV_SHARED_RC_VOP_TIMING 0x0030
+#define S5P_FIMV_SHARED_LUMA_DPB_SIZE 0x0064
+#define S5P_FIMV_SHARED_CHROMA_DPB_SIZE 0x0068
+#define S5P_FIMV_SHARED_MV_SIZE 0x006C
+#define S5P_FIMV_SHARED_PIC_TIME_TOP 0x0010
+#define S5P_FIMV_SHARED_PIC_TIME_BOTTOM 0x0014
+#define S5P_FIMV_SHARED_EXT_ENC_CONTROL 0x0028
+#define S5P_FIMV_SHARED_P_B_FRAME_QP 0x0070
+#define S5P_FIMV_SHARED_ASPECT_RATIO_IDC 0x0074
+#define S5P_FIMV_SHARED_EXTENDED_SAR 0x0078
+#define S5P_FIMV_SHARED_H264_I_PERIOD 0x009C
+#define S5P_FIMV_SHARED_RC_CONTROL_CONFIG 0x00A0
+
+#endif /* _REGS_FIMV_H */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
new file mode 100644
index 0000000..7dc7eab
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -0,0 +1,1274 @@
+/*
+ * Samsung S5P Multi Format Codec v 5.1
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/videobuf2-core.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_ctrl.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_dec.h"
+#include "s5p_mfc_enc.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_shm.h"
+
+#define S5P_MFC_NAME "s5p-mfc"
+#define S5P_MFC_DEC_NAME "s5p-mfc-dec"
+#define S5P_MFC_ENC_NAME "s5p-mfc-enc"
+
+int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
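+/* Since the parameter is registered with S_IRUGO | S_IWUSR, the debug level
+ * can also be changed at runtime through
+ * /sys/module/<module name>/parameters/debug. */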
+
+/* Helper functions for interrupt processing */
+/* Remove from hw execution round robin */
+static void clear_work_bit(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ spin_lock(&dev->condlock);
+ clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock(&dev->condlock);
+}
+
+/* Wake up context wait_queue */
+static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
+ unsigned int err)
+{
+ ctx->int_cond = 1;
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ wake_up(&ctx->queue);
+}
+
+/* Wake up device wait_queue */
+static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
+ unsigned int err)
+{
+ dev->int_cond = 1;
+ dev->int_type = reason;
+ dev->int_err = err;
+ wake_up(&dev->queue);
+}
+
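+/* Watchdog timer handler: re-arms itself every MFC_WATCHDOG_INTERVAL ms and
+ * schedules the recovery worker once the hardware has stayed busy for
+ * MFC_WATCHDOG_CNT consecutive ticks (about 10 seconds with the defaults). */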
+void s5p_mfc_watchdog(unsigned long arg)
+{
+ struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
+
+ if (test_bit(0, &dev->hw_lock))
+ atomic_inc(&dev->watchdog_cnt);
+ if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
+		/* The hardware is busy and has not raised an interrupt
+		 * for MFC_WATCHDOG_CNT consecutive runs of this
+		 * watchdog timer. This usually means a serious hw
+		 * error. Now it is time to kill all instances and
+		 * reset the MFC. */
+ mfc_err("Time out during waiting for HW\n");
+ queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
+ }
+ dev->watchdog_timer.expires = jiffies +
+ msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
+ add_timer(&dev->watchdog_timer);
+}
+
+static void s5p_mfc_watchdog_worker(struct work_struct *work)
+{
+ struct s5p_mfc_dev *dev;
+ struct s5p_mfc_ctx *ctx;
+ unsigned long flags;
+ int mutex_locked;
+ int i, ret;
+
+ dev = container_of(work, struct s5p_mfc_dev, watchdog_work);
+
+ mfc_err("Driver timeout error handling\n");
+ /* Lock the mutex that protects open and release.
+ * This is necessary as they may load and unload firmware. */
+ mutex_locked = mutex_trylock(&dev->mfc_mutex);
+ if (!mutex_locked)
+ mfc_err("Error: some instance may be closing/opening\n");
+ spin_lock_irqsave(&dev->irqlock, flags);
+
+ s5p_mfc_clock_off();
+
+ for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
+ ctx = dev->ctx[i];
+ if (!ctx)
+ continue;
+ ctx->state = MFCINST_ERROR;
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ clear_work_bit(ctx);
+ wake_up_ctx(ctx, S5P_FIMV_R2H_CMD_ERR_RET, 0);
+ }
+ clear_bit(0, &dev->hw_lock);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ /* Double check if there is at least one instance running.
+	 * If no instance is in memory then no firmware should be present */
+ if (dev->num_inst > 0) {
+ ret = s5p_mfc_reload_firmware(dev);
+ if (ret) {
+ mfc_err("Failed to reload FW\n");
+ goto unlock;
+ }
+ s5p_mfc_clock_on();
+ ret = s5p_mfc_init_hw(dev);
+ if (ret)
+ mfc_err("Failed to reinit FW\n");
+ }
+unlock:
+ if (mutex_locked)
+ mutex_unlock(&dev->mfc_mutex);
+}
+
+static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (!vdev) {
+ mfc_err("failed to get video_device");
+ return MFCNODE_INVALID;
+ }
+ if (vdev->index == 0)
+ return MFCNODE_DECODER;
+ else if (vdev->index == 1)
+ return MFCNODE_ENCODER;
+ return MFCNODE_INVALID;
+}
+
+static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
+{
+ mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
+ mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
+ mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
+}
+
+static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_buf *dst_buf;
+
+ ctx->state = MFCINST_FINISHED;
+ ctx->sequence++;
+ while (!list_empty(&ctx->dst_queue)) {
+ dst_buf = list_entry(ctx->dst_queue.next,
+ struct s5p_mfc_buf, list);
+ mfc_debug(2, "Cleaning up buffer: %d\n",
+ dst_buf->b->v4l2_buf.index);
+ vb2_set_plane_payload(dst_buf->b, 0, 0);
+ vb2_set_plane_payload(dst_buf->b, 1, 0);
+ list_del(&dst_buf->list);
+ ctx->dst_queue_cnt--;
+ dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);
+
+ if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
+ s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
+ dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
+ else
+ dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;
+
+ ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
+ vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
+ }
+}
+
+static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_buf, *src_buf;
+ size_t dec_y_addr = s5p_mfc_get_dec_y_adr();
+ unsigned int frame_type = s5p_mfc_get_frame_type();
+
+	/* Copy timestamp / timecode from decoded src to dst and set
+	 * appropriate flags */
+ src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+ if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dec_y_addr) {
+ memcpy(&dst_buf->b->v4l2_buf.timecode,
+ &src_buf->b->v4l2_buf.timecode,
+ sizeof(struct v4l2_timecode));
+ memcpy(&dst_buf->b->v4l2_buf.timestamp,
+ &src_buf->b->v4l2_buf.timestamp,
+ sizeof(struct timeval));
+ switch (frame_type) {
+ case S5P_FIMV_DECODE_FRAME_I_FRAME:
+ dst_buf->b->v4l2_buf.flags |=
+ V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case S5P_FIMV_DECODE_FRAME_P_FRAME:
+ dst_buf->b->v4l2_buf.flags |=
+ V4L2_BUF_FLAG_PFRAME;
+ break;
+ case S5P_FIMV_DECODE_FRAME_B_FRAME:
+ dst_buf->b->v4l2_buf.flags |=
+ V4L2_BUF_FLAG_BFRAME;
+ break;
+ }
+ break;
+ }
+ }
+}
+
+static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_buf;
+ size_t dspl_y_addr = s5p_mfc_get_dspl_y_adr();
+ unsigned int frame_type = s5p_mfc_get_frame_type();
+ unsigned int index;
+
+ /* If frame is same as previous then skip and do not dequeue */
+ if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
+ if (!ctx->after_packed_pb)
+ ctx->sequence++;
+ ctx->after_packed_pb = 0;
+ return;
+ }
+ ctx->sequence++;
+	/* The MFC returns the address of the buffer, now we have to
+	 * check which videobuf it corresponds to */
+ list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+ /* Check if this is the buffer we're looking for */
+ if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dspl_y_addr) {
+ list_del(&dst_buf->list);
+ ctx->dst_queue_cnt--;
+ dst_buf->b->v4l2_buf.sequence = ctx->sequence;
+ if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
+ s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
+ dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
+ else
+ dst_buf->b->v4l2_buf.field =
+ V4L2_FIELD_INTERLACED;
+ vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
+ vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
+ clear_bit(dst_buf->b->v4l2_buf.index,
+ &ctx->dec_dst_flag);
+
+ vb2_buffer_done(dst_buf->b,
+ err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+ index = dst_buf->b->v4l2_buf.index;
+ break;
+ }
+ }
+}
+
+/* Handle frame decoding interrupt */
+static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int dst_frame_status;
+ struct s5p_mfc_buf *src_buf;
+ unsigned long flags;
+ unsigned int res_change;
+
+ unsigned int index;
+
+ dst_frame_status = s5p_mfc_get_dspl_status()
+ & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
+ res_change = s5p_mfc_get_dspl_status()
+ & S5P_FIMV_DEC_STATUS_RESOLUTION_MASK;
+ mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
+ if (ctx->state == MFCINST_RES_CHANGE_INIT)
+ ctx->state = MFCINST_RES_CHANGE_FLUSH;
+ if (res_change) {
+ ctx->state = MFCINST_RES_CHANGE_INIT;
+ s5p_mfc_clear_int_flags(dev);
+ wake_up_ctx(ctx, reason, err);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ s5p_mfc_try_run(dev);
+ return;
+ }
+ if (ctx->dpb_flush_flag)
+ ctx->dpb_flush_flag = 0;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ /* All frames remaining in the buffer have been extracted */
+ if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
+ if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
+ s5p_mfc_handle_frame_all_extracted(ctx);
+ ctx->state = MFCINST_RES_CHANGE_END;
+ goto leave_handle_frame;
+ } else {
+ s5p_mfc_handle_frame_all_extracted(ctx);
+ }
+ }
+
+ if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY ||
+ dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_ONLY)
+ s5p_mfc_handle_frame_copy_time(ctx);
+
+ /* A frame has been decoded and is in the buffer */
+ if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
+ dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
+ s5p_mfc_handle_frame_new(ctx, err);
+ } else {
+ mfc_debug(2, "No frame decode\n");
+ }
+ /* Mark source buffer as complete */
+ if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
+ && !list_empty(&ctx->src_queue)) {
+ src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+ list);
+ ctx->consumed_stream += s5p_mfc_get_consumed_stream();
+ if (ctx->codec_mode != S5P_FIMV_CODEC_H264_DEC &&
+ s5p_mfc_get_frame_type() == S5P_FIMV_DECODE_FRAME_P_FRAME
+ && ctx->consumed_stream + STUFF_BYTE <
+ src_buf->b->v4l2_planes[0].bytesused) {
+ /* Run MFC again on the same buffer */
+			mfc_debug(2, "Running the same buffer again\n");
+ ctx->after_packed_pb = 1;
+ } else {
+ index = src_buf->b->v4l2_buf.index;
+ mfc_debug(2, "MFC needs next buffer\n");
+ ctx->consumed_stream = 0;
+ list_del(&src_buf->list);
+ ctx->src_queue_cnt--;
+ if (s5p_mfc_err_dec(err) > 0)
+ vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
+ else
+ vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
+ }
+ }
+leave_handle_frame:
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
+ || ctx->dst_queue_cnt < ctx->dpb_count)
+ clear_work_bit(ctx);
+ s5p_mfc_clear_int_flags(dev);
+ wake_up_ctx(ctx, reason, err);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ s5p_mfc_try_run(dev);
+}
+
+/* Error handling for interrupt */
+static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_dev *dev;
+ unsigned long flags;
+
+ /* If no context is available then all necessary
+ * processing has been done. */
+	if (!ctx)
+ return;
+
+ dev = ctx->dev;
+ mfc_err("Interrupt Error: %08x\n", err);
+ s5p_mfc_clear_int_flags(dev);
+ wake_up_dev(dev, reason, err);
+
+ /* Error recovery is dependent on the state of context */
+ switch (ctx->state) {
+ case MFCINST_INIT:
+		/* This error had to happen while acquiring the instance */
+ case MFCINST_GOT_INST:
+ /* This error had to happen while parsing the header */
+ case MFCINST_HEAD_PARSED:
+ /* This error had to happen while setting dst buffers */
+ case MFCINST_RETURN_INST:
+ /* This error had to happen while releasing instance */
+ clear_work_bit(ctx);
+ wake_up_ctx(ctx, reason, err);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ ctx->state = MFCINST_ERROR;
+ break;
+ case MFCINST_FINISHING:
+ case MFCINST_FINISHED:
+ case MFCINST_RUNNING:
+		/* It is highly probable that an error occurred
+		 * while decoding a frame */
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_ERROR;
+ /* Mark all dst buffers as having an error */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ /* Mark all src buffers as having an error */
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ break;
+ default:
+ mfc_err("Encountered an error interrupt which had not been handled\n");
+ break;
+ }
+ return;
+}
+
+/* Header parsing interrupt handling */
+static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_dev *dev;
+ unsigned int guard_width, guard_height;
+
+	if (!ctx)
+ return;
+ dev = ctx->dev;
+ if (ctx->c_ops->post_seq_start) {
+ if (ctx->c_ops->post_seq_start(ctx))
+ mfc_err("post_seq_start() failed\n");
+ } else {
+ ctx->img_width = s5p_mfc_get_img_width();
+ ctx->img_height = s5p_mfc_get_img_height();
+
+ ctx->buf_width = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN);
+ ctx->buf_height = ALIGN(ctx->img_height,
+ S5P_FIMV_NV12MT_VALIGN);
+ mfc_debug(2, "SEQ Done: Movie dimensions %dx%d, "
+ "buffer dimensions: %dx%d\n", ctx->img_width,
+ ctx->img_height, ctx->buf_width,
+ ctx->buf_height);
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
+ ctx->luma_size = ALIGN(ctx->buf_width *
+ ctx->buf_height, S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->chroma_size = ALIGN(ctx->buf_width *
+ ALIGN((ctx->img_height >> 1),
+ S5P_FIMV_NV12MT_VALIGN),
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->mv_size = ALIGN(ctx->buf_width *
+ ALIGN((ctx->buf_height >> 2),
+ S5P_FIMV_NV12MT_VALIGN),
+ S5P_FIMV_DEC_BUF_ALIGN);
+ } else {
+ guard_width = ALIGN(ctx->img_width + 24,
+ S5P_FIMV_NV12MT_HALIGN);
+ guard_height = ALIGN(ctx->img_height + 16,
+ S5P_FIMV_NV12MT_VALIGN);
+ ctx->luma_size = ALIGN(guard_width *
+ guard_height, S5P_FIMV_DEC_BUF_ALIGN);
+ guard_width = ALIGN(ctx->img_width + 16,
+ S5P_FIMV_NV12MT_HALIGN);
+ guard_height = ALIGN((ctx->img_height >> 1) + 4,
+ S5P_FIMV_NV12MT_VALIGN);
+ ctx->chroma_size = ALIGN(guard_width *
+ guard_height, S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->mv_size = 0;
+ }
+ ctx->dpb_count = s5p_mfc_get_dpb_count();
+		if (ctx->img_width == 0 || ctx->img_height == 0)
+ ctx->state = MFCINST_ERROR;
+ else
+ ctx->state = MFCINST_HEAD_PARSED;
+ }
+ s5p_mfc_clear_int_flags(dev);
+ clear_work_bit(ctx);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ s5p_mfc_try_run(dev);
+ wake_up_ctx(ctx, reason, err);
+}
+
+/* Header parsing interrupt handling */
+static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_buf *src_buf;
+ struct s5p_mfc_dev *dev;
+ unsigned long flags;
+
+	if (!ctx)
+ return;
+ dev = ctx->dev;
+ s5p_mfc_clear_int_flags(dev);
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ ctx->int_cond = 1;
+ spin_lock(&dev->condlock);
+ clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock(&dev->condlock);
+ if (err == 0) {
+ ctx->state = MFCINST_RUNNING;
+ if (!ctx->dpb_flush_flag) {
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (!list_empty(&ctx->src_queue)) {
+ src_buf = list_entry(ctx->src_queue.next,
+ struct s5p_mfc_buf, list);
+ list_del(&src_buf->list);
+ ctx->src_queue_cnt--;
+ vb2_buffer_done(src_buf->b,
+ VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else {
+ ctx->dpb_flush_flag = 0;
+ }
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+
+ s5p_mfc_clock_off();
+
+ wake_up(&ctx->queue);
+ s5p_mfc_try_run(dev);
+ } else {
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+
+ s5p_mfc_clock_off();
+
+ wake_up(&ctx->queue);
+ }
+}
+
+/* Interrupt processing */
+static irqreturn_t s5p_mfc_irq(int irq, void *priv)
+{
+ struct s5p_mfc_dev *dev = priv;
+ struct s5p_mfc_ctx *ctx;
+ unsigned int reason;
+ unsigned int err;
+
+ mfc_debug_enter();
+ /* Reset the timeout watchdog */
+ atomic_set(&dev->watchdog_cnt, 0);
+ ctx = dev->ctx[dev->curr_ctx];
+ /* Get the reason of interrupt and the error code */
+ reason = s5p_mfc_get_int_reason();
+ err = s5p_mfc_get_int_err();
+ mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
+ switch (reason) {
+ case S5P_FIMV_R2H_CMD_ERR_RET:
+		/* An error has occurred */
+ if (ctx->state == MFCINST_RUNNING &&
+ s5p_mfc_err_dec(err) >= S5P_FIMV_ERR_WARNINGS_START)
+ s5p_mfc_handle_frame(ctx, reason, err);
+ else
+ s5p_mfc_handle_error(ctx, reason, err);
+ clear_bit(0, &dev->enter_suspend);
+ break;
+
+ case S5P_FIMV_R2H_CMD_SLICE_DONE_RET:
+ case S5P_FIMV_R2H_CMD_FRAME_DONE_RET:
+ if (ctx->c_ops->post_frame_start) {
+ if (ctx->c_ops->post_frame_start(ctx))
+ mfc_err("post_frame_start() failed\n");
+ s5p_mfc_clear_int_flags(dev);
+ wake_up_ctx(ctx, reason, err);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ s5p_mfc_try_run(dev);
+ } else {
+ s5p_mfc_handle_frame(ctx, reason, err);
+ }
+ break;
+
+ case S5P_FIMV_R2H_CMD_SEQ_DONE_RET:
+ s5p_mfc_handle_seq_done(ctx, reason, err);
+ break;
+
+ case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET:
+ ctx->inst_no = s5p_mfc_get_inst_no();
+ ctx->state = MFCINST_GOT_INST;
+ clear_work_bit(ctx);
+ wake_up(&ctx->queue);
+ goto irq_cleanup_hw;
+
+ case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET:
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_FREE;
+ wake_up(&ctx->queue);
+ goto irq_cleanup_hw;
+
+ case S5P_FIMV_R2H_CMD_SYS_INIT_RET:
+ case S5P_FIMV_R2H_CMD_FW_STATUS_RET:
+ case S5P_FIMV_R2H_CMD_SLEEP_RET:
+ case S5P_FIMV_R2H_CMD_WAKEUP_RET:
+ if (ctx)
+ clear_work_bit(ctx);
+ s5p_mfc_clear_int_flags(dev);
+ wake_up_dev(dev, reason, err);
+ clear_bit(0, &dev->hw_lock);
+ clear_bit(0, &dev->enter_suspend);
+ break;
+
+ case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET:
+ s5p_mfc_handle_init_buffers(ctx, reason, err);
+ break;
+ default:
+ mfc_debug(2, "Unknown int reason\n");
+ s5p_mfc_clear_int_flags(dev);
+ }
+ mfc_debug_leave();
+ return IRQ_HANDLED;
+irq_cleanup_hw:
+ s5p_mfc_clear_int_flags(dev);
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ ctx->int_cond = 1;
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ mfc_err("Failed to unlock hw\n");
+
+ s5p_mfc_clock_off();
+
+ s5p_mfc_try_run(dev);
+ mfc_debug(2, "Exit via irq_cleanup_hw\n");
+ return IRQ_HANDLED;
+}
+
+/* Open an MFC node */
+static int s5p_mfc_open(struct file *file)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = NULL;
+ struct vb2_queue *q;
+ unsigned long flags;
+ int ret = 0;
+
+ mfc_debug_enter();
+ dev->num_inst++; /* It is guarded by mfc_mutex in vfd */
+ /* Allocate memory for context */
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx) {
+ mfc_err("Not enough memory\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ ctx->dev = dev;
+ INIT_LIST_HEAD(&ctx->src_queue);
+ INIT_LIST_HEAD(&ctx->dst_queue);
+ ctx->src_queue_cnt = 0;
+ ctx->dst_queue_cnt = 0;
+ /* Get context number */
+ ctx->num = 0;
+ while (dev->ctx[ctx->num]) {
+ ctx->num++;
+ if (ctx->num >= MFC_NUM_CONTEXTS) {
+ mfc_err("Too many open contexts\n");
+ ret = -EBUSY;
+ goto err_no_ctx;
+ }
+ }
+ /* Mark context as idle */
+ spin_lock_irqsave(&dev->condlock, flags);
+ clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ dev->ctx[ctx->num] = ctx;
+ if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ ctx->type = MFCINST_DECODER;
+ ctx->c_ops = get_dec_codec_ops();
+ /* Setup ctrl handler */
+ ret = s5p_mfc_dec_ctrls_setup(ctx);
+ if (ret) {
+ mfc_err("Failed to setup mfc controls\n");
+ goto err_ctrls_setup;
+ }
+ } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ ctx->type = MFCINST_ENCODER;
+ ctx->c_ops = get_enc_codec_ops();
+ /* only for encoder */
+ INIT_LIST_HEAD(&ctx->ref_queue);
+ ctx->ref_queue_cnt = 0;
+ /* Setup ctrl handler */
+ ret = s5p_mfc_enc_ctrls_setup(ctx);
+ if (ret) {
+ mfc_err("Failed to setup mfc controls\n");
+ goto err_ctrls_setup;
+ }
+ } else {
+ ret = -ENOENT;
+ goto err_bad_node;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ ctx->inst_no = -1;
+ /* Load firmware if this is the first instance */
+ if (dev->num_inst == 1) {
+ dev->watchdog_timer.expires = jiffies +
+ msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
+ add_timer(&dev->watchdog_timer);
+ ret = s5p_mfc_power_on();
+ if (ret < 0) {
+ mfc_err("power on failed\n");
+ goto err_pwr_enable;
+ }
+ s5p_mfc_clock_on();
+ ret = s5p_mfc_alloc_and_load_firmware(dev);
+ if (ret)
+ goto err_alloc_fw;
+ /* Init the FW */
+ ret = s5p_mfc_init_hw(dev);
+ if (ret)
+ goto err_init_hw;
+ s5p_mfc_clock_off();
+ }
+ /* Init videobuf2 queue for CAPTURE */
+ q = &ctx->vq_dst;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->drv_priv = &ctx->fh;
+ if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ q->io_modes = VB2_MMAP;
+ q->ops = get_dec_queue_ops();
+ } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = get_enc_queue_ops();
+ } else {
+ ret = -ENOENT;
+ goto err_queue_init;
+ }
+ q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
+ ret = vb2_queue_init(q);
+ if (ret) {
+ mfc_err("Failed to initialize videobuf2 queue(capture)\n");
+ goto err_queue_init;
+ }
+ /* Init videobuf2 queue for OUTPUT */
+ q = &ctx->vq_src;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ q->io_modes = VB2_MMAP;
+ q->drv_priv = &ctx->fh;
+ if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ q->io_modes = VB2_MMAP;
+ q->ops = get_dec_queue_ops();
+ } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = get_enc_queue_ops();
+ } else {
+ ret = -ENOENT;
+ goto err_queue_init;
+ }
+ q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
+ ret = vb2_queue_init(q);
+ if (ret) {
+ mfc_err("Failed to initialize videobuf2 queue(output)\n");
+ goto err_queue_init;
+ }
+ init_waitqueue_head(&ctx->queue);
+ mfc_debug_leave();
+ return ret;
+	/* Deinit when a failure occurred */
+err_queue_init:
+err_init_hw:
+ s5p_mfc_release_firmware(dev);
+err_alloc_fw:
+ dev->ctx[ctx->num] = 0;
+ del_timer_sync(&dev->watchdog_timer);
+ s5p_mfc_clock_off();
+err_pwr_enable:
+ if (dev->num_inst == 1) {
+ if (s5p_mfc_power_off() < 0)
+ mfc_err("power off failed\n");
+ s5p_mfc_release_firmware(dev);
+ }
+err_ctrls_setup:
+ s5p_mfc_dec_ctrls_delete(ctx);
+err_bad_node:
+err_no_ctx:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+err_alloc:
+ dev->num_inst--;
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Release MFC context */
+static int s5p_mfc_release(struct file *file)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ mfc_debug_enter();
+ s5p_mfc_clock_on();
+ vb2_queue_release(&ctx->vq_src);
+ vb2_queue_release(&ctx->vq_dst);
+ /* Mark context as idle */
+ spin_lock_irqsave(&dev->condlock, flags);
+ clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+	/* If the instance was initialised then
+	 * return the instance and free resources */
+ if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
+ mfc_debug(2, "Has to free instance\n");
+ ctx->state = MFCINST_RETURN_INST;
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_try_run(dev);
+		/* Wait until the instance is returned or a timeout occurs */
+ if (s5p_mfc_wait_for_done_ctx
+ (ctx, S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
+ s5p_mfc_clock_off();
+ mfc_err("Err returning instance\n");
+ }
+ mfc_debug(2, "After free instance\n");
+ /* Free resources */
+ s5p_mfc_release_codec_buffers(ctx);
+ s5p_mfc_release_instance_buffer(ctx);
+ if (ctx->type == MFCINST_DECODER)
+ s5p_mfc_release_dec_desc_buffer(ctx);
+
+ ctx->inst_no = MFC_NO_INSTANCE_SET;
+ }
+ /* hardware locking scheme */
+ if (dev->curr_ctx == ctx->num)
+ clear_bit(0, &dev->hw_lock);
+ dev->num_inst--;
+ if (dev->num_inst == 0) {
+ mfc_debug(2, "Last instance - release firmware\n");
+ /* reset <-> F/W release */
+ s5p_mfc_reset(dev);
+ s5p_mfc_release_firmware(dev);
+ del_timer_sync(&dev->watchdog_timer);
+ if (s5p_mfc_power_off() < 0)
+ mfc_err("Power off failed\n");
+ }
+ mfc_debug(2, "Shutting down clock\n");
+ s5p_mfc_clock_off();
+ dev->ctx[ctx->num] = 0;
+ s5p_mfc_dec_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Poll */
+static unsigned int s5p_mfc_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct vb2_queue *src_q, *dst_q;
+ struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
+ unsigned int rc = 0;
+ unsigned long flags;
+
+ src_q = &ctx->vq_src;
+ dst_q = &ctx->vq_dst;
+ /*
+ * There has to be at least one buffer queued on each queued_list, which
+ * means either in driver already or waiting for driver to claim it
+ * and start processing.
+ */
+ if ((!src_q->streaming || list_empty(&src_q->queued_list))
+ && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
+ rc = POLLERR;
+ goto end;
+ }
+ mutex_unlock(&dev->mfc_mutex);
+ poll_wait(file, &src_q->done_wq, wait);
+ poll_wait(file, &dst_q->done_wq, wait);
+ mutex_lock(&dev->mfc_mutex);
+ spin_lock_irqsave(&src_q->done_lock, flags);
+ if (!list_empty(&src_q->done_list))
+ src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
+ done_entry);
+ if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
+ || src_vb->state == VB2_BUF_STATE_ERROR))
+ rc |= POLLOUT | POLLWRNORM;
+ spin_unlock_irqrestore(&src_q->done_lock, flags);
+ spin_lock_irqsave(&dst_q->done_lock, flags);
+ if (!list_empty(&dst_q->done_list))
+ dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
+ done_entry);
+ if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
+ || dst_vb->state == VB2_BUF_STATE_ERROR))
+ rc |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&dst_q->done_lock, flags);
+end:
+ return rc;
+}
+
+/* Mmap */
+static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ int ret;
+ if (offset < DST_QUEUE_OFF_BASE) {
+		mfc_debug(2, "mmapping source\n");
+ ret = vb2_mmap(&ctx->vq_src, vma);
+ } else { /* capture */
+		mfc_debug(2, "mmapping destination\n");
+ vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ ret = vb2_mmap(&ctx->vq_dst, vma);
+ }
+ return ret;
+}
+
+/* v4l2 ops */
+static const struct v4l2_file_operations s5p_mfc_fops = {
+ .owner = THIS_MODULE,
+ .open = s5p_mfc_open,
+ .release = s5p_mfc_release,
+ .poll = s5p_mfc_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = s5p_mfc_mmap,
+};
+
+static int match_child(struct device *dev, void *data)
+{
+ if (!dev_name(dev))
+ return 0;
+ return !strcmp(dev_name(dev), (char *)data);
+}
+
+
+/* MFC probe function */
+static int __devinit s5p_mfc_probe(struct platform_device *pdev)
+{
+ struct s5p_mfc_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret;
+
+ pr_debug("%s++\n", __func__);
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (!dev) {
+ dev_err(&pdev->dev, "Not enough memory for MFC device\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&dev->irqlock);
+ spin_lock_init(&dev->condlock);
+ dev->plat_dev = pdev;
+ if (!dev->plat_dev) {
+ dev_err(&pdev->dev, "No platform data specified\n");
+ ret = -ENODEV;
+ goto err_dev;
+ }
+
+ ret = s5p_mfc_init_pm(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get mfc clock source\n");
+ goto err_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get memory region resource\n");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ dev->mfc_mem = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+ if (dev->mfc_mem == NULL) {
+ dev_err(&pdev->dev, "failed to get memory region\n");
+ ret = -ENOENT;
+ goto err_mem_reg;
+ }
+ dev->regs_base = ioremap(dev->mfc_mem->start, resource_size(dev->mfc_mem));
+ if (dev->regs_base == NULL) {
+ dev_err(&pdev->dev, "failed to ioremap address region\n");
+ ret = -ENOENT;
+ goto err_ioremap;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ ret = -ENOENT;
+ goto err_get_res;
+ }
+ dev->irq = res->start;
+ ret = request_irq(dev->irq, s5p_mfc_irq, IRQF_DISABLED, pdev->name,
+ dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
+ goto err_req_irq;
+ }
+
+ dev->mem_dev_l = device_find_child(&dev->plat_dev->dev, "s5p-mfc-l",
+ match_child);
+ if (!dev->mem_dev_l) {
+ mfc_err("Mem child (L) device get failed\n");
+ ret = -ENODEV;
+ goto err_find_child;
+ }
+ dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
+ match_child);
+ if (!dev->mem_dev_r) {
+ mfc_err("Mem child (R) device get failed\n");
+ ret = -ENODEV;
+ goto err_find_child;
+ }
+
+ dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
+ if (IS_ERR_OR_NULL(dev->alloc_ctx[0])) {
+ ret = PTR_ERR(dev->alloc_ctx[0]);
+ goto err_mem_init_ctx_0;
+ }
+ dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
+ if (IS_ERR_OR_NULL(dev->alloc_ctx[1])) {
+ ret = PTR_ERR(dev->alloc_ctx[1]);
+ goto err_mem_init_ctx_1;
+ }
+
+ mutex_init(&dev->mfc_mutex);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ goto err_v4l2_dev_reg;
+ init_waitqueue_head(&dev->queue);
+
+ /* decoder */
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_dec_alloc;
+ }
+	vfd->fops	= &s5p_mfc_fops;
+	vfd->ioctl_ops	= get_dec_v4l2_ioctl_ops();
+	vfd->release	= video_device_release;
+ vfd->lock = &dev->mfc_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
+ dev->vfd_dec = vfd;
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_dec_reg;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "decoder registered as /dev/video%d\n", vfd->num);
+ video_set_drvdata(vfd, dev);
+
+ /* encoder */
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_enc_alloc;
+ }
+	vfd->fops	= &s5p_mfc_fops;
+	vfd->ioctl_ops	= get_enc_v4l2_ioctl_ops();
+	vfd->release	= video_device_release;
+ vfd->lock = &dev->mfc_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
+ dev->vfd_enc = vfd;
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_enc_reg;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "encoder registered as /dev/video%d\n", vfd->num);
+ video_set_drvdata(vfd, dev);
+ platform_set_drvdata(pdev, dev);
+
+ dev->hw_lock = 0;
+ dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
+ INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
+ atomic_set(&dev->watchdog_cnt, 0);
+ init_timer(&dev->watchdog_timer);
+ dev->watchdog_timer.data = (unsigned long)dev;
+ dev->watchdog_timer.function = s5p_mfc_watchdog;
+
+ pr_debug("%s--\n", __func__);
+ return 0;
+
+/* Deinit MFC if probe had failed */
+err_enc_reg:
+ video_device_release(dev->vfd_enc);
+err_enc_alloc:
+ video_unregister_device(dev->vfd_dec);
+err_dec_reg:
+ video_device_release(dev->vfd_dec);
+err_dec_alloc:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_v4l2_dev_reg:
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
+err_mem_init_ctx_1:
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
+err_mem_init_ctx_0:
+err_find_child:
+ free_irq(dev->irq, dev);
+err_req_irq:
+err_get_res:
+ iounmap(dev->regs_base);
+ dev->regs_base = NULL;
+err_ioremap:
+ release_resource(dev->mfc_mem);
+ kfree(dev->mfc_mem);
+err_mem_reg:
+err_res:
+ s5p_mfc_final_pm(dev);
+err_clk:
+err_dev:
+ kfree(dev);
+ pr_debug("%s-- with error\n", __func__);
+ return ret;
+
+}
+
+/* Remove the driver */
+static int __devexit s5p_mfc_remove(struct platform_device *pdev)
+{
+ struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);
+
+ del_timer_sync(&dev->watchdog_timer);
+ flush_workqueue(dev->watchdog_workqueue);
+ destroy_workqueue(dev->watchdog_workqueue);
+
+ video_unregister_device(dev->vfd_enc);
+ video_unregister_device(dev->vfd_dec);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
+
+ free_irq(dev->irq, dev);
+ iounmap(dev->regs_base);
+ if (dev->mfc_mem) {
+ release_resource(dev->mfc_mem);
+ kfree(dev->mfc_mem);
+ dev->mfc_mem = NULL;
+ }
+ s5p_mfc_final_pm(dev);
+ kfree(dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int s5p_mfc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+ int ret;
+
+ if (m_dev->num_inst == 0)
+ return 0;
+	if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
+		mfc_err("Error: going to suspend for a second time\n");
+		return -EIO;
+	}
+
+	/* If the hardware is still processing, wait until it finishes. */
+ while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
+ /* Try and lock the HW */
+ /* Wait on the interrupt waitqueue */
+ ret = wait_event_interruptible_timeout(m_dev->queue,
+ m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond,
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+
+ if (ret == 0) {
+ mfc_err("Waiting for hardware to finish timed out\n");
+ return -EIO;
+ }
+ }
+	return s5p_mfc_sleep(m_dev);
+}
+
+static int s5p_mfc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+
+ if (m_dev->num_inst == 0)
+ return 0;
+ return s5p_mfc_wakeup(m_dev);
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int s5p_mfc_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+
+ atomic_set(&m_dev->pm.power, 0);
+ return 0;
+}
+
+static int s5p_mfc_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+ int pre_power;
+
+ if (!m_dev->alloc_ctx)
+ return 0;
+ pre_power = atomic_read(&m_dev->pm.power);
+ atomic_set(&m_dev->pm.power, 1);
+ return 0;
+}
+#endif
+
+/* Power management */
+static const struct dev_pm_ops s5p_mfc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
+ SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver s5p_mfc_pdrv = {
+ .probe = s5p_mfc_probe,
+ .remove = __devexit_p(s5p_mfc_remove),
+ .driver = {
+ .name = S5P_MFC_NAME,
+ .owner = THIS_MODULE,
+ .pm = &s5p_mfc_pm_ops
+ },
+};
+
+static char banner[] __initdata =
+ "S5P MFC V4L2 Driver, (C) 2011 Samsung Electronics\n";
+
+static int __init s5p_mfc_init(void)
+{
+ int ret;
+
+ pr_info("%s", banner);
+ ret = platform_driver_register(&s5p_mfc_pdrv);
+ if (ret)
+ pr_err("Platform device registration failed.\n");
+ return ret;
+}
+
+static void __exit s5p_mfc_exit(void)
+{
+ platform_driver_unregister(&s5p_mfc_pdrv);
+}
+
+module_init(s5p_mfc_init);
+module_exit(s5p_mfc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
+MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c
new file mode 100644
index 0000000..f0665ed
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c
@@ -0,0 +1,120 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "regs-mfc.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+
+/* This function is used to send a command to the MFC */
+static int s5p_mfc_cmd_host2risc(struct s5p_mfc_dev *dev, int cmd,
+ struct s5p_mfc_cmd_args *args)
+{
+ int cur_cmd;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
+ /* wait until host to risc command register becomes 'H2R_CMD_EMPTY' */
+ do {
+ if (time_after(jiffies, timeout)) {
+ mfc_err("Timeout while waiting for hardware\n");
+ return -EIO;
+ }
+ cur_cmd = mfc_read(dev, S5P_FIMV_HOST2RISC_CMD);
+ } while (cur_cmd != S5P_FIMV_H2R_CMD_EMPTY);
+ mfc_write(dev, args->arg[0], S5P_FIMV_HOST2RISC_ARG1);
+ mfc_write(dev, args->arg[1], S5P_FIMV_HOST2RISC_ARG2);
+ mfc_write(dev, args->arg[2], S5P_FIMV_HOST2RISC_ARG3);
+ mfc_write(dev, args->arg[3], S5P_FIMV_HOST2RISC_ARG4);
+ /* Issue the command */
+ mfc_write(dev, cmd, S5P_FIMV_HOST2RISC_CMD);
+ return 0;
+}
+
+/* Initialize the MFC */
+int s5p_mfc_sys_init_cmd(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ h2r_args.arg[0] = dev->fw_size;
+ return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_SYS_INIT, &h2r_args);
+}
+
+/* Suspend the MFC hardware */
+int s5p_mfc_sleep_cmd(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_SLEEP, &h2r_args);
+}
+
+/* Wake up the MFC hardware */
+int s5p_mfc_wakeup_cmd(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_WAKEUP, &h2r_args);
+}
+
+
+int s5p_mfc_open_inst_cmd(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_cmd_args h2r_args;
+ int ret;
+
+ /* Preparing decoding - getting instance number */
+ mfc_debug(2, "Getting instance number (codec: %d)\n", ctx->codec_mode);
+ dev->curr_ctx = ctx->num;
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ h2r_args.arg[0] = ctx->codec_mode;
+ h2r_args.arg[1] = 0; /* no crc & no pixelcache */
+ h2r_args.arg[2] = ctx->ctx_ofs;
+ h2r_args.arg[3] = ctx->ctx_size;
+ ret = s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE,
+ &h2r_args);
+ if (ret) {
+ mfc_err("Failed to create a new instance\n");
+ ctx->state = MFCINST_ERROR;
+ }
+ return ret;
+}
+
+int s5p_mfc_close_inst_cmd(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_cmd_args h2r_args;
+ int ret;
+
+ if (ctx->state == MFCINST_FREE) {
+ mfc_err("Instance already returned\n");
+ ctx->state = MFCINST_ERROR;
+ return -EINVAL;
+ }
+ /* Closing decoding instance */
+ mfc_debug(2, "Returning instance number %d\n", ctx->inst_no);
+ dev->curr_ctx = ctx->num;
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ h2r_args.arg[0] = ctx->inst_no;
+ ret = s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_CLOSE_INSTANCE,
+ &h2r_args);
+ if (ret) {
+ mfc_err("Failed to return an instance\n");
+ ctx->state = MFCINST_ERROR;
+ return -EINVAL;
+ }
+ return 0;
+}
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h
new file mode 100644
index 0000000..5ceebfe
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h
@@ -0,0 +1,30 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_CMD_H_
+#define S5P_MFC_CMD_H_
+
+#include "s5p_mfc_common.h"
+
+#define MAX_H2R_ARG 4
+
+struct s5p_mfc_cmd_args {
+ unsigned int arg[MAX_H2R_ARG];
+};
+
+int s5p_mfc_sys_init_cmd(struct s5p_mfc_dev *dev);
+int s5p_mfc_sleep_cmd(struct s5p_mfc_dev *dev);
+int s5p_mfc_wakeup_cmd(struct s5p_mfc_dev *dev);
+int s5p_mfc_open_inst_cmd(struct s5p_mfc_ctx *ctx);
+int s5p_mfc_close_inst_cmd(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_CMD_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_common.h b/drivers/media/video/s5p-mfc/s5p_mfc_common.h
new file mode 100644
index 0000000..91146fa
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_common.h
@@ -0,0 +1,572 @@
+/*
+ * Samsung S5P Multi Format Codec v 5.0
+ *
+ * This file contains definitions of enums and structs used by the codec
+ * driver.
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#ifndef S5P_MFC_COMMON_H_
+#define S5P_MFC_COMMON_H_
+
+#include "regs-mfc.h"
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+
+/* Definitions related to MFC memory */
+
+/* Offset base used to differentiate between CAPTURE and OUTPUT
+ * while mmapping */
+#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2)
+
+/* Offset used by the hardware to store addresses */
+#define MFC_OFFSET_SHIFT 11
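+/* i.e. addresses handed to the hardware are typically programmed as offsets
+ * shifted right by MFC_OFFSET_SHIFT, giving 2 KiB granularity. */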
+
+#define FIRMWARE_ALIGN 0x20000 /* 128KB */
+#define MFC_H264_CTX_BUF_SIZE 0x96000 /* 600KB per H264 instance */
+#define MFC_CTX_BUF_SIZE 0x2800 /* 10KB per instance */
+#define DESC_BUF_SIZE 0x20000 /* 128KB for DESC buffer */
+#define SHARED_BUF_SIZE 0x2000 /* 8KB for shared buffer */
+
+#define DEF_CPB_SIZE 0x40000 /* 512KB */
+
+#define MFC_BANK1_ALLOC_CTX 0
+#define MFC_BANK2_ALLOC_CTX 1
+
+#define MFC_BANK1_ALIGN_ORDER 13
+#define MFC_BANK2_ALIGN_ORDER 13
+#define MFC_BASE_ALIGN_ORDER 17
+
+#include <media/videobuf2-dma-contig.h>
+
+static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
+{
+ /* Same functionality as the vb2_dma_contig_plane_paddr */
+ dma_addr_t *paddr = vb2_dma_contig_memops.cookie(b);
+
+ return *paddr;
+}
+
+/* MFC definitions */
+#define MFC_MAX_EXTRA_DPB 5
+#define MFC_MAX_BUFFERS 32
+#define MFC_NUM_CONTEXTS 4
+/* Interrupt timeout */
+#define MFC_INT_TIMEOUT 2000
+/* Busy wait timeout */
+#define MFC_BW_TIMEOUT 500
+/* Watchdog interval */
+#define MFC_WATCHDOG_INTERVAL 1000
+/* After how many executions the watchdog should assume a lockup */
+#define MFC_WATCHDOG_CNT 10
+#define MFC_NO_INSTANCE_SET -1
+#define MFC_ENC_CAP_PLANE_COUNT 1
+#define MFC_ENC_OUT_PLANE_COUNT 2
+#define STUFF_BYTE 4
+#define MFC_MAX_CTRLS 64
+
+#define mfc_read(dev, offset) readl(dev->regs_base + (offset))
+#define mfc_write(dev, data, offset) writel((data), dev->regs_base + \
+ (offset))
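+/* Register I/O in the driver goes through these accessors, e.g.
+ * s5p_mfc_clear_int_flags() in s5p_mfc.c does
+ * mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT) to acknowledge an interrupt. */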
+
+/**
+ * enum s5p_mfc_fmt_type - type of the pixelformat
+ */
+enum s5p_mfc_fmt_type {
+ MFC_FMT_DEC,
+ MFC_FMT_ENC,
+ MFC_FMT_RAW,
+};
+
+/**
+ * enum s5p_mfc_node_type - The type of an MFC device node.
+ */
+enum s5p_mfc_node_type {
+ MFCNODE_INVALID = -1,
+ MFCNODE_DECODER = 0,
+ MFCNODE_ENCODER = 1,
+};
+
+/**
+ * enum s5p_mfc_inst_type - The type of an MFC instance.
+ */
+enum s5p_mfc_inst_type {
+ MFCINST_INVALID,
+ MFCINST_DECODER,
+ MFCINST_ENCODER,
+};
+
+/**
+ * enum s5p_mfc_inst_state - The state of an MFC instance.
+ */
+enum s5p_mfc_inst_state {
+ MFCINST_FREE = 0,
+ MFCINST_INIT = 100,
+ MFCINST_GOT_INST,
+ MFCINST_HEAD_PARSED,
+ MFCINST_BUFS_SET,
+ MFCINST_RUNNING,
+ MFCINST_FINISHING,
+ MFCINST_FINISHED,
+ MFCINST_RETURN_INST,
+ MFCINST_ERROR,
+ MFCINST_ABORT,
+ MFCINST_RES_CHANGE_INIT,
+ MFCINST_RES_CHANGE_FLUSH,
+ MFCINST_RES_CHANGE_END,
+};
+
+/**
+ * enum s5p_mfc_queue_state - The state of buffer queue.
+ */
+enum s5p_mfc_queue_state {
+ QUEUE_FREE,
+ QUEUE_BUFS_REQUESTED,
+ QUEUE_BUFS_QUERIED,
+ QUEUE_BUFS_MMAPED,
+};
+
+/**
+ * enum s5p_mfc_decode_arg - type of frame decoding
+ */
+enum s5p_mfc_decode_arg {
+ MFC_DEC_FRAME,
+ MFC_DEC_LAST_FRAME,
+ MFC_DEC_RES_CHANGE,
+};
+
+struct s5p_mfc_ctx;
+
+/**
+ * struct s5p_mfc_buf - MFC buffer
+ */
+struct s5p_mfc_buf {
+ struct list_head list;
+ struct vb2_buffer *b;
+ union {
+ struct {
+ size_t luma;
+ size_t chroma;
+ } raw;
+ size_t stream;
+ } cookie;
+ int used;
+};
+
+/**
+ * struct s5p_mfc_pm - power management data structure
+ */
+struct s5p_mfc_pm {
+ struct clk *clock;
+ struct clk *clock_gate;
+ atomic_t power;
+ struct device *device;
+};
+
+/**
+ * struct s5p_mfc_dev - The struct containing driver internal parameters.
+ *
+ * @v4l2_dev: v4l2_device
+ * @vfd_dec: video device for decoding
+ * @vfd_enc: video device for encoding
+ * @plat_dev: platform device
+ * @mem_dev_l: child device of the left memory bank (0)
+ * @mem_dev_r: child device of the right memory bank (1)
+ * @regs_base: base address of the MFC hw registers
+ * @irq: irq resource
+ * @mfc_mem: MFC registers memory resource
+ * @dec_ctrl_handler: control framework handler for decoding
+ * @enc_ctrl_handler: control framework handler for encoding
+ * @pm: power management control
+ * @num_inst:		counter of active MFC instances
+ * @irqlock: lock for operations on videobuf2 queues
+ * @condlock: lock for changing/checking if a context is ready to be
+ * processed
+ * @mfc_mutex: lock for video_device
+ * @int_cond: variable used by the waitqueue
+ * @int_type: type of last interrupt
+ * @int_err: error number for last interrupt
+ * @queue: waitqueue for waiting for completion of device commands
+ * @fw_size: size of firmware
+ * @bank1:		address of the beginning of bank 1 memory
+ * @bank2:		address of the beginning of bank 2 memory
+ * @hw_lock: used for hardware locking
+ * @ctx: array of driver contexts
+ * @curr_ctx: number of the currently running context
+ * @ctx_work_bits: used to mark which contexts are waiting for hardware
+ * @watchdog_cnt: counter for the watchdog
+ * @watchdog_workqueue: workqueue for the watchdog
+ * @watchdog_work: worker for the watchdog
+ * @alloc_ctx: videobuf2 allocator contexts for two memory banks
+ * @enter_suspend: flag set when entering suspend
+ *
+ */
+struct s5p_mfc_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd_dec;
+ struct video_device *vfd_enc;
+ struct platform_device *plat_dev;
+ struct device *mem_dev_l;
+ struct device *mem_dev_r;
+ void __iomem *regs_base;
+ int irq;
+ struct resource *mfc_mem;
+ struct v4l2_ctrl_handler dec_ctrl_handler;
+ struct v4l2_ctrl_handler enc_ctrl_handler;
+ struct s5p_mfc_pm pm;
+ int num_inst;
+ spinlock_t irqlock; /* lock when operating on videobuf2 queues */
+ spinlock_t condlock; /* lock when changing/checking if a context is
+ ready to be processed */
+ struct mutex mfc_mutex; /* video_device lock */
+ int int_cond;
+ int int_type;
+ unsigned int int_err;
+ wait_queue_head_t queue;
+ size_t fw_size;
+ size_t bank1;
+ size_t bank2;
+ unsigned long hw_lock;
+ struct s5p_mfc_ctx *ctx[MFC_NUM_CONTEXTS];
+ int curr_ctx;
+ unsigned long ctx_work_bits;
+ atomic_t watchdog_cnt;
+ struct timer_list watchdog_timer;
+ struct workqueue_struct *watchdog_workqueue;
+ struct work_struct watchdog_work;
+ void *alloc_ctx[2];
+ unsigned long enter_suspend;
+};
+
+/**
+ * struct s5p_mfc_h264_enc_params - encoding parameters for h264
+ */
+struct s5p_mfc_h264_enc_params {
+ enum v4l2_mpeg_video_h264_profile profile;
+ enum v4l2_mpeg_video_h264_loop_filter_mode loop_filter_mode;
+ s8 loop_filter_alpha;
+ s8 loop_filter_beta;
+ enum v4l2_mpeg_video_h264_entropy_mode entropy_mode;
+ u8 max_ref_pic;
+ u8 num_ref_pic_4p;
+ int _8x8_transform;
+ int rc_mb;
+ int rc_mb_dark;
+ int rc_mb_smooth;
+ int rc_mb_static;
+ int rc_mb_activity;
+ int vui_sar;
+ u8 vui_sar_idc;
+ u16 vui_ext_sar_width;
+ u16 vui_ext_sar_height;
+ int open_gop;
+ u16 open_gop_size;
+ u8 rc_frame_qp;
+ u8 rc_min_qp;
+ u8 rc_max_qp;
+ u8 rc_p_frame_qp;
+ u8 rc_b_frame_qp;
+ enum v4l2_mpeg_video_h264_level level_v4l2;
+ int level;
+ u16 cpb_size;
+};
+
+/**
+ * struct s5p_mfc_mpeg4_enc_params - encoding parameters for h263 and mpeg4
+ */
+struct s5p_mfc_mpeg4_enc_params {
+ /* MPEG4 Only */
+ enum v4l2_mpeg_video_mpeg4_profile profile;
+ int quarter_pixel;
+ /* Common for MPEG4, H263 */
+ u16 vop_time_res;
+ u16 vop_frm_delta;
+ u8 rc_frame_qp;
+ u8 rc_min_qp;
+ u8 rc_max_qp;
+ u8 rc_p_frame_qp;
+ u8 rc_b_frame_qp;
+ enum v4l2_mpeg_video_mpeg4_level level_v4l2;
+ int level;
+};
+
+/**
+ * struct s5p_mfc_enc_params - general encoding parameters
+ */
+struct s5p_mfc_enc_params {
+ u16 width;
+ u16 height;
+
+ u16 gop_size;
+ enum v4l2_mpeg_video_multi_slice_mode slice_mode;
+ u16 slice_mb;
+ u32 slice_bit;
+ u16 intra_refresh_mb;
+ int pad;
+ u8 pad_luma;
+ u8 pad_cb;
+ u8 pad_cr;
+ int rc_frame;
+ u32 rc_bitrate;
+ u16 rc_reaction_coeff;
+ u16 vbv_size;
+
+ enum v4l2_mpeg_video_header_mode seq_hdr_mode;
+ enum v4l2_mpeg_mfc51_video_frame_skip_mode frame_skip_mode;
+ int fixed_target_bit;
+
+ u8 num_b_frame;
+ u32 rc_framerate_num;
+ u32 rc_framerate_denom;
+ int interlace;
+
+ union {
+ struct s5p_mfc_h264_enc_params h264;
+ struct s5p_mfc_mpeg4_enc_params mpeg4;
+ } codec;
+
+};
+
+/**
+ * struct s5p_mfc_codec_ops - codec ops, used by encoding
+ */
+struct s5p_mfc_codec_ops {
+ /* initialization routines */
+ int (*pre_seq_start) (struct s5p_mfc_ctx *ctx);
+ int (*post_seq_start) (struct s5p_mfc_ctx *ctx);
+ /* execution routines */
+ int (*pre_frame_start) (struct s5p_mfc_ctx *ctx);
+ int (*post_frame_start) (struct s5p_mfc_ctx *ctx);
+};
+
+#define call_cop(c, op, args...) \
+ (((c)->c_ops->op) ? \
+ ((c)->c_ops->op(args)) : 0)
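+/* Example: call_cop(ctx, post_seq_start, ctx) calls the codec-specific
+ * post_seq_start hook if the codec provides one and evaluates to 0
+ * otherwise. */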
+
+/**
+ * struct s5p_mfc_ctx - This struct contains the instance context
+ *
+ * @dev: pointer to the s5p_mfc_dev of the device
+ * @fh: struct v4l2_fh
+ * @num: number of the context that this structure describes
+ * @int_cond: variable used by the waitqueue
+ * @int_type: type of the last interrupt
+ * @int_err: error number received from MFC hw in the interrupt
+ * @queue: waitqueue that can be used to wait for this context to
+ * finish
+ * @src_fmt: source pixelformat information
+ * @dst_fmt: destination pixelformat information
+ * @vq_src: vb2 queue for source buffers
+ * @vq_dst: vb2 queue for destination buffers
+ * @src_queue: driver internal queue for source buffers
+ * @dst_queue: driver internal queue for destination buffers
+ * @src_queue_cnt: number of buffers queued on the source internal queue
+ * @dst_queue_cnt: number of buffers queued on the dest internal queue
+ * @type: type of the instance - decoder or encoder
+ * @state: state of the context
+ * @inst_no: number of hw instance associated with the context
+ * @img_width: width of the image that is decoded or encoded
+ * @img_height: height of the image that is decoded or encoded
+ * @buf_width: width of the buffer for processed image
+ * @buf_height: height of the buffer for processed image
+ * @luma_size: size of a luma plane
+ * @chroma_size: size of a chroma plane
+ * @mv_size: size of a motion vectors buffer
+ * @consumed_stream: number of bytes that have been used so far from the
+ * decoding buffer
+ * @dpb_flush_flag: flag used to indicate that the DPB buffers are being
+ * flushed
+ * @bank1_buf: handle to memory allocated for temporary buffers from
+ * memory bank 1
+ * @bank1_phys: address of the temporary buffers from memory bank 1
+ * @bank1_size: size of the memory allocated for temporary buffers from
+ * memory bank 1
+ * @bank2_buf: handle to memory allocated for temporary buffers from
+ * memory bank 2
+ * @bank2_phys: address of the temporary buffers from memory bank 2
+ * @bank2_size: size of the memory allocated for temporary buffers from
+ * memory bank 2
+ * @capture_state: state of the capture buffers queue
+ * @output_state: state of the output buffers queue
+ * @src_bufs: information on allocated source buffers
+ * @dst_bufs: information on allocated destination buffers
+ * @sequence: counter for the sequence number for v4l2
+ * @dec_dst_flag: flags for buffers queued in the hardware
+ * @dec_src_buf_size: size of the buffer for source buffers in decoding
+ * @codec_mode: number of codec mode used by MFC hw
+ * @slice_interface: slice interface flag
+ * @loop_filter_mpeg4: loop filter for MPEG4 flag
+ * @display_delay: value of the display delay for H264
+ * @display_delay_enable: display delay for H264 enable flag
+ * @after_packed_pb: flag used to track buffer when stream is in
+ * Packed PB format
+ * @dpb_count: count of the DPB buffers required by MFC hw
+ * @total_dpb_count: count of DPB buffers with additional buffers
+ * requested by the application
+ * @ctx_buf: handle to the memory associated with this context
+ * @ctx_phys: address of the memory associated with this context
+ * @ctx_size: size of the memory associated with this context
+ * @desc_buf: handle to the descriptor buffer used for decoding
+ * @desc_phys: address of the descriptor buffer used for decoding
+ * @shm_alloc: handle for the shared memory buffer
+ * @shm: virtual address for the shared memory buffer
+ * @shm_ofs: address offset for shared memory
+ * @enc_params: encoding parameters for MFC
+ * @enc_dst_buf_size: size of the buffers for encoder output
+ * @force_frame_type: used to force the type of the next encoded frame
+ * @ref_queue: list of the reference buffers for encoding
+ * @ref_queue_cnt: number of the buffers in the reference list
+ * @c_ops: ops for encoding
+ * @ctrls: array of controls, used when adding controls to the
+ * v4l2 control framework
+ * @ctrl_handler: handler for v4l2 framework
+ */
+struct s5p_mfc_ctx {
+ struct s5p_mfc_dev *dev;
+ struct v4l2_fh fh;
+
+ int num;
+
+ int int_cond;
+ int int_type;
+ unsigned int int_err;
+ wait_queue_head_t queue;
+
+ struct s5p_mfc_fmt *src_fmt;
+ struct s5p_mfc_fmt *dst_fmt;
+
+ struct vb2_queue vq_src;
+ struct vb2_queue vq_dst;
+
+ struct list_head src_queue;
+ struct list_head dst_queue;
+
+ unsigned int src_queue_cnt;
+ unsigned int dst_queue_cnt;
+
+ enum s5p_mfc_inst_type type;
+ enum s5p_mfc_inst_state state;
+ int inst_no;
+
+ /* Image parameters */
+ int img_width;
+ int img_height;
+ int buf_width;
+ int buf_height;
+
+ int luma_size;
+ int chroma_size;
+ int mv_size;
+
+ unsigned long consumed_stream;
+
+ unsigned int dpb_flush_flag;
+
+ /* Buffers */
+ void *bank1_buf;
+ size_t bank1_phys;
+ size_t bank1_size;
+
+ void *bank2_buf;
+ size_t bank2_phys;
+ size_t bank2_size;
+
+ enum s5p_mfc_queue_state capture_state;
+ enum s5p_mfc_queue_state output_state;
+
+ struct s5p_mfc_buf src_bufs[MFC_MAX_BUFFERS];
+ int src_bufs_cnt;
+ struct s5p_mfc_buf dst_bufs[MFC_MAX_BUFFERS];
+ int dst_bufs_cnt;
+
+ unsigned int sequence;
+ unsigned long dec_dst_flag;
+ size_t dec_src_buf_size;
+
+ /* Control values */
+ int codec_mode;
+ int slice_interface;
+ int loop_filter_mpeg4;
+ int display_delay;
+ int display_delay_enable;
+ int after_packed_pb;
+
+ int dpb_count;
+ int total_dpb_count;
+
+ /* Buffers */
+ void *ctx_buf;
+ size_t ctx_phys;
+ size_t ctx_ofs;
+ size_t ctx_size;
+
+ void *desc_buf;
+ size_t desc_phys;
+
+ void *shm_alloc;
+ void *shm;
+ size_t shm_ofs;
+
+ struct s5p_mfc_enc_params enc_params;
+
+ size_t enc_dst_buf_size;
+
+ enum v4l2_mpeg_mfc51_video_force_frame_type force_frame_type;
+
+ struct list_head ref_queue;
+ unsigned int ref_queue_cnt;
+
+ struct s5p_mfc_codec_ops *c_ops;
+
+ struct v4l2_ctrl *ctrls[MFC_MAX_CTRLS];
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/*
+ * struct s5p_mfc_fmt - structure used to store information about pixelformats
+ * used by the MFC
+ */
+struct s5p_mfc_fmt {
+ char *name;
+ u32 fourcc;
+ u32 codec_mode;
+ enum s5p_mfc_fmt_type type;
+ u32 num_planes;
+};
+
+/**
+ * struct mfc_control - structure used to store information about MFC controls
+ * it is used to initialize the control framework.
+ */
+struct mfc_control {
+ __u32 id;
+ enum v4l2_ctrl_type type;
+ __u8 name[32]; /* Control name */
+ __s32 minimum; /* Note signedness */
+ __s32 maximum;
+ __s32 step;
+ __u32 menu_skip_mask;
+ __s32 default_value;
+ __u32 flags;
+ __u32 reserved[2];
+ __u8 is_volatile;
+};
+
+
+#define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh)
+#define ctrl_to_ctx(__ctrl) \
+ container_of((__ctrl)->handler, struct s5p_mfc_ctx, ctrl_handler)
+
+#endif /* S5P_MFC_COMMON_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
new file mode 100644
index 0000000..5f4da80
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
@@ -0,0 +1,343 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_pm.h"
+
+static void *s5p_mfc_bitproc_buf;
+static size_t s5p_mfc_bitproc_phys;
+static unsigned char *s5p_mfc_bitproc_virt;
+
+/* Allocate and load firmware */
+int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
+{
+ struct firmware *fw_blob;
+ size_t bank2_base_phys;
+ void *b_base;
+ int err;
+
+ /* Firmware has to be present as a separate file or compiled
+ * into the kernel. */
+ mfc_debug_enter();
+ err = request_firmware((const struct firmware **)&fw_blob,
+ "s5pc110-mfc.fw", dev->v4l2_dev.dev);
+ if (err != 0) {
+ mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
+ return -EINVAL;
+ }
+ dev->fw_size = ALIGN(fw_blob->size, FIRMWARE_ALIGN);
+ if (s5p_mfc_bitproc_buf) {
+ mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
+ release_firmware(fw_blob);
+ return -ENOMEM;
+ }
+ s5p_mfc_bitproc_buf = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], dev->fw_size);
+ if (IS_ERR(s5p_mfc_bitproc_buf)) {
+ s5p_mfc_bitproc_buf = 0;
+ mfc_err("Allocating bitprocessor buffer failed\n");
+ release_firmware(fw_blob);
+ return -ENOMEM;
+ }
+ s5p_mfc_bitproc_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], s5p_mfc_bitproc_buf);
+ if (s5p_mfc_bitproc_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
+ mfc_err("The base memory for bank 1 is not aligned to 128KB\n");
+ vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+ s5p_mfc_bitproc_phys = 0;
+ s5p_mfc_bitproc_buf = 0;
+ release_firmware(fw_blob);
+ return -EIO;
+ }
+ s5p_mfc_bitproc_virt = vb2_dma_contig_memops.vaddr(s5p_mfc_bitproc_buf);
+ if (!s5p_mfc_bitproc_virt) {
+ mfc_err("Bitprocessor memory remap failed\n");
+ vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+ s5p_mfc_bitproc_phys = 0;
+ s5p_mfc_bitproc_buf = 0;
+ release_firmware(fw_blob);
+ return -EIO;
+ }
+ dev->bank1 = s5p_mfc_bitproc_phys;
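+ /*
+ * The allocation below is only used to discover the base address of
+ * memory bank 2: the buffer is freed again right after its DMA cookie
+ * has been read back.
+ */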
+ b_base = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], 1 << MFC_BANK2_ALIGN_ORDER);
+ if (IS_ERR(b_base)) {
+ vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+ s5p_mfc_bitproc_phys = 0;
+ s5p_mfc_bitproc_buf = 0;
+ mfc_err("Allocating bank2 base failed\n");
+ release_firmware(fw_blob);
+ return -ENOMEM;
+ }
+ bank2_base_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], b_base);
+ vb2_dma_contig_memops.put(b_base);
+ if (bank2_base_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
+ mfc_err("The base memory for bank 2 is not aligned to 128KB\n");
+ vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+ s5p_mfc_bitproc_phys = 0;
+ s5p_mfc_bitproc_buf = 0;
+ release_firmware(fw_blob);
+ return -EIO;
+ }
+ dev->bank2 = bank2_base_phys;
+ memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
+ wmb();
+ release_firmware(fw_blob);
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Reload firmware to MFC */
+int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
+{
+ struct firmware *fw_blob;
+ int err;
+
+ /* Firmware has to be present as a separate file or compiled
+ * into the kernel. */
+ mfc_debug_enter();
+ err = request_firmware((const struct firmware **)&fw_blob,
+ "s5pc110-mfc.fw", dev->v4l2_dev.dev);
+ if (err != 0) {
+ mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
+ return -EINVAL;
+ }
+ if (fw_blob->size > dev->fw_size) {
+ mfc_err("MFC firmware is too big to be loaded\n");
+ release_firmware(fw_blob);
+ return -ENOMEM;
+ }
+ if (s5p_mfc_bitproc_buf == 0 || s5p_mfc_bitproc_phys == 0) {
+ mfc_err("MFC firmware is not allocated or was not mapped correctly\n");
+ release_firmware(fw_blob);
+ return -EINVAL;
+ }
+ memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
+ wmb();
+ release_firmware(fw_blob);
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Release firmware memory */
+int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
+{
+ /* Before calling this function one has to make sure
+ * that MFC is no longer processing */
+ if (!s5p_mfc_bitproc_buf)
+ return -EINVAL;
+ vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+ s5p_mfc_bitproc_virt = 0;
+ s5p_mfc_bitproc_phys = 0;
+ s5p_mfc_bitproc_buf = 0;
+ return 0;
+}
+
+/* Reset the device */
+int s5p_mfc_reset(struct s5p_mfc_dev *dev)
+{
+ unsigned int mc_status;
+ unsigned long timeout;
+
+ mfc_debug_enter();
+ /* Stop procedure */
+ /* reset RISC */
+ mfc_write(dev, 0x3f6, S5P_FIMV_SW_RESET);
+ /* All reset except for MC */
+ mfc_write(dev, 0x3e2, S5P_FIMV_SW_RESET);
+ mdelay(10);
+
+ timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
+ /* Check MC status */
+ do {
+ if (time_after(jiffies, timeout)) {
+ mfc_err("Timeout while resetting MFC\n");
+ return -EIO;
+ }
+
+ mc_status = mfc_read(dev, S5P_FIMV_MC_STATUS);
+
+ } while (mc_status & 0x3);
+
+ mfc_write(dev, 0x0, S5P_FIMV_SW_RESET);
+ mfc_write(dev, 0x3fe, S5P_FIMV_SW_RESET);
+ mfc_debug_leave();
+ return 0;
+}
+
+static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
+{
+ mfc_write(dev, dev->bank1, S5P_FIMV_MC_DRAMBASE_ADR_A);
+ mfc_write(dev, dev->bank2, S5P_FIMV_MC_DRAMBASE_ADR_B);
+ mfc_debug(2, "Bank1: %08x, Bank2: %08x\n", dev->bank1, dev->bank2);
+}
+
+static inline void s5p_mfc_clear_cmds(struct s5p_mfc_dev *dev)
+{
+ mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH0_INST_ID);
+ mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH1_INST_ID);
+ mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
+ mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD);
+}
+
+/* Initialize hardware */
+int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
+{
+ unsigned int ver;
+ int ret;
+
+ mfc_debug_enter();
+ if (!s5p_mfc_bitproc_buf)
+ return -EINVAL;
+
+ /* 0. MFC reset */
+ mfc_debug(2, "MFC reset..\n");
+ s5p_mfc_clock_on();
+ ret = s5p_mfc_reset(dev);
+ if (ret) {
+ mfc_err("Failed to reset MFC - timeout\n");
+ return ret;
+ }
+ mfc_debug(2, "Done MFC reset..\n");
+ /* 1. Set DRAM base Addr */
+ s5p_mfc_init_memctrl(dev);
+ /* 2. Initialize registers of channel I/F */
+ s5p_mfc_clear_cmds(dev);
+ /* 3. Release reset signal to the RISC */
+ s5p_mfc_clean_dev_int_flags(dev);
+ mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
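+ /*
+ * Releasing the RISC core from reset is expected to make it boot the
+ * firmware just copied into bank 1 and to signal completion through the
+ * FW_STATUS interrupt that is waited for below.
+ */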
+ mfc_debug(2, "Will now wait for completion of firmware transfer\n");
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_FW_STATUS_RET)) {
+ mfc_err("Failed to load firmware\n");
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return -EIO;
+ }
+ s5p_mfc_clean_dev_int_flags(dev);
+ /* 4. Initialize firmware */
+ ret = s5p_mfc_sys_init_cmd(dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFC - timeout\n");
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return ret;
+ }
+ mfc_debug(2, "Ok, now will write a command to init the system\n");
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SYS_INIT_RET)) {
+ mfc_err("Failed to load firmware\n");
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return -EIO;
+ }
+ dev->int_cond = 0;
+ if (dev->int_err != 0 || dev->int_type !=
+ S5P_FIMV_R2H_CMD_SYS_INIT_RET) {
+ /* Failure. */
+ mfc_err("Failed to init firmware - error: %d int: %d\n",
+ dev->int_err, dev->int_type);
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return -EIO;
+ }
+ ver = mfc_read(dev, S5P_FIMV_FW_VERSION);
+ mfc_debug(2, "MFC F/W version : %02xyy, %02xmm, %02xdd\n",
+ (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
+ s5p_mfc_clock_off();
+ mfc_debug_leave();
+ return 0;
+}
+
+int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
+{
+ int ret;
+
+ mfc_debug_enter();
+ s5p_mfc_clock_on();
+ s5p_mfc_clean_dev_int_flags(dev);
+ ret = s5p_mfc_sleep_cmd(dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFC - timeout\n");
+ return ret;
+ }
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SLEEP_RET)) {
+ mfc_err("Failed to sleep\n");
+ return -EIO;
+ }
+ s5p_mfc_clock_off();
+ dev->int_cond = 0;
+ if (dev->int_err != 0 || dev->int_type !=
+ S5P_FIMV_R2H_CMD_SLEEP_RET) {
+ /* Failure. */
+ mfc_err("Failed to sleep - error: %d int: %d\n", dev->int_err,
+ dev->int_type);
+ return -EIO;
+ }
+ mfc_debug_leave();
+ return ret;
+}
+
+int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
+{
+ int ret;
+
+ mfc_debug_enter();
+ /* 0. MFC reset */
+ mfc_debug(2, "MFC reset..\n");
+ s5p_mfc_clock_on();
+ ret = s5p_mfc_reset(dev);
+ if (ret) {
+ mfc_err("Failed to reset MFC - timeout\n");
+ return ret;
+ }
+ mfc_debug(2, "Done MFC reset..\n");
+ /* 1. Set DRAM base Addr */
+ s5p_mfc_init_memctrl(dev);
+ /* 2. Initialize registers of channel I/F */
+ s5p_mfc_clear_cmds(dev);
+ s5p_mfc_clean_dev_int_flags(dev);
+ /* 3. Initialize firmware */
+ ret = s5p_mfc_wakeup_cmd(dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFC - timeout\n");
+ return ret;
+ }
+ /* 4. Release reset signal to the RISC */
+ mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
+ mfc_debug(2, "Ok, now will write a command to wakeup the system\n");
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_WAKEUP_RET)) {
+ mfc_err("Failed to load firmware\n");
+ return -EIO;
+ }
+ s5p_mfc_clock_off();
+ dev->int_cond = 0;
+ if (dev->int_err != 0 || dev->int_type !=
+ S5P_FIMV_R2H_CMD_WAKEUP_RET) {
+ /* Failure. */
+ mfc_err("Failed to wakeup - error: %d int: %d\n", dev->int_err,
+ dev->int_type);
+ return -EIO;
+ }
+ mfc_debug_leave();
+ return 0;
+}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h
new file mode 100644
index 0000000..61dc23b
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h
@@ -0,0 +1,29 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_CTRL_H
+#define S5P_MFC_CTRL_H
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_init_hw(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_sleep(struct s5p_mfc_dev *dev);
+int s5p_mfc_wakeup(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_reset(struct s5p_mfc_dev *dev);
+
+#endif /* S5P_MFC_CTRL_H */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_debug.h b/drivers/media/video/s5p-mfc/s5p_mfc_debug.h
new file mode 100644
index 0000000..ecb8616
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_debug.h
@@ -0,0 +1,48 @@
+/*
+ * drivers/media/video/samsung/mfc5/s5p_mfc_debug.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains debug macros
+ *
+ * Kamil Debski, Copyright (c) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_DEBUG_H_
+#define S5P_MFC_DEBUG_H_
+
+#define DEBUG
+
+#ifdef DEBUG
+extern int debug;
+
+#define mfc_debug(level, fmt, args...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+#else
+#define mfc_debug(level, fmt, args...)
+#endif
+
+#define mfc_debug_enter() mfc_debug(5, "enter")
+#define mfc_debug_leave() mfc_debug(5, "leave")
+
+#define mfc_err(fmt, args...) \
+ do { \
+ printk(KERN_ERR "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#define mfc_info(fmt, args...) \
+ do { \
+ printk(KERN_INFO "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#endif /* S5P_MFC_DEBUG_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
new file mode 100644
index 0000000..b2c5052
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -0,0 +1,1036 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_dec.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_shm.h"
+
+static struct s5p_mfc_fmt formats[] = {
+ {
+ .name = "4:2:0 2 Planes 64x32 Tiles",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .codec_mode = S5P_FIMV_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ },
+ {
+ .name = "4:2:0 2 Planes",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = S5P_FIMV_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ },
+ {
+ .name = "H264 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H264,
+ .codec_mode = S5P_FIMV_CODEC_H264_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "H263 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H263,
+ .codec_mode = S5P_FIMV_CODEC_H263_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "MPEG1 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG1,
+ .codec_mode = S5P_FIMV_CODEC_MPEG2_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "MPEG2 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG2,
+ .codec_mode = S5P_FIMV_CODEC_MPEG2_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "MPEG4 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG4,
+ .codec_mode = S5P_FIMV_CODEC_MPEG4_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "XviD Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_XVID,
+ .codec_mode = S5P_FIMV_CODEC_MPEG4_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "VC1 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
+ .codec_mode = S5P_FIMV_CODEC_VC1_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "VC1 RCV Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
+ .codec_mode = S5P_FIMV_CODEC_VC1RCV_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+/* Find selected format description */
+static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+ formats[i].type == t)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static struct mfc_control controls[] = {
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H264 Display Delay",
+ .minimum = 0,
+ .maximum = 16383,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Display Delay Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mpeg4 Loop Filter Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Slice Interface Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Minimum number of cap bufs",
+ .minimum = 1,
+ .maximum = 32,
+ .step = 1,
+ .default_value = 1,
+ .is_volatile = 1,
+ },
+};
+
+#define NUM_CTRLS ARRAY_SIZE(controls)
+
+/* Check whether a context should be run on hardware */
+static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
+{
+ /* Context is to parse header */
+ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_GOT_INST)
+ return 1;
+ /* Context is to decode a frame */
+ if (ctx->src_queue_cnt >= 1 &&
+ ctx->state == MFCINST_RUNNING &&
+ ctx->dst_queue_cnt >= ctx->dpb_count)
+ return 1;
+ /* Context is to return last frame */
+ if (ctx->state == MFCINST_FINISHING &&
+ ctx->dst_queue_cnt >= ctx->dpb_count)
+ return 1;
+ /* Context is to set buffers */
+ if (ctx->src_queue_cnt >= 1 &&
+ ctx->state == MFCINST_HEAD_PARSED &&
+ ctx->capture_state == QUEUE_BUFS_MMAPED)
+ return 1;
+ /* Resolution change */
+ if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
+ ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
+ ctx->dst_queue_cnt >= ctx->dpb_count)
+ return 1;
+ if (ctx->state == MFCINST_RES_CHANGE_END &&
+ ctx->src_queue_cnt >= 1)
+ return 1;
+ mfc_debug(2, "ctx is not ready\n");
+ return 0;
+}
+
+static struct s5p_mfc_codec_ops decoder_codec_ops = {
+ .pre_seq_start = NULL,
+ .post_seq_start = NULL,
+ .pre_frame_start = NULL,
+ .post_frame_start = NULL,
+};
+
+/* Query capabilities of the device */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+
+ strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
+ strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(1, 0, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+/* Enumerate format */
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool mplane, bool out)
+{
+ struct s5p_mfc_fmt *fmt;
+ int i, j = 0;
+
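+ /* Walk the format table, counting only entries that match the requested
+ * multiplanar/direction filters, until the f->index-th match is found. */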
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (mplane && formats[i].num_planes == 1)
+ continue;
+ else if (!mplane && formats[i].num_planes > 1)
+ continue;
+ if (out && formats[i].type != MFC_FMT_DEC)
+ continue;
+ else if (!out && formats[i].type != MFC_FMT_RAW)
+ continue;
+
+ if (j == f->index)
+ break;
+ ++j;
+ }
+ if (i == ARRAY_SIZE(formats))
+ return -EINVAL;
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false, false);
+}
+
+static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true, false);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false, true);
+}
+
+static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true, true);
+}
+
+/* Get format */
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ mfc_debug_enter();
+ pix_mp = &f->fmt.pix_mp;
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ (ctx->state == MFCINST_GOT_INST || ctx->state ==
+ MFCINST_RES_CHANGE_END)) {
+ /* If the MFC is parsing the header,
+ * wait until it is finished */
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_SEQ_DONE_RET,
+ 0);
+ }
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ /* This is run on CAPTURE (decode output) */
+ /* Width and height are set to the dimensions
+ of the movie, the buffer is bigger and
+ further processing stages should crop to this
+ rectangle. */
+ pix_mp->width = ctx->buf_width;
+ pix_mp->height = ctx->buf_height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->num_planes = 2;
+ /* Set pixelformat to the format in which MFC
+ outputs the decoded frame */
+ pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT;
+ pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ pix_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* This is run on OUTPUT
+ The buffer contains compressed image
+ so width and height have no meaning */
+ pix_mp->width = 0;
+ pix_mp->height = 0;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->plane_fmt[0].bytesperline = ctx->dec_src_buf_size;
+ pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size;
+ pix_mp->pixelformat = ctx->src_fmt->fourcc;
+ pix_mp->num_planes = ctx->src_fmt->num_planes;
+ } else {
+ mfc_err("Format could not be read\n");
+ mfc_debug(2, "%s-- with error\n", __func__);
+ return -EINVAL;
+ }
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Try format */
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_fmt *fmt;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mfc_err("This node supports decoding only\n");
+ return -EINVAL;
+ }
+ fmt = find_format(f, MFC_FMT_DEC);
+ if (!fmt) {
+ mfc_err("Unsupported format\n");
+ return -EINVAL;
+ }
+ if (fmt->type != MFC_FMT_DEC) {
+ mfc_err("\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Set format */
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+ struct s5p_mfc_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ mfc_debug_enter();
+ ret = vidioc_try_fmt(file, priv, f);
+ pix_mp = &f->fmt.pix_mp;
+ if (ret)
+ return ret;
+ if (ctx->vq_src.streaming || ctx->vq_dst.streaming) {
+ v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+ fmt = find_format(f, MFC_FMT_DEC);
+ if (!fmt || fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
+ mfc_err("Unknown codec\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (fmt->type != MFC_FMT_DEC) {
+ mfc_err("Wrong format selected, you should choose "
+ "format for decoding\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ ctx->src_fmt = fmt;
+ ctx->codec_mode = fmt->codec_mode;
+ mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
+ pix_mp->height = 0;
+ pix_mp->width = 0;
+ if (pix_mp->plane_fmt[0].sizeimage)
+ ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
+ else
+ pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
+ DEF_CPB_SIZE;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ ctx->state = MFCINST_INIT;
+out:
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Request buffers */
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+ unsigned long flags;
+
+ if (reqbufs->memory != V4L2_MEMORY_MMAP) {
+ mfc_err("Only V4L2_MEMORY_MAP is supported\n");
+ return -EINVAL;
+ }
+ if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* Can only request buffers after an instance has been opened.*/
+ if (ctx->state == MFCINST_INIT) {
+ ctx->src_bufs_cnt = 0;
+ if (reqbufs->count == 0) {
+ mfc_debug(2, "Freeing buffers\n");
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ s5p_mfc_clock_off();
+ return ret;
+ }
+ /* Decoding */
+ if (ctx->output_state != QUEUE_FREE) {
+ mfc_err("Bufs have already been requested\n");
+ return -EINVAL;
+ }
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ s5p_mfc_clock_off();
+ if (ret) {
+ mfc_err("vb2_reqbufs on output failed\n");
+ return ret;
+ }
+ mfc_debug(2, "vb2_reqbufs: %d\n", ret);
+ ctx->output_state = QUEUE_BUFS_REQUESTED;
+ }
+ } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ctx->dst_bufs_cnt = 0;
+ if (reqbufs->count == 0) {
+ mfc_debug(2, "Freeing buffers\n");
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_clock_off();
+ return ret;
+ }
+ if (ctx->capture_state != QUEUE_FREE) {
+ mfc_err("Bufs have already been requested\n");
+ return -EINVAL;
+ }
+ ctx->capture_state = QUEUE_BUFS_REQUESTED;
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_clock_off();
+ if (ret) {
+ mfc_err("vb2_reqbufs on capture failed\n");
+ return ret;
+ }
+ if (reqbufs->count < ctx->dpb_count) {
+ mfc_err("Not enough buffers allocated\n");
+ reqbufs->count = 0;
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_clock_off();
+ return -ENOMEM;
+ }
+ ctx->total_dpb_count = reqbufs->count;
+ ret = s5p_mfc_alloc_codec_buffers(ctx);
+ if (ret) {
+ mfc_err("Failed to allocate decoding buffers\n");
+ reqbufs->count = 0;
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_clock_off();
+ return -ENOMEM;
+ }
+ if (ctx->dst_bufs_cnt == ctx->total_dpb_count) {
+ ctx->capture_state = QUEUE_BUFS_MMAPED;
+ } else {
+ mfc_err("Not all buffers passed to buf_init\n");
+ reqbufs->count = 0;
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_release_codec_buffers(ctx);
+ s5p_mfc_clock_off();
+ return -ENOMEM;
+ }
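+ /*
+ * With all capture buffers registered, schedule the context so the
+ * hardware can initialise its buffer lists, and wait for
+ * INIT_BUFFERS_RET before returning to user space.
+ */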
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET, 0);
+ }
+ return ret;
+}
+
+/* Query buffer */
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+ int i;
+
+ if (buf->memory != V4L2_MEMORY_MMAP) {
+ mfc_err("Only mmaped buffers can be used\n");
+ return -EINVAL;
+ }
+ mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type);
+ if (ctx->state == MFCINST_INIT &&
+ buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = vb2_querybuf(&ctx->vq_src, buf);
+ } else if (ctx->state == MFCINST_RUNNING &&
+ buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = vb2_querybuf(&ctx->vq_dst, buf);
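+ /*
+ * Capture mmap offsets are shifted by DST_QUEUE_OFF_BASE so that they
+ * do not collide with the output queue offsets, as both queues are
+ * served through the same video node.
+ */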
+ for (i = 0; i < buf->length; i++)
+ buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE;
+ } else {
+ mfc_err("vidioc_querybuf called in an inappropriate state\n");
+ ret = -EINVAL;
+ }
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Queue a buffer */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err("Call on QBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_qbuf(&ctx->vq_src, buf);
+ else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_qbuf(&ctx->vq_dst, buf);
+ return -EINVAL;
+}
+
+/* Dequeue a buffer */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err("Call on DQBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
+ else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
+ return -EINVAL;
+}
+
+/* Stream on */
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ mfc_debug_enter();
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (ctx->state == MFCINST_INIT) {
+ ctx->dst_bufs_cnt = 0;
+ ctx->src_bufs_cnt = 0;
+ ctx->capture_state = QUEUE_FREE;
+ ctx->output_state = QUEUE_FREE;
+ s5p_mfc_alloc_instance_buffer(ctx);
+ s5p_mfc_alloc_dec_temp_buffers(ctx);
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_try_run(dev);
+
+ if (s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 0)) {
+ /* Error or timeout */
+ mfc_err("Error getting instance from hardware\n");
+ s5p_mfc_release_instance_buffer(ctx);
+ s5p_mfc_release_dec_desc_buffer(ctx);
+ return -EIO;
+ }
+ mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
+ }
+ ret = vb2_streamon(&ctx->vq_src, type);
+ } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ret = vb2_streamon(&ctx->vq_dst, type);
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Stream off, which equals to a pause */
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_streamoff(&ctx->vq_src, type);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_streamoff(&ctx->vq_dst, type);
+ return -EINVAL;
+}
+
+/* Set controls - v4l2 control framework */
+static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
+ ctx->display_delay = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
+ ctx->display_delay_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ ctx->loop_filter_mpeg4 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
+ ctx->slice_interface = ctrl->val;
+ break;
+ default:
+ mfc_err("Invalid control 0x%08x\n", ctrl->id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ ctrl->val = ctx->dpb_count;
+ break;
+ } else if (ctx->state != MFCINST_INIT) {
+ v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
+ return -EINVAL;
+ }
+ /* Should wait for the header to be parsed */
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_FIMV_R2H_CMD_SEQ_DONE_RET, 0);
+ if (ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ ctrl->val = ctx->dpb_count;
+ } else {
+ v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
+ return -EINVAL;
+ }
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops s5p_mfc_dec_ctrl_ops = {
+ .s_ctrl = s5p_mfc_dec_s_ctrl,
+ .g_volatile_ctrl = s5p_mfc_dec_g_v_ctrl,
+};
+
+/* Get cropping information */
+static int vidioc_g_crop(struct file *file, void *priv,
+ struct v4l2_crop *cr)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ u32 left, right, top, bottom;
+
+ if (ctx->state != MFCINST_HEAD_PARSED &&
+ ctx->state != MFCINST_RUNNING && ctx->state != MFCINST_FINISHING
+ && ctx->state != MFCINST_FINISHED) {
+ mfc_err("Cannont set crop\n");
+ return -EINVAL;
+ }
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) {
+ left = s5p_mfc_read_shm(ctx, CROP_INFO_H);
+ right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT;
+ left = left & S5P_FIMV_SHARED_CROP_LEFT_MASK;
+ top = s5p_mfc_read_shm(ctx, CROP_INFO_V);
+ bottom = top >> S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT;
+ top = top & S5P_FIMV_SHARED_CROP_TOP_MASK;
+ cr->c.left = left;
+ cr->c.top = top;
+ cr->c.width = ctx->img_width - left - right;
+ cr->c.height = ctx->img_height - top - bottom;
+ mfc_debug(2, "Cropping info [h264]: l=%d t=%d "
+ "w=%d h=%d (r=%d b=%d fw=%d fh=%d\n", left, top,
+ cr->c.width, cr->c.height, right, bottom,
+ ctx->buf_width, ctx->buf_height);
+ } else {
+ cr->c.left = 0;
+ cr->c.top = 0;
+ cr->c.width = ctx->img_width;
+ cr->c.height = ctx->img_height;
+ mfc_debug(2, "Cropping info: w=%d h=%d fw=%d "
+ "fh=%d\n", cr->c.width, cr->c.height, ctx->buf_width,
+ ctx->buf_height);
+ }
+ return 0;
+}
+
+/* v4l2_ioctl_ops */
+static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_g_crop = vidioc_g_crop,
+};
+
+static int s5p_mfc_queue_setup(struct vb2_queue *vq, unsigned int *buf_count,
+ unsigned int *plane_count, unsigned long psize[],
+ void *allocators[])
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+
+ /* Video output for decoding (source)
+ * this can be set after getting an instance */
+ if (ctx->state == MFCINST_INIT &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* A single plane is required for input */
+ *plane_count = 1;
+ if (*buf_count < 1)
+ *buf_count = 1;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
+ /* Video capture for decoding (destination)
+ * this can be set after the header was parsed */
+ } else if (ctx->state == MFCINST_HEAD_PARSED &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ /* Output plane count is 2 - one for Y and one for CbCr */
+ *plane_count = 2;
+ /* Setup buffer count */
+ if (*buf_count < ctx->dpb_count)
+ *buf_count = ctx->dpb_count;
+ if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB)
+ *buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
+ } else {
+ mfc_err("State seems invalid. State = %d, vq->type = %d\n",
+ ctx->state, vq->type);
+ return -EINVAL;
+ }
+ mfc_debug(2, "Buffer count=%d, plane count=%d\n",
+ *buf_count, *plane_count);
+ if (ctx->state == MFCINST_HEAD_PARSED &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
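+ /*
+ * Luma and chroma planes are placed in different memory banks; the
+ * split allocation is assumed to let the MFC fetch the two planes
+ * through separate memory ports.
+ */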
+ psize[0] = ctx->luma_size;
+ psize[1] = ctx->chroma_size;
+ allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
+ allocators[1] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ ctx->state == MFCINST_INIT) {
+ psize[0] = ctx->dec_src_buf_size;
+ allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ } else {
+ mfc_err("This video node is dedicated to decoding. Decoding not initalised\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void s5p_mfc_unlock(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mutex_unlock(&dev->mfc_mutex);
+}
+
+static void s5p_mfc_lock(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mutex_lock(&dev->mfc_mutex);
+}
+
+static int s5p_mfc_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ unsigned int i;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->capture_state == QUEUE_BUFS_MMAPED)
+ return 0;
+ for (i = 0; i <= ctx->src_fmt->num_planes ; i++) {
+ if (IS_ERR_OR_NULL(ERR_PTR(
+ vb2_dma_contig_plane_paddr(vb, i)))) {
+ mfc_err("Plane mem not allocated\n");
+ return -EINVAL;
+ }
+ }
+ if (vb2_plane_size(vb, 0) < ctx->luma_size ||
+ vb2_plane_size(vb, 1) < ctx->chroma_size) {
+ mfc_err("Plane buffer (CAPTURE) is too small\n");
+ return -EINVAL;
+ }
+ i = vb->v4l2_buf.index;
+ ctx->dst_bufs[i].b = vb;
+ ctx->dst_bufs[i].cookie.raw.luma =
+ vb2_dma_contig_plane_paddr(vb, 0);
+ ctx->dst_bufs[i].cookie.raw.chroma =
+ vb2_dma_contig_plane_paddr(vb, 1);
+ ctx->dst_bufs_cnt++;
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (IS_ERR_OR_NULL(ERR_PTR(
+ vb2_dma_contig_plane_paddr(vb, 0)))) {
+ mfc_err("Plane memory not allocated\n");
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < ctx->dec_src_buf_size) {
+ mfc_err("Plane buffer (OUTPUT) is too small\n");
+ return -EINVAL;
+ }
+
+ i = vb->v4l2_buf.index;
+ ctx->src_bufs[i].b = vb;
+ ctx->src_bufs[i].cookie.stream =
+ vb2_dma_contig_plane_paddr(vb, 0);
+ ctx->src_bufs_cnt++;
+ } else {
+ mfc_err("s5p_mfc_buf_init: unknown queue type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_start_streaming(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ if (ctx->state == MFCINST_FINISHING ||
+ ctx->state == MFCINST_FINISHED)
+ ctx->state = MFCINST_RUNNING;
+ /* If the context is ready then schedule it to run */
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+ return 0;
+}
+
+static int s5p_mfc_stop_streaming(struct vb2_queue *q)
+{
+ unsigned long flags;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ int aborted = 0;
+
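+ /*
+ * If the hardware is currently working on this context, request an
+ * abort and wait for the frame in flight to complete before cleaning
+ * up the queues; the context is then returned to the RUNNING state so
+ * streaming can be restarted later.
+ */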
+ if ((ctx->state == MFCINST_FINISHING ||
+ ctx->state == MFCINST_RUNNING) &&
+ dev->curr_ctx == ctx->num && dev->hw_lock) {
+ ctx->state = MFCINST_ABORT;
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_FIMV_R2H_CMD_FRAME_DONE_RET, 0);
+ aborted = 1;
+ }
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ INIT_LIST_HEAD(&ctx->dst_queue);
+ ctx->dst_queue_cnt = 0;
+ ctx->dpb_flush_flag = 1;
+ ctx->dec_dst_flag = 0;
+ }
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ INIT_LIST_HEAD(&ctx->src_queue);
+ ctx->src_queue_cnt = 0;
+ }
+ if (aborted)
+ ctx->state = MFCINST_RUNNING;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return 0;
+}
+
+static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *mfc_buf;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
+ mfc_buf->used = 0;
+ spin_lock_irqsave(&dev->irqlock, flags);
+ list_add_tail(&mfc_buf->list, &ctx->src_queue);
+ ctx->src_queue_cnt++;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
+ mfc_buf->used = 0;
+ /* Mark destination as available for use by MFC */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ set_bit(vb->v4l2_buf.index, &ctx->dec_dst_flag);
+ list_add_tail(&mfc_buf->list, &ctx->dst_queue);
+ ctx->dst_queue_cnt++;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else {
+ mfc_err("Unsupported buffer type (%d)\n", vq->type);
+ }
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+}
+
+static struct vb2_ops s5p_mfc_dec_qops = {
+ .queue_setup = s5p_mfc_queue_setup,
+ .wait_prepare = s5p_mfc_unlock,
+ .wait_finish = s5p_mfc_lock,
+ .buf_init = s5p_mfc_buf_init,
+ .start_streaming = s5p_mfc_start_streaming,
+ .stop_streaming = s5p_mfc_stop_streaming,
+ .buf_queue = s5p_mfc_buf_queue,
+};
+
+struct s5p_mfc_codec_ops *get_dec_codec_ops(void)
+{
+ return &decoder_codec_ops;
+}
+
+struct vb2_ops *get_dec_queue_ops(void)
+{
+ return &s5p_mfc_dec_qops;
+}
+
+const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void)
+{
+ return &s5p_mfc_dec_ioctl_ops;
+}
+
+#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \
+ && V4L2_CTRL_DRIVER_PRIV(x))
+
+int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
+{
+ struct v4l2_ctrl_config cfg;
+ int i;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
+ if (ctx->ctrl_handler.error) {
+ mfc_err("v4l2_ctrl_handler_init failed\n");
+ return ctx->ctrl_handler.error;
+ }
+
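+ /*
+ * Driver-private MFC 5.1 controls are unknown to the V4L2 core and are
+ * registered with v4l2_ctrl_new_custom(); the remaining IDs are
+ * standard controls and go through v4l2_ctrl_new_std().
+ */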
+ for (i = 0; i < NUM_CTRLS; i++) {
+ if (IS_MFC51_PRIV(controls[i].id)) {
+ cfg.ops = &s5p_mfc_dec_ctrl_ops;
+ cfg.id = controls[i].id;
+ cfg.min = controls[i].minimum;
+ cfg.max = controls[i].maximum;
+ cfg.def = controls[i].default_value;
+ cfg.name = controls[i].name;
+ cfg.type = controls[i].type;
+
+ cfg.step = controls[i].step;
+ cfg.menu_skip_mask = 0;
+
+ ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &cfg, NULL);
+ } else {
+ ctx->ctrls[i] = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &s5p_mfc_dec_ctrl_ops,
+ controls[i].id, controls[i].minimum,
+ controls[i].maximum, controls[i].step,
+ controls[i].default_value);
+ }
+ if (ctx->ctrl_handler.error) {
+ mfc_err("Adding control (%d) failed\n", i);
+ return ctx->ctrl_handler.error;
+ }
+ if (controls[i].is_volatile && ctx->ctrls[i])
+ ctx->ctrls[i]->is_volatile = 1;
+ }
+ return 0;
+}
+
+void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx)
+{
+ int i;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ for (i = 0; i < NUM_CTRLS; i++)
+ ctx->ctrls[i] = NULL;
+}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.h b/drivers/media/video/s5p-mfc/s5p_mfc_dec.h
new file mode 100644
index 0000000..fb8b215
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.h
@@ -0,0 +1,23 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_dec.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_DEC_H_
+#define S5P_MFC_DEC_H_
+
+struct s5p_mfc_codec_ops *get_dec_codec_ops(void);
+struct vb2_ops *get_dec_queue_ops(void);
+const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void);
+struct s5p_mfc_fmt *get_dec_def_fmt(bool src);
+int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_DEC_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
new file mode 100644
index 0000000..fee094a
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -0,0 +1,1829 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Jeongtae Park <jtp.park@samsung.com>
+ * Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_enc.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+
+static struct s5p_mfc_fmt formats[] = {
+ {
+ .name = "4:2:0 2 Planes 64x32 Tiles",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .codec_mode = S5P_FIMV_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ },
+ {
+ .name = "4:2:0 2 Planes",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = S5P_FIMV_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ },
+ {
+ .name = "H264 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H264,
+ .codec_mode = S5P_FIMV_CODEC_H264_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ },
+ {
+ .name = "MPEG4 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG4,
+ .codec_mode = S5P_FIMV_CODEC_MPEG4_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ },
+ {
+ .name = "H264 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H263,
+ .codec_mode = S5P_FIMV_CODEC_H263_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+ formats[i].type == t)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static struct mfc_control controls[] = {
+ {
+ .id = V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+ .maximum = V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
+ .default_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1900,
+ .maximum = (1 << 30) - 1,
+ .step = 1,
+ .default_value = 1900,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_PADDING,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Padding Control Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Padding Color YUV Value",
+ .minimum = 0,
+ .maximum = (1 << 25) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = (1 << 30) - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Rate Control Reaction Coeff.",
+ .minimum = 1,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Force frame type",
+ .minimum = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED,
+ .maximum = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED,
+ .default_value = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VBV_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .maximum = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .default_value = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Frame Skip Enable",
+ .minimum = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .maximum = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ .menu_skip_mask = 0,
+ .default_value = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Fixed Target Bit Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .default_value = 0,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_B_FRAMES,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .maximum = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ .default_value = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+ ),
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
+ .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1)
+ ),
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+ .maximum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+ .default_value = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+ .maximum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
+ .default_value = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = -6,
+ .maximum = 6,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = -6,
+ .maximum = 6,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .maximum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .default_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "The Number of Ref. Pic for P",
+ .minimum = 1,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 I-Frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 Minimum QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 Maximum QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 P frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 B frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 I-Frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 Minimum QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 Maximum QP value",
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 P frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 B frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Dark Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Smooth Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Static Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Activity Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
+ .maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
+ .default_value = 0,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_GOP_CLOSURE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+ .maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
+ .default_value = 0,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_QPEL,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+};
+
+#define NUM_CTRLS ARRAY_SIZE(controls)
+static const char * const *mfc51_get_menu(u32 id)
+{
+ static const char * const mfc51_video_frame_skip[] = {
+ "Disabled",
+ "Level Limit",
+ "VBV/CPB Limit",
+ NULL,
+ };
+ static const char * const mfc51_video_force_frame[] = {
+ "Disabled",
+ "I Frame",
+ "Not Coded",
+ NULL,
+ };
+ switch (id) {
+ case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
+ return mfc51_video_frame_skip;
+ case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
+ return mfc51_video_force_frame;
+ }
+ return NULL;
+}
+
+static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
+{
+ mfc_debug(2, "src=%d, dst=%d, state=%d\n",
+ ctx->src_queue_cnt, ctx->dst_queue_cnt, ctx->state);
+	/* context is ready to generate a stream header */
+ if (ctx->state == MFCINST_GOT_INST && ctx->dst_queue_cnt >= 1)
+ return 1;
+ /* context is ready to encode a frame */
+ if (ctx->state == MFCINST_RUNNING &&
+ ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
+ return 1;
+	/* context is ready to encode the remaining frames */
+ if (ctx->state == MFCINST_FINISHING &&
+ ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
+ return 1;
+ mfc_debug(2, "ctx is not ready\n");
+ return 0;
+}
+
+static void cleanup_ref_queue(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_buf *mb_entry;
+ unsigned long mb_y_addr, mb_c_addr;
+
+ /* move buffers in ref queue to src queue */
+ while (!list_empty(&ctx->ref_queue)) {
+ mb_entry = list_entry((&ctx->ref_queue)->next,
+ struct s5p_mfc_buf, list);
+ mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ list_del(&mb_entry->list);
+ ctx->ref_queue_cnt--;
+ list_add_tail(&mb_entry->list, &ctx->src_queue);
+ ctx->src_queue_cnt++;
+ }
+ mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
+ ctx->src_queue_cnt, ctx->ref_queue_cnt);
+ INIT_LIST_HEAD(&ctx->ref_queue);
+ ctx->ref_queue_cnt = 0;
+}
+
+static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_mb;
+ unsigned long dst_addr;
+ unsigned int dst_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_size = vb2_plane_size(dst_mb->b, 0);
+ s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return 0;
+}
+
+static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_buf *dst_mb;
+ unsigned long flags;
+
+ if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dst_mb = list_entry(ctx->dst_queue.next,
+ struct s5p_mfc_buf, list);
+ list_del(&dst_mb->list);
+ ctx->dst_queue_cnt--;
+ vb2_set_plane_payload(dst_mb->b, 0,
+ s5p_mfc_get_enc_strm_size());
+ vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ }
+ ctx->state = MFCINST_RUNNING;
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+ return 0;
+}
+
+static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_mb;
+ struct s5p_mfc_buf *src_mb;
+ unsigned long flags;
+ unsigned long src_y_addr, src_c_addr, dst_addr;
+ unsigned int dst_size;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ src_y_addr = vb2_dma_contig_plane_paddr(src_mb->b, 0);
+ src_c_addr = vb2_dma_contig_plane_paddr(src_mb->b, 1);
+ s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_size = vb2_plane_size(dst_mb->b, 0);
+ s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ return 0;
+}
+
+static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *mb_entry;
+ unsigned long enc_y_addr, enc_c_addr;
+ unsigned long mb_y_addr, mb_c_addr;
+ int slice_type;
+ unsigned int strm_size;
+ unsigned long flags;
+
+ slice_type = s5p_mfc_get_enc_slice_type();
+ strm_size = s5p_mfc_get_enc_strm_size();
+	mfc_debug(2, "Encoded slice type: %d\n", slice_type);
+	mfc_debug(2, "Encoded stream size: %d\n", strm_size);
+	mfc_debug(2, "Display order: %d\n",
+ mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (slice_type >= 0) {
+ s5p_mfc_get_enc_frame_buffer(ctx, &enc_y_addr, &enc_c_addr);
+ list_for_each_entry(mb_entry, &ctx->src_queue, list) {
+ mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ if ((enc_y_addr == mb_y_addr) &&
+ (enc_c_addr == mb_c_addr)) {
+ list_del(&mb_entry->list);
+ ctx->src_queue_cnt--;
+ vb2_buffer_done(mb_entry->b,
+ VB2_BUF_STATE_DONE);
+ break;
+ }
+ }
+ list_for_each_entry(mb_entry, &ctx->ref_queue, list) {
+ mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ if ((enc_y_addr == mb_y_addr) &&
+ (enc_c_addr == mb_c_addr)) {
+ list_del(&mb_entry->list);
+ ctx->ref_queue_cnt--;
+ vb2_buffer_done(mb_entry->b,
+ VB2_BUF_STATE_DONE);
+ break;
+ }
+ }
+ }
+ if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) {
+ mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+ list);
+ if (mb_entry->used) {
+ list_del(&mb_entry->list);
+ ctx->src_queue_cnt--;
+ list_add_tail(&mb_entry->list, &ctx->ref_queue);
+ ctx->ref_queue_cnt++;
+ }
+ mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
+ ctx->src_queue_cnt, ctx->ref_queue_cnt);
+ }
+ if (strm_size > 0) {
+		/* at least one destination buffer is always available here */
+ mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
+ list);
+ list_del(&mb_entry->list);
+ ctx->dst_queue_cnt--;
+ switch (slice_type) {
+ case S5P_FIMV_ENC_SI_SLICE_TYPE_I:
+ mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case S5P_FIMV_ENC_SI_SLICE_TYPE_P:
+ mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+ break;
+ case S5P_FIMV_ENC_SI_SLICE_TYPE_B:
+ mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_BFRAME;
+ break;
+ }
+ vb2_set_plane_payload(mb_entry->b, 0, strm_size);
+ vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) {
+ spin_lock(&dev->condlock);
+ clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock(&dev->condlock);
+ }
+ return 0;
+}
+
+static struct s5p_mfc_codec_ops encoder_codec_ops = {
+ .pre_seq_start = enc_pre_seq_start,
+ .post_seq_start = enc_post_seq_start,
+ .pre_frame_start = enc_pre_frame_start,
+ .post_frame_start = enc_post_frame_start,
+};
+
+/* Query capabilities of the device */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+
+ strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
+ strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(1, 0, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
+ | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool mplane, bool out)
+{
+ struct s5p_mfc_fmt *fmt;
+ int i, j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (mplane && formats[i].num_planes == 1)
+ continue;
+ else if (!mplane && formats[i].num_planes > 1)
+ continue;
+ if (out && formats[i].type != MFC_FMT_RAW)
+ continue;
+ else if (!out && formats[i].type != MFC_FMT_ENC)
+ continue;
+ if (j == f->index) {
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name,
+ sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false, false);
+}
+
+static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true, false);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false, true);
+}
+
+static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true, true);
+}
+
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+
+ mfc_debug(2, "f->type = %d ctx->state = %d\n", f->type, ctx->state);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		/* This is run on the capture queue (the encoder destination) */
+ pix_fmt_mp->width = 0;
+ pix_fmt_mp->height = 0;
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+ pix_fmt_mp->pixelformat = ctx->dst_fmt->fourcc;
+ pix_fmt_mp->num_planes = ctx->dst_fmt->num_planes;
+
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->enc_dst_buf_size;
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->enc_dst_buf_size;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		/* This is run on the output queue (the encoder source) */
+ pix_fmt_mp->width = ctx->img_width;
+ pix_fmt_mp->height = ctx->img_height;
+
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+ pix_fmt_mp->pixelformat = ctx->src_fmt->fourcc;
+ pix_fmt_mp->num_planes = ctx->src_fmt->num_planes;
+
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt = find_format(f, MFC_FMT_ENC);
+ if (!fmt) {
+			mfc_err("failed to try capture format\n");
+ return -EINVAL;
+ }
+
+ if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
+			mfc_err("encoding output buffer size must be set\n");
+ return -EINVAL;
+ }
+
+ pix_fmt_mp->plane_fmt[0].bytesperline =
+ pix_fmt_mp->plane_fmt[0].sizeimage;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ fmt = find_format(f, MFC_FMT_RAW);
+ if (!fmt) {
+ mfc_err("failed to try output format\n");
+ return -EINVAL;
+ }
+
+ if (fmt->num_planes != pix_fmt_mp->num_planes) {
+ mfc_err("failed to try output format\n");
+ return -EINVAL;
+ }
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ unsigned long flags;
+ int ret = 0;
+
+ ret = vidioc_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+ if (ctx->vq_src.streaming || ctx->vq_dst.streaming) {
+ v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt = find_format(f, MFC_FMT_ENC);
+ if (!fmt) {
+ mfc_err("failed to set capture format\n");
+ return -EINVAL;
+ }
+ ctx->state = MFCINST_INIT;
+ ctx->dst_fmt = fmt;
+ ctx->codec_mode = ctx->dst_fmt->codec_mode;
+ ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage;
+ pix_fmt_mp->plane_fmt[0].bytesperline = 0;
+ ctx->dst_bufs_cnt = 0;
+ ctx->capture_state = QUEUE_FREE;
+ s5p_mfc_alloc_instance_buffer(ctx);
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_try_run(dev);
+		if (s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 1)) {
+ /* Error or timeout */
+ mfc_err("Error getting instance from hardware\n");
+ s5p_mfc_release_instance_buffer(ctx);
+ ret = -EIO;
+ goto out;
+ }
+ mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ fmt = find_format(f, MFC_FMT_RAW);
+ if (!fmt) {
+ mfc_err("failed to set output format\n");
+ return -EINVAL;
+ }
+ if (fmt->num_planes != pix_fmt_mp->num_planes) {
+ mfc_err("failed to set output format\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ ctx->src_fmt = fmt;
+ ctx->img_width = pix_fmt_mp->width;
+ ctx->img_height = pix_fmt_mp->height;
+ mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
+ mfc_debug(2, "fmt - w: %d, h: %d, ctx - w: %d, h: %d\n",
+ pix_fmt_mp->width, pix_fmt_mp->height,
+ ctx->img_width, ctx->img_height);
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) {
+ ctx->buf_width = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12M_HALIGN);
+ ctx->luma_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12M_HALIGN) * ALIGN(ctx->img_height,
+ S5P_FIMV_NV12M_LVALIGN);
+ ctx->chroma_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12M_HALIGN) * ALIGN((ctx->img_height
+ >> 1), S5P_FIMV_NV12M_CVALIGN);
+
+ ctx->luma_size = ALIGN(ctx->luma_size,
+ S5P_FIMV_NV12M_SALIGN);
+ ctx->chroma_size = ALIGN(ctx->chroma_size,
+ S5P_FIMV_NV12M_SALIGN);
+
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+
+ } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
+ ctx->buf_width = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN);
+ ctx->luma_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN) * ALIGN(ctx->img_height,
+ S5P_FIMV_NV12MT_VALIGN);
+ ctx->chroma_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN) * ALIGN((ctx->img_height
+ >> 1), S5P_FIMV_NV12MT_VALIGN);
+ ctx->luma_size = ALIGN(ctx->luma_size,
+ S5P_FIMV_NV12MT_SALIGN);
+ ctx->chroma_size = ALIGN(ctx->chroma_size,
+ S5P_FIMV_NV12MT_SALIGN);
+
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ }
+ ctx->src_bufs_cnt = 0;
+ ctx->output_state = QUEUE_FREE;
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+out:
+ mfc_debug_leave();
+ return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+
+	/* if memory is not mmap or userptr return error */
+ if ((reqbufs->memory != V4L2_MEMORY_MMAP) &&
+ (reqbufs->memory != V4L2_MEMORY_USERPTR))
+ return -EINVAL;
+ if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->capture_state != QUEUE_FREE) {
+ mfc_err("invalid capture state: %d\n",
+ ctx->capture_state);
+ return -EINVAL;
+ }
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ if (ret != 0) {
+ mfc_err("error in vb2_reqbufs() for E(D)\n");
+ return ret;
+ }
+ ctx->capture_state = QUEUE_BUFS_REQUESTED;
+ ret = s5p_mfc_alloc_codec_buffers(ctx);
+ if (ret) {
+ mfc_err("Failed to allocate encoding buffers\n");
+ reqbufs->count = 0;
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ return -ENOMEM;
+ }
+ } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (ctx->output_state != QUEUE_FREE) {
+ mfc_err("invalid output state: %d\n",
+ ctx->output_state);
+ return -EINVAL;
+ }
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ if (ret != 0) {
+ mfc_err("error in vb2_reqbufs() for E(S)\n");
+ return ret;
+ }
+ ctx->output_state = QUEUE_BUFS_REQUESTED;
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+
+	/* if memory is not mmap or userptr return error */
+ if ((buf->memory != V4L2_MEMORY_MMAP) &&
+ (buf->memory != V4L2_MEMORY_USERPTR))
+ return -EINVAL;
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->state != MFCINST_GOT_INST) {
+ mfc_err("invalid context state: %d\n", ctx->state);
+ return -EINVAL;
+ }
+ ret = vb2_querybuf(&ctx->vq_dst, buf);
+ if (ret != 0) {
+ mfc_err("error in vb2_querybuf() for E(D)\n");
+ return ret;
+ }
+ buf->m.planes[0].m.mem_offset += DST_QUEUE_OFF_BASE;
+ } else if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = vb2_querybuf(&ctx->vq_src, buf);
+ if (ret != 0) {
+ mfc_err("error in vb2_querybuf() for E(S)\n");
+ return ret;
+ }
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+/* Queue a buffer */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err("Call on QBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_qbuf(&ctx->vq_src, buf);
+ else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_qbuf(&ctx->vq_dst, buf);
+ return -EINVAL;
+}
+
+/* Dequeue a buffer */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err("Call on DQBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
+ else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
+ return -EINVAL;
+}
+
+/* Stream on */
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_streamon(&ctx->vq_src, type);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_streamon(&ctx->vq_dst, type);
+ return -EINVAL;
+}
+
+/* Stream off, which is equivalent to a pause */
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_streamoff(&ctx->vq_src, type);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_streamoff(&ctx->vq_dst, type);
+ return -EINVAL;
+}
+
+static inline int h264_level(enum v4l2_mpeg_video_h264_level lvl)
+{
+ static unsigned int t[V4L2_MPEG_VIDEO_H264_LEVEL_4_0 + 1] = {
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_0 */ 10,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1B */ 9,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_1 */ 11,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_2 */ 12,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_3 */ 13,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_2_0 */ 20,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_2_1 */ 21,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_2_2 */ 22,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_3_0 */ 30,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_3_1 */ 31,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_3_2 */ 32,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_4_0 */ 40,
+ };
+ return t[lvl];
+}
+
+static inline int mpeg4_level(enum v4l2_mpeg_video_mpeg4_level lvl)
+{
+ static unsigned int t[V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 + 1] = {
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 */ 0,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B */ 9,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 */ 1,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 */ 2,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 */ 3,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B */ 7,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 */ 4,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 */ 5,
+ };
+ return t[lvl];
+}
+
+static inline int vui_sar_idc(enum v4l2_mpeg_video_h264_vui_sar_idc sar)
+{
+ static unsigned int t[V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED + 1] = {
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED */ 0,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1 */ 1,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11 */ 2,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11 */ 3,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11 */ 4,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33 */ 5,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11 */ 6,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11 */ 7,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11 */ 8,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33 */ 9,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11 */ 10,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11 */ 11,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33 */ 12,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99 */ 13,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3 */ 14,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2 */ 15,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1 */ 16,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED */ 255,
+ };
+ return t[sar];
+}
+
+static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ p->gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ p->slice_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ p->slice_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ p->slice_bit = ctrl->val * 8;
+ break;
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
+ p->intra_refresh_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_PADDING:
+ p->pad = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV:
+ p->pad_luma = (ctrl->val >> 16) & 0xff;
+ p->pad_cb = (ctrl->val >> 8) & 0xff;
+ p->pad_cr = (ctrl->val >> 0) & 0xff;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ p->rc_frame = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ p->rc_bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF:
+ p->rc_reaction_coeff = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
+ ctx->force_frame_type = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
+ p->vbv_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+ p->codec.h264.cpb_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ p->seq_hdr_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
+ p->frame_skip_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT:
+ p->fixed_target_bit = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ p->num_b_frame = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ p->codec.h264.profile =
+ S5P_FIMV_ENC_PROFILE_H264_MAIN;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ p->codec.h264.profile =
+ S5P_FIMV_ENC_PROFILE_H264_HIGH;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ p->codec.h264.profile =
+ S5P_FIMV_ENC_PROFILE_H264_BASELINE;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ p->codec.h264.level_v4l2 = ctrl->val;
+ p->codec.h264.level = h264_level(ctrl->val);
+ if (p->codec.h264.level < 0) {
+ mfc_err("Level number is wrong\n");
+ ret = p->codec.h264.level;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ p->codec.mpeg4.level_v4l2 = ctrl->val;
+ p->codec.mpeg4.level = mpeg4_level(ctrl->val);
+ if (p->codec.mpeg4.level < 0) {
+ mfc_err("Level number is wrong\n");
+ ret = p->codec.mpeg4.level;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ p->codec.h264.loop_filter_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ p->codec.h264.loop_filter_alpha = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ p->codec.h264.loop_filter_beta = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ p->codec.h264.entropy_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P:
+ p->codec.h264.num_ref_pic_4p = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ p->codec.h264._8x8_transform = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ p->codec.h264.rc_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ p->codec.h264.rc_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ p->codec.h264.rc_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ p->codec.h264.rc_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ p->codec.h264.rc_p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ p->codec.h264.rc_b_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:
+ p->codec.mpeg4.rc_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:
+ p->codec.mpeg4.rc_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:
+ p->codec.mpeg4.rc_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:
+ p->codec.mpeg4.rc_p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:
+ p->codec.mpeg4.rc_b_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK:
+ p->codec.h264.rc_mb_dark = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH:
+ p->codec.h264.rc_mb_smooth = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC:
+ p->codec.h264.rc_mb_static = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY:
+ p->codec.h264.rc_mb_activity = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ p->codec.h264.vui_sar = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ p->codec.h264.vui_sar_idc = vui_sar_idc(ctrl->val);
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH:
+ p->codec.h264.vui_ext_sar_width = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT:
+ p->codec.h264.vui_ext_sar_height = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
+ p->codec.h264.open_gop = !ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ p->codec.h264.open_gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
+ p->codec.mpeg4.profile =
+ S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE;
+ break;
+ case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
+ p->codec.mpeg4.profile =
+ S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+ p->codec.mpeg4.quarter_pixel = ctrl->val;
+ break;
+ default:
+ v4l2_err(&dev->v4l2_dev, "Invalid control, id=%d, val=%d\n",
+ ctrl->id, ctrl->val);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
+ .s_ctrl = s5p_mfc_enc_s_ctrl,
+};
+
+int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ctx->enc_params.rc_framerate_num =
+ a->parm.output.timeperframe.denominator;
+ ctx->enc_params.rc_framerate_denom =
+ a->parm.output.timeperframe.numerator;
+ } else {
+ mfc_err("Setting FPS is only possible for the output queue\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vidioc_g_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ a->parm.output.timeperframe.denominator =
+ ctx->enc_params.rc_framerate_num;
+ a->parm.output.timeperframe.numerator =
+ ctx->enc_params.rc_framerate_denom;
+ } else {
+		mfc_err("Getting FPS is only possible for the output queue\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_s_parm = vidioc_s_parm,
+ .vidioc_g_parm = vidioc_g_parm,
+};
+
+static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
+{
+ int i;
+
+ if (!fmt)
+ return -EINVAL;
+ if (fmt->num_planes != vb->num_planes) {
+ mfc_err("invalid plane number for the format\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < fmt->num_planes; i++) {
+ if (!vb2_dma_contig_plane_paddr(vb, i)) {
+ mfc_err("failed to get plane cookie\n");
+ return -EINVAL;
+ }
+		mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx\n",
+ vb->v4l2_buf.index, i,
+ vb2_dma_contig_plane_paddr(vb, i));
+ }
+ return 0;
+}
+
+static int s5p_mfc_queue_setup(struct vb2_queue *vq,
+ unsigned int *buf_count, unsigned int *plane_count,
+ unsigned long psize[], void *allocators[])
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+
+ if (ctx->state != MFCINST_GOT_INST) {
+		mfc_err("invalid state: %d\n", ctx->state);
+ return -EINVAL;
+ }
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->dst_fmt)
+ *plane_count = ctx->dst_fmt->num_planes;
+ else
+ *plane_count = MFC_ENC_CAP_PLANE_COUNT;
+ if (*buf_count < 1)
+ *buf_count = 1;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
+ psize[0] = ctx->enc_dst_buf_size;
+ allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (ctx->src_fmt)
+ *plane_count = ctx->src_fmt->num_planes;
+ else
+ *plane_count = MFC_ENC_OUT_PLANE_COUNT;
+
+ if (*buf_count < 1)
+ *buf_count = 1;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
+ psize[0] = ctx->luma_size;
+ psize[1] = ctx->chroma_size;
+ allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
+ allocators[1] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
+ } else {
+		mfc_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void s5p_mfc_unlock(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mutex_unlock(&dev->mfc_mutex);
+}
+
+static void s5p_mfc_lock(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mutex_lock(&dev->mfc_mutex);
+}
+
+static int s5p_mfc_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ unsigned int i;
+ int ret;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = check_vb_with_fmt(ctx->dst_fmt, vb);
+ if (ret < 0)
+ return ret;
+ i = vb->v4l2_buf.index;
+ ctx->dst_bufs[i].b = vb;
+ ctx->dst_bufs[i].cookie.stream =
+ vb2_dma_contig_plane_paddr(vb, 0);
+ ctx->dst_bufs_cnt++;
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = check_vb_with_fmt(ctx->src_fmt, vb);
+ if (ret < 0)
+ return ret;
+ i = vb->v4l2_buf.index;
+ ctx->src_bufs[i].b = vb;
+ ctx->src_bufs[i].cookie.raw.luma =
+ vb2_dma_contig_plane_paddr(vb, 0);
+ ctx->src_bufs[i].cookie.raw.chroma =
+ vb2_dma_contig_plane_paddr(vb, 1);
+ ctx->src_bufs_cnt++;
+ } else {
+		mfc_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ int ret;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = check_vb_with_fmt(ctx->dst_fmt, vb);
+ if (ret < 0)
+ return ret;
+ mfc_debug(2, "plane size: %ld, dst size: %d\n",
+ vb2_plane_size(vb, 0), ctx->enc_dst_buf_size);
+ if (vb2_plane_size(vb, 0) < ctx->enc_dst_buf_size) {
+ mfc_err("plane size is too small for capture\n");
+ return -EINVAL;
+ }
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = check_vb_with_fmt(ctx->src_fmt, vb);
+ if (ret < 0)
+ return ret;
+ mfc_debug(2, "plane size: %ld, luma size: %d\n",
+ vb2_plane_size(vb, 0), ctx->luma_size);
+ mfc_debug(2, "plane size: %ld, chroma size: %d\n",
+ vb2_plane_size(vb, 1), ctx->chroma_size);
+ if (vb2_plane_size(vb, 0) < ctx->luma_size ||
+ vb2_plane_size(vb, 1) < ctx->chroma_size) {
+ mfc_err("plane size is too small for output\n");
+ return -EINVAL;
+ }
+ } else {
+		mfc_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_start_streaming(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+	/* If the context is ready, schedule it to run */
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+ return 0;
+}
+
+static int s5p_mfc_stop_streaming(struct vb2_queue *q)
+{
+ unsigned long flags;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ if ((ctx->state == MFCINST_FINISHING ||
+ ctx->state == MFCINST_RUNNING) &&
+ dev->curr_ctx == ctx->num && dev->hw_lock) {
+ ctx->state = MFCINST_ABORT;
+ s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_FRAME_DONE_RET,
+ 0);
+ }
+ ctx->state = MFCINST_FINISHED;
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ INIT_LIST_HEAD(&ctx->dst_queue);
+ ctx->dst_queue_cnt = 0;
+ }
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ cleanup_ref_queue(ctx);
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ INIT_LIST_HEAD(&ctx->src_queue);
+ ctx->src_queue_cnt = 0;
+ }
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return 0;
+}
+
+static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *mfc_buf;
+
+ if (ctx->state == MFCINST_ERROR) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ cleanup_ref_queue(ctx);
+ return;
+ }
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
+ mfc_buf->used = 0;
+ /* Mark destination as available for use by MFC */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ list_add_tail(&mfc_buf->list, &ctx->dst_queue);
+ ctx->dst_queue_cnt++;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
+ mfc_buf->used = 0;
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (vb->v4l2_planes[0].bytesused == 0) {
+ mfc_debug(1, "change state to FINISHING\n");
+ ctx->state = MFCINST_FINISHING;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ cleanup_ref_queue(ctx);
+ } else {
+ list_add_tail(&mfc_buf->list, &ctx->src_queue);
+ ctx->src_queue_cnt++;
+ }
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else {
+ mfc_err("unsupported buffer type (%d)\n", vq->type);
+ }
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+}
+
+static struct vb2_ops s5p_mfc_enc_qops = {
+ .queue_setup = s5p_mfc_queue_setup,
+ .wait_prepare = s5p_mfc_unlock,
+ .wait_finish = s5p_mfc_lock,
+ .buf_init = s5p_mfc_buf_init,
+ .buf_prepare = s5p_mfc_buf_prepare,
+ .start_streaming = s5p_mfc_start_streaming,
+ .stop_streaming = s5p_mfc_stop_streaming,
+ .buf_queue = s5p_mfc_buf_queue,
+};
+
+struct s5p_mfc_codec_ops *get_enc_codec_ops(void)
+{
+ return &encoder_codec_ops;
+}
+
+struct vb2_ops *get_enc_queue_ops(void)
+{
+ return &s5p_mfc_enc_qops;
+}
+
+const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void)
+{
+ return &s5p_mfc_enc_ioctl_ops;
+}
+
+#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \
+ && V4L2_CTRL_DRIVER_PRIV(x))
+
+int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
+{
+ struct v4l2_ctrl_config cfg;
+ int i;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
+ if (ctx->ctrl_handler.error) {
+ mfc_err("v4l2_ctrl_handler_init failed\n");
+ return ctx->ctrl_handler.error;
+ }
+ for (i = 0; i < NUM_CTRLS; i++) {
+ if (IS_MFC51_PRIV(controls[i].id)) {
+ cfg.ops = &s5p_mfc_enc_ctrl_ops;
+ cfg.id = controls[i].id;
+ cfg.min = controls[i].minimum;
+ cfg.max = controls[i].maximum;
+ cfg.def = controls[i].default_value;
+ cfg.name = controls[i].name;
+ cfg.type = controls[i].type;
+ cfg.flags = 0;
+
+ if (cfg.type == V4L2_CTRL_TYPE_MENU) {
+ cfg.step = 0;
+				cfg.menu_skip_mask = controls[i].menu_skip_mask;
+ cfg.qmenu = mfc51_get_menu(cfg.id);
+ } else {
+ cfg.step = controls[i].step;
+ cfg.menu_skip_mask = 0;
+ }
+ ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &cfg, NULL);
+ } else {
+ if (controls[i].type == V4L2_CTRL_TYPE_MENU) {
+ ctx->ctrls[i] = v4l2_ctrl_new_std_menu(
+ &ctx->ctrl_handler,
+ &s5p_mfc_enc_ctrl_ops, controls[i].id,
+ controls[i].maximum, 0,
+ controls[i].default_value);
+ } else {
+ ctx->ctrls[i] = v4l2_ctrl_new_std(
+ &ctx->ctrl_handler,
+ &s5p_mfc_enc_ctrl_ops, controls[i].id,
+ controls[i].minimum,
+ controls[i].maximum, controls[i].step,
+ controls[i].default_value);
+ }
+ }
+ if (ctx->ctrl_handler.error) {
+ mfc_err("Adding control (%d) failed\n", i);
+ return ctx->ctrl_handler.error;
+ }
+ if (controls[i].is_volatile && ctx->ctrls[i])
+ ctx->ctrls[i]->is_volatile = 1;
+ }
+ return 0;
+}
+
+void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx)
+{
+ int i;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ for (i = 0; i < NUM_CTRLS; i++)
+ ctx->ctrls[i] = NULL;
+}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.h b/drivers/media/video/s5p-mfc/s5p_mfc_enc.h
new file mode 100644
index 0000000..405bdd3
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.h
@@ -0,0 +1,23 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_enc.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_ENC_H_
+#define S5P_MFC_ENC_H_
+
+struct s5p_mfc_codec_ops *get_enc_codec_ops(void);
+struct vb2_ops *get_enc_queue_ops(void);
+const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void);
+struct s5p_mfc_fmt *get_enc_def_fmt(bool src);
+int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_ENC_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_intr.c b/drivers/media/video/s5p-mfc/s5p_mfc_intr.c
new file mode 100644
index 0000000..8f2f8bf
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_intr.c
@@ -0,0 +1,92 @@
+/*
+ * drivers/media/video/s5p-mfc/s5p_mfc_intr.c
+ *
+ * C file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains functions used to wait for command completion.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+
+int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command)
+{
+ int ret;
+
+ ret = wait_event_interruptible_timeout(dev->queue,
+ (dev->int_cond && (dev->int_type == command
+ || dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+ if (ret == 0) {
+ mfc_err("Interrupt (dev->int_type:%d, command:%d) timed out\n",
+ dev->int_type, command);
+ return 1;
+ } else if (ret == -ERESTARTSYS) {
+ mfc_err("Interrupted by a signal\n");
+ return 1;
+ }
+ mfc_debug(1, "Finished waiting (dev->int_type:%d, command: %d)\n",
+ dev->int_type, command);
+ if (dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET)
+ return 1;
+ return 0;
+}
+
+void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev)
+{
+ dev->int_cond = 0;
+ dev->int_type = 0;
+ dev->int_err = 0;
+}
+
+int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
+ int command, int interrupt)
+{
+ int ret;
+
+ if (interrupt) {
+ ret = wait_event_interruptible_timeout(ctx->queue,
+ (ctx->int_cond && (ctx->int_type == command
+ || ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+ } else {
+ ret = wait_event_timeout(ctx->queue,
+ (ctx->int_cond && (ctx->int_type == command
+ || ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+ }
+ if (ret == 0) {
+ mfc_err("Interrupt (ctx->int_type:%d, command:%d) timed out\n",
+ ctx->int_type, command);
+ return 1;
+ } else if (ret == -ERESTARTSYS) {
+ mfc_err("Interrupted by a signal\n");
+ return 1;
+ }
+ mfc_debug(1, "Finished waiting (ctx->int_type:%d, command: %d)\n",
+ ctx->int_type, command);
+ if (ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)
+ return 1;
+ return 0;
+}
+
+void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx)
+{
+ ctx->int_cond = 0;
+ ctx->int_type = 0;
+ ctx->int_err = 0;
+}
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_intr.h b/drivers/media/video/s5p-mfc/s5p_mfc_intr.h
new file mode 100644
index 0000000..122d773
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_intr.h
@@ -0,0 +1,26 @@
+/*
+ * drivers/media/video/s5p-mfc/s5p_mfc_intr.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * It contains the declarations of the waiting functions.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_INTR_H_
+#define S5P_MFC_INTR_H_
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
+ int command, int interrupt);
+int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command);
+void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev);
+
+#endif /* S5P_MFC_INTR_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.c b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
new file mode 100644
index 0000000..7b23916
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
@@ -0,0 +1,1397 @@
+/*
+ * drivers/media/video/s5p-mfc/s5p_mfc_opr.c
+ *
+ * Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains hardware-related functions.
+ *
+ * Kamil Debski, Copyright (c) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "regs-mfc.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_ctrl.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_shm.h"
+#include <asm/cacheflush.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#define OFFSETA(x) (((x) - dev->bank1) >> MFC_OFFSET_SHIFT)
+#define OFFSETB(x) (((x) - dev->bank2) >> MFC_OFFSET_SHIFT)
+
+/* Allocate temporary buffers for decoding */
+int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
+{
+ void *desc_virt;
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ ctx->desc_buf = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], DESC_BUF_SIZE);
+ if (IS_ERR_VALUE((int)ctx->desc_buf)) {
+ ctx->desc_buf = 0;
+ mfc_err("Allocating DESC buffer failed\n");
+ return -ENOMEM;
+ }
+ ctx->desc_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->desc_buf);
+ BUG_ON(ctx->desc_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ desc_virt = vb2_dma_contig_memops.vaddr(ctx->desc_buf);
+ if (desc_virt == NULL) {
+ vb2_dma_contig_memops.put(ctx->desc_buf);
+ ctx->desc_phys = 0;
+ ctx->desc_buf = 0;
+ mfc_err("Remapping DESC buffer failed\n");
+ return -ENOMEM;
+ }
+ memset(desc_virt, 0, DESC_BUF_SIZE);
+ wmb();
+ return 0;
+}
+
+/* Release temporary buffers for decoding */
+void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
+{
+ if (ctx->desc_phys) {
+ vb2_dma_contig_memops.put(ctx->desc_buf);
+ ctx->desc_phys = 0;
+ ctx->desc_buf = 0;
+ }
+}
+
+/* Allocate codec buffers */
+int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int enc_ref_y_size = 0;
+ unsigned int enc_ref_c_size = 0;
+ unsigned int guard_width, guard_height;
+
+ if (ctx->type == MFCINST_DECODER) {
+ mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
+ ctx->luma_size, ctx->chroma_size, ctx->mv_size);
+		mfc_debug(2, "Total bufs: %d\n", ctx->total_dpb_count);
+ } else if (ctx->type == MFCINST_ENCODER) {
+ enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
+
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
+ enc_ref_c_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN(ctx->img_height >> 1,
+ S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(enc_ref_c_size,
+ S5P_FIMV_NV12MT_SALIGN);
+ } else {
+ guard_width = ALIGN(ctx->img_width + 16,
+ S5P_FIMV_NV12MT_HALIGN);
+ guard_height = ALIGN((ctx->img_height >> 1) + 4,
+ S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(guard_width * guard_height,
+ S5P_FIMV_NV12MT_SALIGN);
+ }
+ mfc_debug(2, "recon luma size: %d chroma size: %d\n",
+ enc_ref_y_size, enc_ref_c_size);
+ } else {
+ return -EINVAL;
+ }
+ /* Codecs have different memory requirements */
+ switch (ctx->codec_mode) {
+ case S5P_FIMV_CODEC_H264_DEC:
+ ctx->bank1_size =
+ ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
+ S5P_FIMV_DEC_VERT_NB_MV_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
+ break;
+ case S5P_FIMV_CODEC_MPEG4_DEC:
+ ctx->bank1_size =
+ ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
+ S5P_FIMV_DEC_UPNB_MV_SIZE +
+ S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+ S5P_FIMV_DEC_STX_PARSER_SIZE +
+ S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2_size = 0;
+ break;
+ case S5P_FIMV_CODEC_VC1RCV_DEC:
+ case S5P_FIMV_CODEC_VC1_DEC:
+ ctx->bank1_size =
+ ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
+ S5P_FIMV_DEC_UPNB_MV_SIZE +
+ S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+ S5P_FIMV_DEC_NB_DCAC_SIZE +
+ 3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2_size = 0;
+ break;
+ case S5P_FIMV_CODEC_MPEG2_DEC:
+ ctx->bank1_size = 0;
+ ctx->bank2_size = 0;
+ break;
+ case S5P_FIMV_CODEC_H263_DEC:
+ ctx->bank1_size =
+ ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
+ S5P_FIMV_DEC_UPNB_MV_SIZE +
+ S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+ S5P_FIMV_DEC_NB_DCAC_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2_size = 0;
+ break;
+ case S5P_FIMV_CODEC_H264_ENC:
+ ctx->bank1_size = (enc_ref_y_size * 2) +
+ S5P_FIMV_ENC_UPMV_SIZE +
+ S5P_FIMV_ENC_COLFLG_SIZE +
+ S5P_FIMV_ENC_INTRAMD_SIZE +
+ S5P_FIMV_ENC_NBORINFO_SIZE;
+ ctx->bank2_size = (enc_ref_y_size * 2) +
+ (enc_ref_c_size * 4) +
+ S5P_FIMV_ENC_INTRAPRED_SIZE;
+ break;
+ case S5P_FIMV_CODEC_MPEG4_ENC:
+ ctx->bank1_size = (enc_ref_y_size * 2) +
+ S5P_FIMV_ENC_UPMV_SIZE +
+ S5P_FIMV_ENC_COLFLG_SIZE +
+ S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ ctx->bank2_size = (enc_ref_y_size * 2) +
+ (enc_ref_c_size * 4);
+ break;
+ case S5P_FIMV_CODEC_H263_ENC:
+ ctx->bank1_size = (enc_ref_y_size * 2) +
+ S5P_FIMV_ENC_UPMV_SIZE +
+ S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ ctx->bank2_size = (enc_ref_y_size * 2) +
+ (enc_ref_c_size * 4);
+ break;
+ default:
+ break;
+ }
+ /* Allocate only if memory from bank 1 is necessary */
+ if (ctx->bank1_size > 0) {
+ ctx->bank1_buf = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
+ if (IS_ERR(ctx->bank1_buf)) {
+ ctx->bank1_buf = 0;
+			printk(KERN_ERR
+			       "Buf alloc for codec failed (port A)\n");
+ return -ENOMEM;
+ }
+ ctx->bank1_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
+ BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ }
+ /* Allocate only if memory from bank 2 is necessary */
+ if (ctx->bank2_size > 0) {
+ ctx->bank2_buf = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
+ if (IS_ERR(ctx->bank2_buf)) {
+ ctx->bank2_buf = 0;
+			mfc_err("Buf alloc for codec failed (port B)\n");
+ return -ENOMEM;
+ }
+ ctx->bank2_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
+ BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
+ }
+ return 0;
+}
+
+/* Release buffers allocated for codec */
+void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx)
+{
+ if (ctx->bank1_buf) {
+ vb2_dma_contig_memops.put(ctx->bank1_buf);
+ ctx->bank1_buf = 0;
+ ctx->bank1_phys = 0;
+ ctx->bank1_size = 0;
+ }
+ if (ctx->bank2_buf) {
+ vb2_dma_contig_memops.put(ctx->bank2_buf);
+ ctx->bank2_buf = 0;
+ ctx->bank2_phys = 0;
+ ctx->bank2_size = 0;
+ }
+}
+
+/* Allocate memory for instance data buffer */
+int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
+{
+ void *context_virt;
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC ||
+ ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
+ ctx->ctx_size = MFC_H264_CTX_BUF_SIZE;
+ else
+ ctx->ctx_size = MFC_CTX_BUF_SIZE;
+ ctx->ctx_buf = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_size);
+ if (IS_ERR(ctx->ctx_buf)) {
+ mfc_err("Allocating context buffer failed\n");
+ ctx->ctx_phys = 0;
+ ctx->ctx_buf = 0;
+ return -ENOMEM;
+ }
+ ctx->ctx_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_buf);
+ BUG_ON(ctx->ctx_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ ctx->ctx_ofs = OFFSETA(ctx->ctx_phys);
+ context_virt = vb2_dma_contig_memops.vaddr(ctx->ctx_buf);
+ if (context_virt == NULL) {
+ mfc_err("Remapping instance buffer failed\n");
+ vb2_dma_contig_memops.put(ctx->ctx_buf);
+ ctx->ctx_phys = 0;
+ ctx->ctx_buf = 0;
+ return -ENOMEM;
+ }
+ /* Zero content of the allocated memory */
+ memset(context_virt, 0, ctx->ctx_size);
+ wmb();
+ if (s5p_mfc_init_shm(ctx) < 0) {
+ vb2_dma_contig_memops.put(ctx->ctx_buf);
+ ctx->ctx_phys = 0;
+ ctx->ctx_buf = 0;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Release instance buffer */
+void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx)
+{
+ if (ctx->ctx_buf) {
+ vb2_dma_contig_memops.put(ctx->ctx_buf);
+ ctx->ctx_phys = 0;
+ ctx->ctx_buf = 0;
+ }
+ if (ctx->shm_alloc) {
+ vb2_dma_contig_memops.put(ctx->shm_alloc);
+ ctx->shm_alloc = 0;
+ ctx->shm = 0;
+ }
+}
+
+/* Set registers for decoding temporary buffers */
+void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETA(ctx->desc_phys), S5P_FIMV_SI_CH0_DESC_ADR);
+ mfc_write(dev, DESC_BUF_SIZE, S5P_FIMV_SI_CH0_DESC_SIZE);
+}
+
+/* Set registers for shared buffer */
+void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ mfc_write(dev, ctx->shm_ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
+}
+
+/* Set registers for decoding stream buffer */
+int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
+ unsigned int start_num_byte, unsigned int buf_size)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETA(buf_addr), S5P_FIMV_SI_CH0_SB_ST_ADR);
+ mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE);
+ mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
+ s5p_mfc_write_shm(ctx, start_num_byte, START_BYTE_NUM);
+ return 0;
+}
+
+/* Set decoding frame buffer */
+int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx)
+{
+ unsigned int frame_size, i;
+ unsigned int frame_size_ch, frame_size_mv;
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int dpb;
+ size_t buf_addr1, buf_addr2;
+ int buf_size1, buf_size2;
+
+ buf_addr1 = ctx->bank1_phys;
+ buf_size1 = ctx->bank1_size;
+ buf_addr2 = ctx->bank2_phys;
+ buf_size2 = ctx->bank2_size;
+ dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
+ ~S5P_FIMV_DPB_COUNT_MASK;
+ mfc_write(dev, ctx->total_dpb_count | dpb,
+ S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+ s5p_mfc_set_shared_buffer(ctx);
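+	/*
+	 * Carve the codec-specific auxiliary buffers (neighbour info, motion
+	 * vector data, bitplanes, ...) out of the bank 1 allocation; each
+	 * register write below consumes a slice and advances buf_addr1.
+	 */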
+ switch (ctx->codec_mode) {
+ case S5P_FIMV_CODEC_H264_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_VERT_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
+ break;
+ case S5P_FIMV_CODEC_MPEG4_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_NB_DCAC_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
+ buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_OT_LINE_ADR);
+ buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ break;
+ case S5P_FIMV_CODEC_H263_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_OT_LINE_ADR);
+ buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_NB_DCAC_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+ break;
+ case S5P_FIMV_CODEC_VC1_DEC:
+ case S5P_FIMV_CODEC_VC1RCV_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
+ buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_UP_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE3_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE2_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE1_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ break;
+ case S5P_FIMV_CODEC_MPEG2_DEC:
+ break;
+ default:
+ mfc_err("Unknown codec for decoding (%x)\n",
+ ctx->codec_mode);
+ return -EINVAL;
+ }
+ frame_size = ctx->luma_size;
+ frame_size_ch = ctx->chroma_size;
+ frame_size_mv = ctx->mv_size;
+ mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size, frame_size_ch,
+ frame_size_mv);
+ for (i = 0; i < ctx->total_dpb_count; i++) {
+ /* Bank2 */
+ mfc_debug(2, "Luma %d: %x\n", i,
+ ctx->dst_bufs[i].cookie.raw.luma);
+ mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
+ S5P_FIMV_DEC_LUMA_ADR + i * 4);
+ mfc_debug(2, "\tChroma %d: %x\n", i,
+ ctx->dst_bufs[i].cookie.raw.chroma);
+ mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
+ S5P_FIMV_DEC_CHROMA_ADR + i * 4);
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
+ mfc_debug(2, "\tBuf2: %x, size: %d\n",
+ buf_addr2, buf_size2);
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_H264_MV_ADR + i * 4);
+ buf_addr2 += frame_size_mv;
+ buf_size2 -= frame_size_mv;
+ }
+ }
+ mfc_debug(2, "Buf1: %u, buf_size1: %d\n", buf_addr1, buf_size1);
+ mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
+ buf_size1, buf_size2, ctx->total_dpb_count);
+ if (buf_size1 < 0 || buf_size2 < 0) {
+ mfc_debug(2, "Not enough memory has been allocated\n");
+ return -ENOMEM;
+ }
+ s5p_mfc_write_shm(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
+ s5p_mfc_write_shm(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC)
+ s5p_mfc_write_shm(ctx, frame_size_mv, ALLOC_MV_SIZE);
+ mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
+ << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
+ S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
+/* Set registers for encoding stream buffer */
+int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long addr, unsigned int size)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETA(addr), S5P_FIMV_ENC_SI_CH0_SB_ADR);
+ mfc_write(dev, size, S5P_FIMV_ENC_SI_CH0_SB_SIZE);
+ return 0;
+}
+
+void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long y_addr, unsigned long c_addr)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETB(y_addr), S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR);
+ mfc_write(dev, OFFSETB(c_addr), S5P_FIMV_ENC_SI_CH0_CUR_C_ADR);
+}
+
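+/*
+ * Read back the bank 2 addresses reported by the hardware for the frame it
+ * has just encoded; the registers hold offsets in MFC_OFFSET_SHIFT units.
+ */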
+void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long *y_addr, unsigned long *c_addr)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ *y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
+ << MFC_OFFSET_SHIFT);
+ *c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
+ << MFC_OFFSET_SHIFT);
+}
+
+/* Set encoding ref & codec buffer */
+int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ size_t buf_addr1, buf_addr2;
+ size_t buf_size1, buf_size2;
+ unsigned int enc_ref_y_size, enc_ref_c_size;
+ unsigned int guard_width, guard_height;
+ int i;
+
+ buf_addr1 = ctx->bank1_phys;
+ buf_size1 = ctx->bank1_size;
+ buf_addr2 = ctx->bank2_phys;
+ buf_size2 = ctx->bank2_size;
+ enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
+ enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN);
+ } else {
+ guard_width = ALIGN(ctx->img_width + 16,
+ S5P_FIMV_NV12MT_HALIGN);
+ guard_height = ALIGN((ctx->img_height >> 1) + 4,
+ S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(guard_width * guard_height,
+ S5P_FIMV_NV12MT_SALIGN);
+ }
+ mfc_debug(2, "buf_size1: %d, buf_size2: %d\n", buf_size1, buf_size2);
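+	/*
+	 * Reference luma buffers are split between banks 1 and 2, chroma
+	 * references live in bank 2, and most of the remaining per-codec
+	 * working buffers are carved from bank 1.
+	 */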
+ switch (ctx->codec_mode) {
+ case S5P_FIMV_CODEC_H264_ENC:
+ for (i = 0; i < 2; i++) {
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+ buf_addr1 += enc_ref_y_size;
+ buf_size1 -= enc_ref_y_size;
+
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_y_size;
+ buf_size2 -= enc_ref_y_size;
+ }
+ for (i = 0; i < 4; i++) {
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_c_size;
+ buf_size2 -= enc_ref_c_size;
+ }
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
+ buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_COZERO_FLAG_ADR);
+ buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_UP_INTRA_MD_ADR);
+ buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_H264_UP_INTRA_PRED_ADR);
+ buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
+ buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_NBOR_INFO_ADR);
+ buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
+ mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+ buf_size1, buf_size2);
+ break;
+ case S5P_FIMV_CODEC_MPEG4_ENC:
+ for (i = 0; i < 2; i++) {
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+ buf_addr1 += enc_ref_y_size;
+ buf_size1 -= enc_ref_y_size;
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_y_size;
+ buf_size2 -= enc_ref_y_size;
+ }
+ for (i = 0; i < 4; i++) {
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_c_size;
+ buf_size2 -= enc_ref_c_size;
+ }
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
+ buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_MPEG4_COZERO_FLAG_ADR);
+ buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_MPEG4_ACDC_COEF_ADR);
+ buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+ buf_size1, buf_size2);
+ break;
+ case S5P_FIMV_CODEC_H263_ENC:
+ for (i = 0; i < 2; i++) {
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+ buf_addr1 += enc_ref_y_size;
+ buf_size1 -= enc_ref_y_size;
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_y_size;
+ buf_size2 -= enc_ref_y_size;
+ }
+ for (i = 0; i < 4; i++) {
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_c_size;
+ buf_size2 -= enc_ref_c_size;
+ }
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
+ buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_ACDC_COEF_ADR);
+ buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+ buf_size1, buf_size2);
+ break;
+ default:
+ mfc_err("Unknown codec set for encoding: %d\n",
+ ctx->codec_mode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ unsigned int reg;
+ unsigned int shm;
+
+ /* width */
+ mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX);
+ /* height */
+ mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX);
+ /* pictype : enable, IDR period */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ reg |= (1 << 18);
+ reg &= ~(0xFFFF);
+ reg |= p->gop_size;
+ mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ mfc_write(dev, 0, S5P_FIMV_ENC_B_RECON_WRITE_ON);
+ /* multi-slice control */
+ /* multi-slice MB number or bit size */
+ mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
+ if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
+ mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
+ } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
+ mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
+ } else {
+ mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
+ mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_BIT);
+ }
+ /* cyclic intra refresh */
+ mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
+ /* memory structure cur. frame */
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
+ mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
+ else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
+ mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
+ /* padding control & value */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
+ if (p->pad) {
+ /** enable */
+ reg |= (1 << 31);
+ /** cr value */
+ reg &= ~(0xFF << 16);
+ reg |= (p->pad_cr << 16);
+ /** cb value */
+ reg &= ~(0xFF << 8);
+ reg |= (p->pad_cb << 8);
+ /** y value */
+ reg &= ~(0xFF);
+ reg |= (p->pad_luma);
+ } else {
+ /** disable & all value clear */
+ reg = 0;
+ }
+ mfc_write(dev, reg, S5P_FIMV_ENC_PADDING_CTRL);
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /** frame-level rate control */
+ reg &= ~(0x1 << 9);
+ reg |= (p->rc_frame << 9);
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* bit rate */
+ if (p->rc_frame)
+ mfc_write(dev, p->rc_bitrate,
+ S5P_FIMV_ENC_RC_BIT_RATE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_BIT_RATE);
+ /* reaction coefficient */
+ if (p->rc_frame)
+ mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
+ shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+ /* seq header ctrl */
+ shm &= ~(0x1 << 3);
+ shm |= (p->seq_hdr_mode << 3);
+ /* frame skip mode */
+ shm &= ~(0x3 << 1);
+ shm |= (p->frame_skip_mode << 1);
+ s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+ /* fixed target bit */
+ s5p_mfc_write_shm(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
+ unsigned int reg;
+ unsigned int shm;
+
+ s5p_mfc_set_enc_params(ctx);
+ /* pictype : number of B */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* num_b_frame - 0 ~ 2 */
+ reg &= ~(0x3 << 16);
+ reg |= (p->num_b_frame << 16);
+ mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* profile & level */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
+ /* level */
+ reg &= ~(0xFF << 8);
+ reg |= (p_264->level << 8);
+ /* profile - 0 ~ 2 */
+ reg &= ~(0x3F);
+ reg |= p_264->profile;
+ mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
+ /* interlace */
+ mfc_write(dev, p->interlace, S5P_FIMV_ENC_PIC_STRUCT);
+ /* height */
+ if (p->interlace)
+ mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
+ /* loopfilter ctrl */
+ mfc_write(dev, p_264->loop_filter_mode, S5P_FIMV_ENC_LF_CTRL);
+ /* loopfilter alpha offset */
+ if (p_264->loop_filter_alpha < 0) {
+ reg = 0x10;
+ reg |= (0xFF - p_264->loop_filter_alpha) + 1;
+ } else {
+ reg = 0x00;
+ reg |= (p_264->loop_filter_alpha & 0xF);
+ }
+ mfc_write(dev, reg, S5P_FIMV_ENC_ALPHA_OFF);
+ /* loopfilter beta offset */
+ if (p_264->loop_filter_beta < 0) {
+ reg = 0x10;
+ reg |= (0xFF - p_264->loop_filter_beta) + 1;
+ } else {
+ reg = 0x00;
+ reg |= (p_264->loop_filter_beta & 0xF);
+ }
+ mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
+ /* entropy coding mode */
+ if (p_264->entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
+ mfc_write(dev, 1, S5P_FIMV_ENC_H264_ENTROPY_MODE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_H264_ENTROPY_MODE);
+ /* number of ref. picture */
+ reg = mfc_read(dev, S5P_FIMV_ENC_H264_NUM_OF_REF);
+ /* num of ref. pictures of P */
+ reg &= ~(0x3 << 5);
+ reg |= (p_264->num_ref_pic_4p << 5);
+ /* max number of ref. pictures */
+ reg &= ~(0x1F);
+ reg |= p_264->max_ref_pic;
+ mfc_write(dev, reg, S5P_FIMV_ENC_H264_NUM_OF_REF);
+ /* 8x8 transform enable */
+ mfc_write(dev, p_264->_8x8_transform, S5P_FIMV_ENC_H264_TRANS_FLAG);
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /* macroblock level rate control */
+ reg &= ~(0x1 << 8);
+ reg |= (p_264->rc_mb << 8);
+ /* frame QP */
+ reg &= ~(0x3F);
+ reg |= p_264->rc_frame_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_denom)
+ mfc_write(dev, p->rc_framerate_num * 1000
+ / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+ /* max & min value of QP */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+ /* max QP */
+ reg &= ~(0x3F << 8);
+ reg |= (p_264->rc_max_qp << 8);
+ /* min QP */
+ reg &= ~(0x3F);
+ reg |= p_264->rc_min_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+ /* macroblock adaptive scaling features */
+ if (p_264->rc_mb) {
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
+ /* dark region */
+ reg &= ~(0x1 << 3);
+ reg |= (p_264->rc_mb_dark << 3);
+ /* smooth region */
+ reg &= ~(0x1 << 2);
+ reg |= (p_264->rc_mb_smooth << 2);
+ /* static region */
+ reg &= ~(0x1 << 1);
+ reg |= (p_264->rc_mb_static << 1);
+ /* high activity region */
+ reg &= ~(0x1);
+ reg |= p_264->rc_mb_activity;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_MB_CTRL);
+ }
+ if (!p->rc_frame &&
+ !p_264->rc_mb) {
+ shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF);
+ shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
+ shm |= (p_264->rc_p_frame_qp & 0x3F);
+ s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
+ }
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+ /* AR VUI control */
+ shm &= ~(0x1 << 15);
+ shm |= (p_264->vui_sar << 1);
+ s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+ if (p_264->vui_sar) {
+		/* aspect ratio IDC */
+ shm = s5p_mfc_read_shm(ctx, SAMPLE_ASPECT_RATIO_IDC);
+ shm &= ~(0xFF);
+ shm |= p_264->vui_sar_idc;
+ s5p_mfc_write_shm(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
+ if (p_264->vui_sar_idc == 0xFF) {
+ /* sample AR info */
+ shm = s5p_mfc_read_shm(ctx, EXTENDED_SAR);
+ shm &= ~(0xFFFFFFFF);
+ shm |= p_264->vui_ext_sar_width << 16;
+ shm |= p_264->vui_ext_sar_height;
+ s5p_mfc_write_shm(ctx, shm, EXTENDED_SAR);
+ }
+ }
+ /* intra picture period for H.264 */
+ shm = s5p_mfc_read_shm(ctx, H264_I_PERIOD);
+ /* control */
+ shm &= ~(0x1 << 16);
+ shm |= (p_264->open_gop << 16);
+ /* value */
+ if (p_264->open_gop) {
+ shm &= ~(0xFFFF);
+ shm |= p_264->open_gop_size;
+ }
+ s5p_mfc_write_shm(ctx, shm, H264_I_PERIOD);
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ shm &= ~(0xFFFF << 16);
+ shm |= (p_264->cpb_size << 16);
+ }
+ s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
+ unsigned int reg;
+ unsigned int shm;
+ unsigned int framerate;
+
+ s5p_mfc_set_enc_params(ctx);
+ /* pictype : number of B */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* num_b_frame - 0 ~ 2 */
+ reg &= ~(0x3 << 16);
+ reg |= (p->num_b_frame << 16);
+ mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* profile & level */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
+ /* level */
+ reg &= ~(0xFF << 8);
+ reg |= (p_mpeg4->level << 8);
+ /* profile - 0 ~ 2 */
+ reg &= ~(0x3F);
+ reg |= p_mpeg4->profile;
+ mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
+ /* quarter_pixel */
+ mfc_write(dev, p_mpeg4->quarter_pixel, S5P_FIMV_ENC_MPEG4_QUART_PXL);
+ /* qp */
+ if (!p->rc_frame) {
+ shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF);
+ shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
+ shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
+ s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
+ }
+ /* frame rate */
+ if (p->rc_frame) {
+ if (p->rc_framerate_denom > 0) {
+ framerate = p->rc_framerate_num * 1000 /
+ p->rc_framerate_denom;
+ mfc_write(dev, framerate,
+ S5P_FIMV_ENC_RC_FRAME_RATE);
+ shm = s5p_mfc_read_shm(ctx, RC_VOP_TIMING);
+ shm &= ~(0xFFFFFFFF);
+ shm |= (1 << 31);
+ shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
+ shm |= (p->rc_framerate_denom & 0xFFFF);
+ s5p_mfc_write_shm(ctx, shm, RC_VOP_TIMING);
+ }
+ } else {
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+ }
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /* frame QP */
+ reg &= ~(0x3F);
+ reg |= p_mpeg4->rc_frame_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* max & min value of QP */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+ /* max QP */
+ reg &= ~(0x3F << 8);
+ reg |= (p_mpeg4->rc_max_qp << 8);
+ /* min QP */
+ reg &= ~(0x3F);
+ reg |= p_mpeg4->rc_min_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ shm &= ~(0xFFFF << 16);
+ shm |= (p->vbv_size << 16);
+ }
+ s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
+ unsigned int reg;
+ unsigned int shm;
+
+ s5p_mfc_set_enc_params(ctx);
+ /* qp */
+ if (!p->rc_frame) {
+ shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF);
+ shm |= (p_h263->rc_p_frame_qp & 0x3F);
+ s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
+ }
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_denom)
+ mfc_write(dev, p->rc_framerate_num * 1000
+ / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /* frame QP */
+ reg &= ~(0x3F);
+ reg |= p_h263->rc_frame_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* max & min value of QP */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+ /* max QP */
+ reg &= ~(0x3F << 8);
+ reg |= (p_h263->rc_max_qp << 8);
+ /* min QP */
+ reg &= ~(0x3F);
+ reg |= p_h263->rc_min_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ shm &= ~(0xFFFF << 16);
+ shm |= (p->vbv_size << 16);
+ }
+ s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+ return 0;
+}
+
+/* Initialize decoding */
+int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ s5p_mfc_set_shared_buffer(ctx);
+	/* Set up the loop filter; for decoding this is only valid for MPEG4 */
+ if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_DEC)
+ mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_LF_CTRL);
+ mfc_write(dev, ((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK) <<
+ S5P_FIMV_SLICE_INT_SHIFT) | (ctx->display_delay_enable <<
+ S5P_FIMV_DDELAY_ENA_SHIFT) | ((ctx->display_delay &
+ S5P_FIMV_DDELAY_VAL_MASK) << S5P_FIMV_DDELAY_VAL_SHIFT),
+ S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+ mfc_write(dev,
+ ((S5P_FIMV_CH_SEQ_HEADER & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
+ | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
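+/* Set or clear the DPB flush request bits in the channel control register */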
+static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int dpb;
+
+ if (flush)
+ dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (
+ S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
+ else
+ dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
+ ~(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
+ mfc_write(dev, dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+}
+
+/* Decode a single frame */
+int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
+ enum s5p_mfc_decode_arg last_frame)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF);
+ s5p_mfc_set_shared_buffer(ctx);
+ s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
+	/* Issue a different command to the instance depending on whether
+	 * this is the last frame or not. */
+ switch (last_frame) {
+ case MFC_DEC_FRAME:
+ mfc_write(dev, ((S5P_FIMV_CH_FRAME_START & S5P_FIMV_CH_MASK) <<
+ S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ break;
+ case MFC_DEC_LAST_FRAME:
+ mfc_write(dev, ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK) <<
+ S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ break;
+ case MFC_DEC_RES_CHANGE:
+ mfc_write(dev, ((S5P_FIMV_CH_FRAME_START_REALLOC &
+ S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
+ S5P_FIMV_SI_CH0_INST_ID);
+ break;
+ }
+ mfc_debug(2, "Decoding a usual frame\n");
+ return 0;
+}
+
+int s5p_mfc_init_encode(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
+ s5p_mfc_set_enc_params_h264(ctx);
+ else if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_ENC)
+ s5p_mfc_set_enc_params_mpeg4(ctx);
+ else if (ctx->codec_mode == S5P_FIMV_CODEC_H263_ENC)
+ s5p_mfc_set_enc_params_h263(ctx);
+ else {
+ mfc_err("Unknown codec for encoding (%x)\n",
+ ctx->codec_mode);
+ return -EINVAL;
+ }
+ s5p_mfc_set_shared_buffer(ctx);
+ mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
+ (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
+/* Encode a single frame */
+int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ /* memory structure cur. frame */
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
+ mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
+ else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
+ mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
+ s5p_mfc_set_shared_buffer(ctx);
+ mfc_write(dev, (S5P_FIMV_CH_FRAME_START << 16 & 0x70000) |
+ (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
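+/*
+ * Pick the next context that has work scheduled, scanning round-robin from
+ * the one after the currently active context.
+ */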
+static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
+{
+ unsigned long flags;
+ int new_ctx;
+ int cnt;
+
+ spin_lock_irqsave(&dev->condlock, flags);
+ new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
+ cnt = 0;
+ while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
+ new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
+ if (++cnt > MFC_NUM_CONTEXTS) {
+ /* No contexts to run */
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ return -EAGAIN;
+ }
+ }
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ return new_ctx;
+}
+
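+/*
+ * Restart decoding after a resolution change: pass an empty stream buffer
+ * and issue the frame-start-with-reallocation command.
+ */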
+static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ s5p_mfc_set_dec_stream_buffer(ctx, 0, 0, 0);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_decode_one_frame(ctx, MFC_DEC_RES_CHANGE);
+}
+
+static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *temp_vb;
+ unsigned long flags;
+ unsigned int index;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ /* Frames are being decoded */
+ if (list_empty(&ctx->src_queue)) {
+ mfc_debug(2, "No src buffers\n");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return -EAGAIN;
+ }
+ /* Get the next source buffer */
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ temp_vb->used = 1;
+ s5p_mfc_set_dec_stream_buffer(ctx,
+ vb2_dma_contig_plane_paddr(temp_vb->b, 0), ctx->consumed_stream,
+ temp_vb->b->v4l2_planes[0].bytesused);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ index = temp_vb->b->v4l2_buf.index;
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
+ last_frame = MFC_DEC_LAST_FRAME;
+ mfc_debug(2, "Setting ctx->state to FINISHING\n");
+ ctx->state = MFCINST_FINISHING;
+ }
+ s5p_mfc_decode_one_frame(ctx, last_frame);
+ return 0;
+}
+
+static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *dst_mb;
+ struct s5p_mfc_buf *src_mb;
+ unsigned long src_y_addr, src_c_addr, dst_addr;
+ unsigned int dst_size;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (list_empty(&ctx->src_queue)) {
+ mfc_debug(2, "no src buffers\n");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return -EAGAIN;
+ }
+ if (list_empty(&ctx->dst_queue)) {
+ mfc_debug(2, "no dst buffers\n");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return -EAGAIN;
+ }
+ src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ src_mb->used = 1;
+ src_y_addr = vb2_dma_contig_plane_paddr(src_mb->b, 0);
+ src_c_addr = vb2_dma_contig_plane_paddr(src_mb->b, 1);
+ s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_mb->used = 1;
+ dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_size = vb2_plane_size(dst_mb->b, 0);
+ s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_encode_one_frame(ctx);
+ return 0;
+}
+
+static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *temp_vb;
+
+ /* Initializing decoding - parsing header */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ mfc_debug(2, "Preparing to init decoding\n");
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ s5p_mfc_set_dec_desc_buffer(ctx);
+ mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+ s5p_mfc_set_dec_stream_buffer(ctx,
+ vb2_dma_contig_plane_paddr(temp_vb->b, 0),
+ 0, temp_vb->b->v4l2_planes[0].bytesused);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_init_decode(ctx);
+}
+
+static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *dst_mb;
+ unsigned long dst_addr;
+ unsigned int dst_size;
+
+ s5p_mfc_set_enc_ref_buffer(ctx);
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_size = vb2_plane_size(dst_mb->b, 0);
+ s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_init_encode(ctx);
+}
+
+static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *temp_vb;
+ int ret;
+
+ /*
+	 * The header has been parsed, now start processing.
+	 * First set the output frame buffers.
+ */
+ if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
+		mfc_err("It seems that not all destination buffers were "
+			"mmapped\nMFC requires that all destination buffers are "
+			"mmapped before starting processing\n");
+ return -EAGAIN;
+ }
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (list_empty(&ctx->src_queue)) {
+ mfc_err("Header has been deallocated in the middle of"
+ " initialization\n");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return -EIO;
+ }
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+ s5p_mfc_set_dec_stream_buffer(ctx,
+ vb2_dma_contig_plane_paddr(temp_vb->b, 0),
+ 0, temp_vb->b->v4l2_planes[0].bytesused);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ ret = s5p_mfc_set_dec_frame_buffer(ctx);
+ if (ret) {
+ mfc_err("Failed to alloc frame mem\n");
+ ctx->state = MFCINST_ERROR;
+ }
+ return ret;
+}
+
+/* Try running an operation on hardware */
+void s5p_mfc_try_run(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_ctx *ctx;
+ int new_ctx;
+ unsigned int ret = 0;
+
+ if (test_bit(0, &dev->enter_suspend)) {
+ mfc_debug(1, "Entering suspend so do not schedule any jobs\n");
+ return;
+ }
+ /* Check whether hardware is not running */
+ if (test_and_set_bit(0, &dev->hw_lock) != 0) {
+ /* This is perfectly ok, the scheduled ctx should wait */
+ mfc_debug(1, "Couldn't lock HW\n");
+ return;
+ }
+ /* Choose the context to run */
+ new_ctx = s5p_mfc_get_new_ctx(dev);
+ if (new_ctx < 0) {
+ /* No contexts to run */
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
+ mfc_err("Failed to unlock hardware\n");
+ return;
+ }
+ mfc_debug(1, "No ctx is scheduled to be run\n");
+ return;
+ }
+ ctx = dev->ctx[new_ctx];
+ /* Got context to run in ctx */
+ /*
+ * Last frame has already been sent to MFC.
+ * Now obtaining frames from MFC buffer
+ */
+ s5p_mfc_clock_on();
+ if (ctx->type == MFCINST_DECODER) {
+ s5p_mfc_set_dec_desc_buffer(ctx);
+ switch (ctx->state) {
+ case MFCINST_FINISHING:
+ s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME);
+ break;
+ case MFCINST_RUNNING:
+ ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
+ break;
+ case MFCINST_INIT:
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ ret = s5p_mfc_open_inst_cmd(ctx);
+ break;
+ case MFCINST_RETURN_INST:
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ ret = s5p_mfc_close_inst_cmd(ctx);
+ break;
+ case MFCINST_GOT_INST:
+ s5p_mfc_run_init_dec(ctx);
+ break;
+ case MFCINST_HEAD_PARSED:
+ ret = s5p_mfc_run_init_dec_buffers(ctx);
+ mfc_debug(1, "head parsed\n");
+ break;
+ case MFCINST_RES_CHANGE_INIT:
+ s5p_mfc_run_res_change(ctx);
+ break;
+ case MFCINST_RES_CHANGE_FLUSH:
+ s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
+ break;
+ case MFCINST_RES_CHANGE_END:
+ mfc_debug(2, "Finished remaining frames after resolution change\n");
+ ctx->capture_state = QUEUE_FREE;
+ mfc_debug(2, "Will re-init the codec\n");
+ s5p_mfc_run_init_dec(ctx);
+ break;
+ default:
+ ret = -EAGAIN;
+ }
+ } else if (ctx->type == MFCINST_ENCODER) {
+ switch (ctx->state) {
+ case MFCINST_FINISHING:
+ case MFCINST_RUNNING:
+ ret = s5p_mfc_run_enc_frame(ctx);
+ break;
+ case MFCINST_INIT:
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ ret = s5p_mfc_open_inst_cmd(ctx);
+ break;
+ case MFCINST_RETURN_INST:
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ ret = s5p_mfc_close_inst_cmd(ctx);
+ break;
+ case MFCINST_GOT_INST:
+ s5p_mfc_run_init_enc(ctx);
+ break;
+ default:
+ ret = -EAGAIN;
+ }
+ } else {
+ mfc_err("Invalid context type: %d\n", ctx->type);
+ ret = -EAGAIN;
+ }
+
+ if (ret) {
+ /* Free hardware lock */
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ mfc_err("Failed to unlock hardware\n");
+
+		/* This is indeed important: since no operation has been
+		 * scheduled, drop the clock count here, because no interrupt
+		 * related to this try_run will ever come from the hardware
+		 * and nobody else will ever do it. */
+ s5p_mfc_clock_off();
+ }
+}
+
+
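+/*
+ * Remove every buffer from the list, clear its plane payloads and hand it
+ * back to vb2 in the error state.
+ */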
+void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
+{
+ struct s5p_mfc_buf *b;
+ int i;
+
+ while (!list_empty(lh)) {
+ b = list_entry(lh->next, struct s5p_mfc_buf, list);
+ for (i = 0; i < b->b->num_planes; i++)
+ vb2_set_plane_payload(b->b, i, 0);
+ vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
+ list_del(&b->list);
+ }
+}
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
new file mode 100644
index 0000000..db83836
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
@@ -0,0 +1,91 @@
+/*
+ * drivers/media/video/s5p-mfc/s5p_mfc_opr.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * Contains declarations of hardware-related functions.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_OPR_H_
+#define S5P_MFC_OPR_H_
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx);
+int s5p_mfc_init_encode(struct s5p_mfc_ctx *mfc_ctx);
+
+/* Decoding functions */
+int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx);
+int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
+ unsigned int start_num_byte,
+ unsigned int buf_size);
+
+/* Encoding functions */
+void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long y_addr, unsigned long c_addr);
+int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long addr, unsigned int size);
+void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long *y_addr, unsigned long *c_addr);
+int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *mfc_ctx);
+
+int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
+ enum s5p_mfc_decode_arg last_frame);
+int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *mfc_ctx);
+
+/* Memory allocation */
+int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx);
+
+int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx);
+
+int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx);
+
+void s5p_mfc_try_run(struct s5p_mfc_dev *dev);
+void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
+
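+/*
+ * Register read shortcuts; note that they expect a local variable named
+ * 'dev' (struct s5p_mfc_dev *) to be in scope at the call site.
+ */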
+#define s5p_mfc_get_dspl_y_adr() (readl(dev->regs_base + \
+ S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+ MFC_OFFSET_SHIFT)
+#define s5p_mfc_get_dec_y_adr() (readl(dev->regs_base + \
+ S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+ MFC_OFFSET_SHIFT)
+#define s5p_mfc_get_dspl_status() readl(dev->regs_base + \
+ S5P_FIMV_SI_DISPLAY_STATUS)
+#define s5p_mfc_get_frame_type() (readl(dev->regs_base + \
+ S5P_FIMV_DECODE_FRAME_TYPE) \
+ & S5P_FIMV_DECODE_FRAME_MASK)
+#define s5p_mfc_get_consumed_stream() readl(dev->regs_base + \
+ S5P_FIMV_SI_CONSUMED_BYTES)
+#define s5p_mfc_get_int_reason() (readl(dev->regs_base + \
+ S5P_FIMV_RISC2HOST_CMD) & \
+ S5P_FIMV_RISC2HOST_CMD_MASK)
+#define s5p_mfc_get_int_err() readl(dev->regs_base + \
+ S5P_FIMV_RISC2HOST_ARG2)
+#define s5p_mfc_err_dec(x) (((x) & S5P_FIMV_ERR_DEC_MASK) >> \
+ S5P_FIMV_ERR_DEC_SHIFT)
+#define s5p_mfc_err_dspl(x) (((x) & S5P_FIMV_ERR_DSPL_MASK) >> \
+ S5P_FIMV_ERR_DSPL_SHIFT)
+#define s5p_mfc_get_img_width() readl(dev->regs_base + \
+ S5P_FIMV_SI_HRESOL)
+#define s5p_mfc_get_img_height() readl(dev->regs_base + \
+ S5P_FIMV_SI_VRESOL)
+#define s5p_mfc_get_dpb_count() readl(dev->regs_base + \
+ S5P_FIMV_SI_BUF_NUMBER)
+#define s5p_mfc_get_inst_no() readl(dev->regs_base + \
+ S5P_FIMV_RISC2HOST_ARG1)
+#define s5p_mfc_get_enc_strm_size() readl(dev->regs_base + \
+ S5P_FIMV_ENC_SI_STRM_SIZE)
+#define s5p_mfc_get_enc_slice_type() readl(dev->regs_base + \
+ S5P_FIMV_ENC_SI_SLICE_TYPE)
+
+#endif /* S5P_MFC_OPR_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_pm.c b/drivers/media/video/s5p-mfc/s5p_mfc_pm.c
new file mode 100644
index 0000000..f6a3035
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_pm.c
@@ -0,0 +1,117 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_pm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_pm.h"
+
+#define MFC_CLKNAME "sclk_mfc"
+#define MFC_GATE_CLK_NAME "mfc"
+
+#define CLK_DEBUG
+
+static struct s5p_mfc_pm *pm;
+static struct s5p_mfc_dev *p_dev;
+
+#ifdef CLK_DEBUG
+atomic_t clk_ref;
+#endif
+
+int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
+{
+ int ret = 0;
+
+ pm = &dev->pm;
+ p_dev = dev;
+ pm->clock_gate = clk_get(&dev->plat_dev->dev, MFC_GATE_CLK_NAME);
+ if (IS_ERR(pm->clock_gate)) {
+ mfc_err("Failed to get clock-gating control\n");
+ ret = -ENOENT;
+ goto err_g_ip_clk;
+ }
+ pm->clock = clk_get(&dev->plat_dev->dev, MFC_CLKNAME);
+ if (IS_ERR(pm->clock)) {
+ mfc_err("Failed to get MFC clock\n");
+ ret = -ENOENT;
+ goto err_g_ip_clk_2;
+ }
+ atomic_set(&pm->power, 0);
+#ifdef CONFIG_PM_RUNTIME
+ pm->device = &dev->plat_dev->dev;
+ pm_runtime_enable(pm->device);
+#endif
+#ifdef CLK_DEBUG
+ atomic_set(&clk_ref, 0);
+#endif
+ return 0;
+err_g_ip_clk_2:
+ clk_put(pm->clock_gate);
+err_g_ip_clk:
+ return ret;
+}
+
+void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
+{
+ clk_put(pm->clock_gate);
+ clk_put(pm->clock);
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_disable(pm->device);
+#endif
+}
+
+int s5p_mfc_clock_on(void)
+{
+ int ret;
+#ifdef CLK_DEBUG
+ atomic_inc(&clk_ref);
+ mfc_debug(3, "+ %d", atomic_read(&clk_ref));
+#endif
+ ret = clk_enable(pm->clock_gate);
+ return ret;
+}
+
+void s5p_mfc_clock_off(void)
+{
+#ifdef CLK_DEBUG
+ atomic_dec(&clk_ref);
+ mfc_debug(3, "- %d", atomic_read(&clk_ref));
+#endif
+ clk_disable(pm->clock_gate);
+}
+
+int s5p_mfc_power_on(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ return pm_runtime_get_sync(pm->device);
+#else
+ atomic_set(&pm->power, 1);
+ return 0;
+#endif
+}
+
+int s5p_mfc_power_off(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ return pm_runtime_put_sync(pm->device);
+#else
+ atomic_set(&pm->power, 0);
+ return 0;
+#endif
+}
+
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_pm.h b/drivers/media/video/s5p-mfc/s5p_mfc_pm.h
new file mode 100644
index 0000000..5107914
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_pm.h
@@ -0,0 +1,24 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_pm.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_PM_H_
+#define S5P_MFC_PM_H_
+
+int s5p_mfc_init_pm(struct s5p_mfc_dev *dev);
+void s5p_mfc_final_pm(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_clock_on(void);
+void s5p_mfc_clock_off(void);
+int s5p_mfc_power_on(void);
+int s5p_mfc_power_off(void);
+
+#endif /* S5P_MFC_PM_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_shm.c b/drivers/media/video/s5p-mfc/s5p_mfc_shm.c
new file mode 100644
index 0000000..91fdbac8
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_shm.c
@@ -0,0 +1,47 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_shm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifdef CONFIG_ARCH_EXYNOS4
+#include <linux/dma-mapping.h>
+#endif
+#include <linux/io.h>
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+
+int s5p_mfc_init_shm(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ void *shm_alloc_ctx = dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+
+ ctx->shm_alloc = vb2_dma_contig_memops.alloc(shm_alloc_ctx,
+ SHARED_BUF_SIZE);
+ if (IS_ERR(ctx->shm_alloc)) {
+ mfc_err("failed to allocate shared memory\n");
+ return PTR_ERR(ctx->shm_alloc);
+ }
+ /* shm_ofs only keeps the offset from base (port a) */
+ ctx->shm_ofs = s5p_mfc_mem_cookie(shm_alloc_ctx, ctx->shm_alloc)
+ - dev->bank1;
+ BUG_ON(ctx->shm_ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ ctx->shm = vb2_dma_contig_memops.vaddr(ctx->shm_alloc);
+ if (!ctx->shm) {
+ vb2_dma_contig_memops.put(ctx->shm_alloc);
+ ctx->shm_ofs = 0;
+ ctx->shm_alloc = NULL;
+		mfc_err("failed to get virtual address of shared memory\n");
+ return -ENOMEM;
+ }
+ memset((void *)ctx->shm, 0, SHARED_BUF_SIZE);
+ wmb();
+ return 0;
+}
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
new file mode 100644
index 0000000..764eac6
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
@@ -0,0 +1,91 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_SHM_H_
+#define S5P_MFC_SHM_H_
+
+enum MFC_SHM_OFS
+{
+ EXTENEDED_DECODE_STATUS = 0x00, /* D */
+ SET_FRAME_TAG = 0x04, /* D */
+ GET_FRAME_TAG_TOP = 0x08, /* D */
+ GET_FRAME_TAG_BOT = 0x0C, /* D */
+ PIC_TIME_TOP = 0x10, /* D */
+ PIC_TIME_BOT = 0x14, /* D */
+ START_BYTE_NUM = 0x18, /* D */
+
+ CROP_INFO_H = 0x20, /* D */
+ CROP_INFO_V = 0x24, /* D */
+ EXT_ENC_CONTROL = 0x28, /* E */
+ ENC_PARAM_CHANGE = 0x2C, /* E */
+ RC_VOP_TIMING = 0x30, /* E, MPEG4 */
+ HEC_PERIOD = 0x34, /* E, MPEG4 */
+ METADATA_ENABLE = 0x38, /* C */
+ METADATA_STATUS = 0x3C, /* C */
+ METADATA_DISPLAY_INDEX = 0x40, /* C */
+ EXT_METADATA_START_ADDR = 0x44, /* C */
+ PUT_EXTRADATA = 0x48, /* C */
+ EXTRADATA_ADDR = 0x4C, /* C */
+
+ ALLOC_LUMA_DPB_SIZE = 0x64, /* D */
+ ALLOC_CHROMA_DPB_SIZE = 0x68, /* D */
+ ALLOC_MV_SIZE = 0x6C, /* D */
+ P_B_FRAME_QP = 0x70, /* E */
+ SAMPLE_ASPECT_RATIO_IDC = 0x74, /* E, H.264, depend on
+ ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
+	EXTENDED_SAR		= 0x78, /* E, H.264, depend on
+ ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
+ DISP_PIC_PROFILE = 0x7C, /* D */
+ FLUSH_CMD_TYPE = 0x80, /* C */
+ FLUSH_CMD_INBUF1 = 0x84, /* C */
+ FLUSH_CMD_INBUF2 = 0x88, /* C */
+ FLUSH_CMD_OUTBUF = 0x8C, /* E */
+ NEW_RC_BIT_RATE = 0x90, /* E, format as RC_BIT_RATE(0xC5A8)
+ depend on RC_BIT_RATE_CHANGE in ENC_PARAM_CHANGE */
+ NEW_RC_FRAME_RATE = 0x94, /* E, format as RC_FRAME_RATE(0xD0D0)
+ depend on RC_FRAME_RATE_CHANGE in ENC_PARAM_CHANGE */
+ NEW_I_PERIOD = 0x98, /* E, format as I_FRM_CTRL(0xC504)
+ depend on I_PERIOD_CHANGE in ENC_PARAM_CHANGE */
+ H264_I_PERIOD = 0x9C, /* E, H.264, open GOP */
+ RC_CONTROL_CONFIG = 0xA0, /* E */
+ BATCH_INPUT_ADDR = 0xA4, /* E */
+ BATCH_OUTPUT_ADDR = 0xA8, /* E */
+ BATCH_OUTPUT_SIZE = 0xAC, /* E */
+ MIN_LUMA_DPB_SIZE = 0xB0, /* D */
+ DEVICE_FORMAT_ID = 0xB4, /* C */
+ H264_POC_TYPE = 0xB8, /* D */
+ MIN_CHROMA_DPB_SIZE = 0xBC, /* D */
+ DISP_PIC_FRAME_TYPE = 0xC0, /* D */
+ FREE_LUMA_DPB = 0xC4, /* D, VC1 MPEG4 */
+ ASPECT_RATIO_INFO = 0xC8, /* D, MPEG4 */
+ EXTENDED_PAR = 0xCC, /* D, MPEG4 */
+ DBG_HISTORY_INPUT0 = 0xD0, /* C */
+ DBG_HISTORY_INPUT1 = 0xD4, /* C */
+ DBG_HISTORY_OUTPUT = 0xD8, /* C */
+ HIERARCHICAL_P_QP = 0xE0, /* E, H.264 */
+};
+
+int s5p_mfc_init_shm(struct s5p_mfc_ctx *ctx);
+
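+/*
+ * Shared memory accessors: writes are followed by a wmb() so the value is
+ * visible to the MFC firmware before a command is issued; reads are
+ * preceded by an rmb() to pick up values updated by the firmware.
+ */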
+#define s5p_mfc_write_shm(ctx, x, ofs) \
+ do { \
+ writel(x, (ctx->shm + ofs)); \
+ wmb(); \
+ } while (0)
+
+static inline u32 s5p_mfc_read_shm(struct s5p_mfc_ctx *ctx, unsigned int ofs)
+{
+ rmb();
+ return readl(ctx->shm + ofs);
+}
+
+#endif /* S5P_MFC_SHM_H_ */
diff --git a/drivers/media/video/s5p-tv/Kconfig b/drivers/media/video/s5p-tv/Kconfig
new file mode 100644
index 0000000..9c37dee
--- /dev/null
+++ b/drivers/media/video/s5p-tv/Kconfig
@@ -0,0 +1,76 @@
+# drivers/media/video/s5p-tv/Kconfig
+#
+# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+# Tomasz Stanislawski <t.stanislaws@samsung.com>
+#
+# Licensed under GPL
+
+config VIDEO_SAMSUNG_S5P_TV
+ bool "Samsung TV driver for S5P platform (experimental)"
+ depends on PLAT_S5P
+ depends on EXPERIMENTAL
+ default n
+ ---help---
+	  Say Y here to enable selecting the TV output devices for
+	  the Samsung S5P platform.
+
+if VIDEO_SAMSUNG_S5P_TV
+
+config VIDEO_SAMSUNG_S5P_HDMI
+ tristate "Samsung HDMI Driver"
+ depends on VIDEO_V4L2
+ depends on VIDEO_SAMSUNG_S5P_TV
+ select VIDEO_SAMSUNG_S5P_HDMIPHY
+ help
+	  Say Y here if you want support for the HDMI output
+	  interface in Samsung S5P SoCs. The driver can be compiled
+	  as a module. It is an auxiliary driver that exposes a V4L2
+	  subdev for use by other drivers. This driver requires the
+	  hdmiphy driver to work correctly.
+
+config VIDEO_SAMSUNG_S5P_HDMI_DEBUG
+ bool "Enable debug for HDMI Driver"
+ depends on VIDEO_SAMSUNG_S5P_HDMI
+ default n
+ help
+ Enables debugging for HDMI driver.
+
+config VIDEO_SAMSUNG_S5P_HDMIPHY
+ tristate "Samsung HDMIPHY Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && I2C
+ depends on VIDEO_SAMSUNG_S5P_TV
+ help
+	  Say Y here if you want support for the physical HDMI
+	  interface in Samsung S5P SoCs. The driver can be compiled
+	  as a module. It is an I2C driver that exposes a V4L2
+	  subdev for use by other drivers.
+
+config VIDEO_SAMSUNG_S5P_SDO
+ tristate "Samsung Analog TV Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_SAMSUNG_S5P_TV
+ help
+	  Say Y here if you want support for the analog TV output
+	  interface in Samsung S5P SoCs. The driver can be compiled
+	  as a module. It is an auxiliary driver that exposes a V4L2
+	  subdev for use by other drivers. This driver requires the
+	  hdmiphy driver to work correctly.
+
+config VIDEO_SAMSUNG_S5P_MIXER
+ tristate "Samsung Mixer and Video Processor Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_SAMSUNG_S5P_TV
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Say Y here if you want support for the Mixer in Samsung S5P SoCs.
+	  This device produces image data for one of the output interfaces.
+
+config VIDEO_SAMSUNG_S5P_MIXER_DEBUG
+ bool "Enable debug for Mixer Driver"
+ depends on VIDEO_SAMSUNG_S5P_MIXER
+ default n
+ help
+ Enables debugging for Mixer driver.
+
+endif # VIDEO_SAMSUNG_S5P_TV
diff --git a/drivers/media/video/s5p-tv/Makefile b/drivers/media/video/s5p-tv/Makefile
new file mode 100644
index 0000000..37e4c17
--- /dev/null
+++ b/drivers/media/video/s5p-tv/Makefile
@@ -0,0 +1,17 @@
+# drivers/media/video/s5p-tv/Makefile
+#
+# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+# Tomasz Stanislawski <t.stanislaws@samsung.com>
+#
+# Licensed under GPL
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMIPHY) += s5p-hdmiphy.o
+s5p-hdmiphy-y += hdmiphy_drv.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMI) += s5p-hdmi.o
+s5p-hdmi-y += hdmi_drv.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_SDO) += s5p-sdo.o
+s5p-sdo-y += sdo_drv.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MIXER) += s5p-mixer.o
+s5p-mixer-y += mixer_drv.o mixer_video.o mixer_reg.o mixer_grp_layer.o mixer_vp_layer.o
+
diff --git a/drivers/media/video/s5p-tv/hdmi_drv.c b/drivers/media/video/s5p-tv/hdmi_drv.c
new file mode 100644
index 0000000..06d6663
--- /dev/null
+++ b/drivers/media/video/s5p-tv/hdmi_drv.c
@@ -0,0 +1,1042 @@
+/*
+ * Samsung HDMI interface driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundiation. either version 2 of the License,
+ * by the Free Software Foundation; either version 2 of the License,
+ */
+
+#ifdef CONFIG_VIDEO_SAMSUNG_S5P_HDMI_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/bug.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+
+#include "regs-hdmi.h"
+
+MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung HDMI");
+MODULE_LICENSE("GPL");
+
+/* default preset configured on probe */
+#define HDMI_DEFAULT_PRESET V4L2_DV_1080P60
+
+struct hdmi_resources {
+ struct clk *hdmi;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_pixel;
+ struct clk *sclk_hdmiphy;
+ struct clk *hdmiphy;
+ struct regulator_bulk_data *regul_bulk;
+ int regul_count;
+};
+
+struct hdmi_device {
+ /** base address of HDMI registers */
+ void __iomem *regs;
+ /** HDMI interrupt */
+ unsigned int irq;
+ /** pointer to device parent */
+ struct device *dev;
+ /** subdev generated by HDMI device */
+ struct v4l2_subdev sd;
+ /** V4L2 device structure */
+ struct v4l2_device v4l2_dev;
+ /** subdev of HDMIPHY interface */
+ struct v4l2_subdev *phy_sd;
+ /** configuration of current graphic mode */
+ const struct hdmi_preset_conf *cur_conf;
+ /** current preset */
+ u32 cur_preset;
+ /** other resources */
+ struct hdmi_resources res;
+};
+
+struct hdmi_driver_data {
+ int hdmiphy_bus;
+};
+
+struct hdmi_tg_regs {
+ u8 cmd;
+ u8 h_fsz_l;
+ u8 h_fsz_h;
+ u8 hact_st_l;
+ u8 hact_st_h;
+ u8 hact_sz_l;
+ u8 hact_sz_h;
+ u8 v_fsz_l;
+ u8 v_fsz_h;
+ u8 vsync_l;
+ u8 vsync_h;
+ u8 vsync2_l;
+ u8 vsync2_h;
+ u8 vact_st_l;
+ u8 vact_st_h;
+ u8 vact_sz_l;
+ u8 vact_sz_h;
+ u8 field_chg_l;
+ u8 field_chg_h;
+ u8 vact_st2_l;
+ u8 vact_st2_h;
+ u8 vsync_top_hdmi_l;
+ u8 vsync_top_hdmi_h;
+ u8 vsync_bot_hdmi_l;
+ u8 vsync_bot_hdmi_h;
+ u8 field_top_hdmi_l;
+ u8 field_top_hdmi_h;
+ u8 field_bot_hdmi_l;
+ u8 field_bot_hdmi_h;
+};
+
+struct hdmi_core_regs {
+ u8 h_blank[2];
+ u8 v_blank[3];
+ u8 h_v_line[3];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f[3];
+ u8 h_sync_gen[3];
+ u8 v_sync_gen1[3];
+ u8 v_sync_gen2[3];
+ u8 v_sync_gen3[3];
+};
+
+struct hdmi_preset_conf {
+ struct hdmi_core_regs core;
+ struct hdmi_tg_regs tg;
+ struct v4l2_mbus_framefmt mbus_fmt;
+};
+
+/* I2C module and id for HDMIPHY */
+static struct i2c_board_info hdmiphy_info = {
+ I2C_BOARD_INFO("hdmiphy", 0x38),
+};
+
+static struct hdmi_driver_data hdmi_driver_data[] = {
+ { .hdmiphy_bus = 3 },
+ { .hdmiphy_bus = 8 },
+};
+
+static struct platform_device_id hdmi_driver_types[] = {
+ {
+ .name = "s5pv210-hdmi",
+ .driver_data = (unsigned long)&hdmi_driver_data[0],
+ }, {
+ .name = "exynos4-hdmi",
+ .driver_data = (unsigned long)&hdmi_driver_data[1],
+ }, {
+ /* end node */
+ }
+};
+
+static const struct v4l2_subdev_ops hdmi_sd_ops;
+
+static struct hdmi_device *sd_to_hdmi_dev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct hdmi_device, sd);
+}
+
+static inline
+void hdmi_write(struct hdmi_device *hdev, u32 reg_id, u32 value)
+{
+ writel(value, hdev->regs + reg_id);
+}
+
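+/* Read-modify-write helper: update only the bits selected by mask */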
+static inline
+void hdmi_write_mask(struct hdmi_device *hdev, u32 reg_id, u32 value, u32 mask)
+{
+ u32 old = readl(hdev->regs + reg_id);
+ value = (value & mask) | (old & ~mask);
+ writel(value, hdev->regs + reg_id);
+}
+
+static inline
+void hdmi_writeb(struct hdmi_device *hdev, u32 reg_id, u8 value)
+{
+ writeb(value, hdev->regs + reg_id);
+}
+
+static inline u32 hdmi_read(struct hdmi_device *hdev, u32 reg_id)
+{
+ return readl(hdev->regs + reg_id);
+}
+
+static irqreturn_t hdmi_irq_handler(int irq, void *dev_data)
+{
+ struct hdmi_device *hdev = dev_data;
+ u32 intc_flag;
+
+ (void)irq;
+ intc_flag = hdmi_read(hdev, HDMI_INTC_FLAG);
+ /* clearing flags for HPD plug/unplug */
+ if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
+ printk(KERN_INFO "unplugged\n");
+ hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
+ HDMI_INTC_FLAG_HPD_UNPLUG);
+ }
+ if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
+ printk(KERN_INFO "plugged\n");
+ hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
+ HDMI_INTC_FLAG_HPD_PLUG);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void hdmi_reg_init(struct hdmi_device *hdev)
+{
+ /* enable HPD interrupts */
+ hdmi_write_mask(hdev, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
+ HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+ /* choose HDMI mode */
+ hdmi_write_mask(hdev, HDMI_MODE_SEL,
+ HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+ /* disable bluescreen */
+ hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+	/* choose the bluescreen color */
+ hdmi_writeb(hdev, HDMI_BLUE_SCREEN_0, 0x12);
+ hdmi_writeb(hdev, HDMI_BLUE_SCREEN_1, 0x34);
+ hdmi_writeb(hdev, HDMI_BLUE_SCREEN_2, 0x56);
+ /* enable AVI packet every vsync, fixes purple line problem */
+ hdmi_writeb(hdev, HDMI_AVI_CON, 0x02);
+	/* force YUV444; see CEA-861-D, table 7 for details */
+ hdmi_writeb(hdev, HDMI_AVI_BYTE(0), 2 << 5);
+ hdmi_write_mask(hdev, HDMI_CON_1, 2, 3 << 5);
+}
+
+static void hdmi_timing_apply(struct hdmi_device *hdev,
+ const struct hdmi_preset_conf *conf)
+{
+ const struct hdmi_core_regs *core = &conf->core;
+ const struct hdmi_tg_regs *tg = &conf->tg;
+
+ /* setting core registers */
+ hdmi_writeb(hdev, HDMI_H_BLANK_0, core->h_blank[0]);
+ hdmi_writeb(hdev, HDMI_H_BLANK_1, core->h_blank[1]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_0, core->v_blank[0]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_1, core->v_blank[1]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_2, core->v_blank[2]);
+ hdmi_writeb(hdev, HDMI_H_V_LINE_0, core->h_v_line[0]);
+ hdmi_writeb(hdev, HDMI_H_V_LINE_1, core->h_v_line[1]);
+ hdmi_writeb(hdev, HDMI_H_V_LINE_2, core->h_v_line[2]);
+ hdmi_writeb(hdev, HDMI_VSYNC_POL, core->vsync_pol[0]);
+ hdmi_writeb(hdev, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
+ hdmi_writeb(hdev, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
+ hdmi_writeb(hdev, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
+ hdmi_writeb(hdev, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ /* Timing generator registers */
+ hdmi_writeb(hdev, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
+ hdmi_writeb(hdev, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
+ hdmi_writeb(hdev, HDMI_TG_HACT_ST_L, tg->hact_st_l);
+ hdmi_writeb(hdev, HDMI_TG_HACT_ST_H, tg->hact_st_h);
+ hdmi_writeb(hdev, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
+ hdmi_writeb(hdev, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
+ hdmi_writeb(hdev, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
+ hdmi_writeb(hdev, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_L, tg->vsync_l);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_H, tg->vsync_h);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC2_L, tg->vsync2_l);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC2_H, tg->vsync2_h);
+ hdmi_writeb(hdev, HDMI_TG_VACT_ST_L, tg->vact_st_l);
+ hdmi_writeb(hdev, HDMI_TG_VACT_ST_H, tg->vact_st_h);
+ hdmi_writeb(hdev, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
+ hdmi_writeb(hdev, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
+ hdmi_writeb(hdev, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
+ hdmi_writeb(hdev, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+}
+
+static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
+{
+ struct device *dev = hdmi_dev->dev;
+ const struct hdmi_preset_conf *conf = hdmi_dev->cur_conf;
+ struct v4l2_dv_preset preset;
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* reset hdmiphy */
+ hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
+ mdelay(10);
+ hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
+ mdelay(10);
+
+ /* configure presets */
+ preset.preset = hdmi_dev->cur_preset;
+ ret = v4l2_subdev_call(hdmi_dev->phy_sd, video, s_dv_preset, &preset);
+ if (ret) {
+ dev_err(dev, "failed to set preset (%u)\n", preset.preset);
+ return ret;
+ }
+
+ /* resetting HDMI core */
+ hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT);
+ mdelay(10);
+ hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
+ mdelay(10);
+
+ hdmi_reg_init(hdmi_dev);
+
+	/* apply timing configuration (core and TG registers) */
+ hdmi_timing_apply(hdmi_dev, conf);
+
+ return 0;
+}
+
+static void hdmi_dumpregs(struct hdmi_device *hdev, char *prefix)
+{
+#define DUMPREG(reg_id) \
+ dev_dbg(hdev->dev, "%s:" #reg_id " = %08x\n", prefix, \
+ readl(hdev->regs + reg_id))
+
+ dev_dbg(hdev->dev, "%s: ---- CONTROL REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_INTC_FLAG);
+ DUMPREG(HDMI_INTC_CON);
+ DUMPREG(HDMI_HPD_STATUS);
+ DUMPREG(HDMI_PHY_RSTOUT);
+ DUMPREG(HDMI_PHY_VPLL);
+ DUMPREG(HDMI_PHY_CMU);
+ DUMPREG(HDMI_CORE_RSTOUT);
+
+ dev_dbg(hdev->dev, "%s: ---- CORE REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_CON_0);
+ DUMPREG(HDMI_CON_1);
+ DUMPREG(HDMI_CON_2);
+ DUMPREG(HDMI_SYS_STATUS);
+ DUMPREG(HDMI_PHY_STATUS);
+ DUMPREG(HDMI_STATUS_EN);
+ DUMPREG(HDMI_HPD);
+ DUMPREG(HDMI_MODE_SEL);
+ DUMPREG(HDMI_HPD_GEN);
+ DUMPREG(HDMI_DC_CONTROL);
+ DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+
+ dev_dbg(hdev->dev, "%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_H_BLANK_0);
+ DUMPREG(HDMI_H_BLANK_1);
+ DUMPREG(HDMI_V_BLANK_0);
+ DUMPREG(HDMI_V_BLANK_1);
+ DUMPREG(HDMI_V_BLANK_2);
+ DUMPREG(HDMI_H_V_LINE_0);
+ DUMPREG(HDMI_H_V_LINE_1);
+ DUMPREG(HDMI_H_V_LINE_2);
+ DUMPREG(HDMI_VSYNC_POL);
+ DUMPREG(HDMI_INT_PRO_MODE);
+ DUMPREG(HDMI_V_BLANK_F_0);
+ DUMPREG(HDMI_V_BLANK_F_1);
+ DUMPREG(HDMI_V_BLANK_F_2);
+ DUMPREG(HDMI_H_SYNC_GEN_0);
+ DUMPREG(HDMI_H_SYNC_GEN_1);
+ DUMPREG(HDMI_H_SYNC_GEN_2);
+ DUMPREG(HDMI_V_SYNC_GEN_1_0);
+ DUMPREG(HDMI_V_SYNC_GEN_1_1);
+ DUMPREG(HDMI_V_SYNC_GEN_1_2);
+ DUMPREG(HDMI_V_SYNC_GEN_2_0);
+ DUMPREG(HDMI_V_SYNC_GEN_2_1);
+ DUMPREG(HDMI_V_SYNC_GEN_2_2);
+ DUMPREG(HDMI_V_SYNC_GEN_3_0);
+ DUMPREG(HDMI_V_SYNC_GEN_3_1);
+ DUMPREG(HDMI_V_SYNC_GEN_3_2);
+
+ dev_dbg(hdev->dev, "%s: ---- TG REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_TG_CMD);
+ DUMPREG(HDMI_TG_H_FSZ_L);
+ DUMPREG(HDMI_TG_H_FSZ_H);
+ DUMPREG(HDMI_TG_HACT_ST_L);
+ DUMPREG(HDMI_TG_HACT_ST_H);
+ DUMPREG(HDMI_TG_HACT_SZ_L);
+ DUMPREG(HDMI_TG_HACT_SZ_H);
+ DUMPREG(HDMI_TG_V_FSZ_L);
+ DUMPREG(HDMI_TG_V_FSZ_H);
+ DUMPREG(HDMI_TG_VSYNC_L);
+ DUMPREG(HDMI_TG_VSYNC_H);
+ DUMPREG(HDMI_TG_VSYNC2_L);
+ DUMPREG(HDMI_TG_VSYNC2_H);
+ DUMPREG(HDMI_TG_VACT_ST_L);
+ DUMPREG(HDMI_TG_VACT_ST_H);
+ DUMPREG(HDMI_TG_VACT_SZ_L);
+ DUMPREG(HDMI_TG_VACT_SZ_H);
+ DUMPREG(HDMI_TG_FIELD_CHG_L);
+ DUMPREG(HDMI_TG_FIELD_CHG_H);
+ DUMPREG(HDMI_TG_VACT_ST2_L);
+ DUMPREG(HDMI_TG_VACT_ST2_H);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static const struct hdmi_preset_conf hdmi_conf_480p = {
+ .core = {
+ .h_blank = {0x8a, 0x00},
+ .v_blank = {0x0d, 0x6a, 0x01},
+ .h_v_line = {0x0d, 0xa2, 0x35},
+ .vsync_pol = {0x01},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00},
+ .h_sync_gen = {0x0e, 0x30, 0x11},
+ .v_sync_gen1 = {0x0f, 0x90, 0x00},
+		/* others: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x5a, 0x03, /* h_fsz */
+ 0x8a, 0x00, 0xd0, 0x02, /* hact */
+ 0x0d, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0xe0, 0x01, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+ .mbus_fmt = {
+ .width = 720,
+ .height = 480,
+ .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
+ .field = V4L2_FIELD_NONE,
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_720p60 = {
+ .core = {
+ .h_blank = {0x72, 0x01},
+ .v_blank = {0xee, 0xf2, 0x00},
+ .h_v_line = {0xee, 0x22, 0x67},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x6c, 0x50, 0x02},
+ .v_sync_gen1 = {0x0a, 0x50, 0x00},
+		/* others: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x72, 0x06, /* h_fsz */
+ 0x72, 0x01, 0x00, 0x05, /* hact */
+ 0xee, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x1e, 0x00, 0xd0, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+ .mbus_fmt = {
+ .width = 1280,
+ .height = 720,
+ .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
+ .field = V4L2_FIELD_NONE,
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
+ .core = {
+ .h_blank = {0xd0, 0x02},
+ .v_blank = {0x65, 0x6c, 0x01},
+ .h_v_line = {0x65, 0x04, 0xa5},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x0e, 0xea, 0x08},
+ .v_sync_gen1 = {0x09, 0x40, 0x00},
+		/* others: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x18, 0x01, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+ .mbus_fmt = {
+ .width = 1920,
+ .height = 1080,
+ .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
+ .field = V4L2_FIELD_NONE,
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v_blank = {0x65, 0x6c, 0x01},
+ .h_v_line = {0x65, 0x84, 0x89},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x56, 0x08, 0x02},
+ .v_sync_gen1 = {0x09, 0x40, 0x00},
+		/* others: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x18, 0x01, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+ .mbus_fmt = {
+ .width = 1920,
+ .height = 1080,
+ .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
+ .field = V4L2_FIELD_NONE,
+ },
+};
+
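+/* note: V4L2_DV_1080P30 reuses the 1080p60 core/TG timings below; only the
+ * pixel clock differs and that is handled by the HDMIPHY configuration */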
+static const struct {
+ u32 preset;
+ const struct hdmi_preset_conf *conf;
+} hdmi_conf[] = {
+ { V4L2_DV_480P59_94, &hdmi_conf_480p },
+ { V4L2_DV_720P59_94, &hdmi_conf_720p60 },
+ { V4L2_DV_1080P50, &hdmi_conf_1080p50 },
+ { V4L2_DV_1080P30, &hdmi_conf_1080p60 },
+ { V4L2_DV_1080P60, &hdmi_conf_1080p60 },
+};
+
+static const struct hdmi_preset_conf *hdmi_preset2conf(u32 preset)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_conf); ++i)
+ if (hdmi_conf[i].preset == preset)
+ return hdmi_conf[i].conf;
+ return NULL;
+}
+
+static int hdmi_streamon(struct hdmi_device *hdev)
+{
+ struct device *dev = hdev->dev;
+ struct hdmi_resources *res = &hdev->res;
+ int ret, tries;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
+ if (ret)
+ return ret;
+
+ /* waiting for HDMIPHY's PLL to get to steady state */
+ for (tries = 100; tries; --tries) {
+ u32 val = hdmi_read(hdev, HDMI_PHY_STATUS);
+ if (val & HDMI_PHY_STATUS_READY)
+ break;
+ mdelay(1);
+ }
+ /* steady state not achieved */
+ if (tries == 0) {
+ dev_err(dev, "hdmiphy's pll could not reach steady state.\n");
+ v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
+ hdmi_dumpregs(hdev, "s_stream");
+ return -EIO;
+ }
+
+ /* hdmiphy clock is used for HDMI in streaming mode */
+ clk_disable(res->sclk_hdmi);
+ clk_set_parent(res->sclk_hdmi, res->sclk_hdmiphy);
+ clk_enable(res->sclk_hdmi);
+
+ /* enable HDMI and timing generator */
+ hdmi_write_mask(hdev, HDMI_CON_0, ~0, HDMI_EN);
+ hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+ hdmi_dumpregs(hdev, "streamon");
+ return 0;
+}
+
+static int hdmi_streamoff(struct hdmi_device *hdev)
+{
+ struct device *dev = hdev->dev;
+ struct hdmi_resources *res = &hdev->res;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_EN);
+ hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_EN);
+
+ /* pixel(vpll) clock is used for HDMI in config mode */
+ clk_disable(res->sclk_hdmi);
+ clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+ clk_enable(res->sclk_hdmi);
+
+ v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
+
+ hdmi_dumpregs(hdev, "streamoff");
+ return 0;
+}
+
+static int hdmi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+ struct device *dev = hdev->dev;
+
+ dev_dbg(dev, "%s(%d)\n", __func__, enable);
+ if (enable)
+ return hdmi_streamon(hdev);
+ return hdmi_streamoff(hdev);
+}
+
+static void hdmi_resource_poweron(struct hdmi_resources *res)
+{
+ /* turn HDMI power on */
+ regulator_bulk_enable(res->regul_count, res->regul_bulk);
+ /* power-on hdmi physical interface */
+ clk_enable(res->hdmiphy);
+	/* use pixel (vpll) clock as parent; HDMIPHY is not running yet */
+ clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+ /* turn clocks on */
+ clk_enable(res->sclk_hdmi);
+}
+
+static void hdmi_resource_poweroff(struct hdmi_resources *res)
+{
+ /* turn clocks off */
+ clk_disable(res->sclk_hdmi);
+ /* power-off hdmiphy */
+ clk_disable(res->hdmiphy);
+ /* turn HDMI power off */
+ regulator_bulk_disable(res->regul_count, res->regul_bulk);
+}
+
+static int hdmi_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+ int ret;
+
+ if (on)
+ ret = pm_runtime_get_sync(hdev->dev);
+ else
+ ret = pm_runtime_put_sync(hdev->dev);
+ /* only values < 0 indicate errors */
+ return IS_ERR_VALUE(ret) ? ret : 0;
+}
+
+static int hdmi_s_dv_preset(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *preset)
+{
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+ struct device *dev = hdev->dev;
+ const struct hdmi_preset_conf *conf;
+
+ conf = hdmi_preset2conf(preset->preset);
+ if (conf == NULL) {
+ dev_err(dev, "preset (%u) not supported\n", preset->preset);
+ return -EINVAL;
+ }
+ hdev->cur_conf = conf;
+ hdev->cur_preset = preset->preset;
+ return 0;
+}
+
+static int hdmi_g_dv_preset(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *preset)
+{
+ memset(preset, 0, sizeof(*preset));
+ preset->preset = sd_to_hdmi_dev(sd)->cur_preset;
+ return 0;
+}
+
+static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+ struct device *dev = hdev->dev;
+
+ dev_dbg(dev, "%s\n", __func__);
+ if (!hdev->cur_conf)
+ return -EINVAL;
+ *fmt = hdev->cur_conf->mbus_fmt;
+ return 0;
+}
+
+static int hdmi_enum_dv_presets(struct v4l2_subdev *sd,
+ struct v4l2_dv_enum_preset *preset)
+{
+ if (preset->index >= ARRAY_SIZE(hdmi_conf))
+ return -EINVAL;
+ return v4l_fill_dv_preset_info(hdmi_conf[preset->index].preset, preset);
+}
+
+static const struct v4l2_subdev_core_ops hdmi_sd_core_ops = {
+ .s_power = hdmi_s_power,
+};
+
+static const struct v4l2_subdev_video_ops hdmi_sd_video_ops = {
+ .s_dv_preset = hdmi_s_dv_preset,
+ .g_dv_preset = hdmi_g_dv_preset,
+ .enum_dv_presets = hdmi_enum_dv_presets,
+ .g_mbus_fmt = hdmi_g_mbus_fmt,
+ .s_stream = hdmi_s_stream,
+};
+
+static const struct v4l2_subdev_ops hdmi_sd_ops = {
+ .core = &hdmi_sd_core_ops,
+ .video = &hdmi_sd_video_ops,
+};
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+
+ dev_dbg(dev, "%s\n", __func__);
+ hdmi_resource_poweroff(&hdev->res);
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+ int ret = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ hdmi_resource_poweron(&hdev->res);
+
+ ret = hdmi_conf_apply(hdev);
+ if (ret)
+ goto fail;
+
+ dev_dbg(dev, "poweron succeed\n");
+
+ return 0;
+
+fail:
+ hdmi_resource_poweroff(&hdev->res);
+ dev_err(dev, "poweron failed\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
+static void hdmi_resources_cleanup(struct hdmi_device *hdev)
+{
+ struct hdmi_resources *res = &hdev->res;
+
+ dev_dbg(hdev->dev, "HDMI resource cleanup\n");
+ /* put clocks, power */
+ if (res->regul_count)
+ regulator_bulk_free(res->regul_count, res->regul_bulk);
+ /* kfree is NULL-safe */
+ kfree(res->regul_bulk);
+ if (!IS_ERR_OR_NULL(res->hdmiphy))
+ clk_put(res->hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
+ clk_put(res->sclk_hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_pixel))
+ clk_put(res->sclk_pixel);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+ clk_put(res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(res->hdmi))
+ clk_put(res->hdmi);
+ memset(res, 0, sizeof *res);
+}
+
+static int hdmi_resources_init(struct hdmi_device *hdev)
+{
+ struct device *dev = hdev->dev;
+ struct hdmi_resources *res = &hdev->res;
+ static char *supply[] = {
+ "hdmi-en",
+ "vdd",
+ "vdd_osc",
+ "vdd_pll",
+ };
+ int i, ret;
+
+ dev_dbg(dev, "HDMI resource init\n");
+
+ memset(res, 0, sizeof *res);
+ /* get clocks, power */
+
+ res->hdmi = clk_get(dev, "hdmi");
+ if (IS_ERR_OR_NULL(res->hdmi)) {
+ dev_err(dev, "failed to get clock 'hdmi'\n");
+ goto fail;
+ }
+ res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+ goto fail;
+ }
+ res->sclk_pixel = clk_get(dev, "sclk_pixel");
+ if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+ dev_err(dev, "failed to get clock 'sclk_pixel'\n");
+ goto fail;
+ }
+ res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+ if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+ dev_err(dev, "failed to get clock 'sclk_hdmiphy'\n");
+ goto fail;
+ }
+ res->hdmiphy = clk_get(dev, "hdmiphy");
+ if (IS_ERR_OR_NULL(res->hdmiphy)) {
+ dev_err(dev, "failed to get clock 'hdmiphy'\n");
+ goto fail;
+ }
+ res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+ sizeof res->regul_bulk[0], GFP_KERNEL);
+ if (!res->regul_bulk) {
+ dev_err(dev, "failed to get memory for regulators\n");
+ goto fail;
+ }
+ for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+ res->regul_bulk[i].supply = supply[i];
+ res->regul_bulk[i].consumer = NULL;
+ }
+
+ ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ if (ret) {
+ dev_err(dev, "failed to get regulators\n");
+ goto fail;
+ }
+ res->regul_count = ARRAY_SIZE(supply);
+
+ return 0;
+fail:
+ dev_err(dev, "HDMI resource init - failed\n");
+ hdmi_resources_cleanup(hdev);
+ return -ENODEV;
+}
+
+static int __devinit hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct i2c_adapter *phy_adapter;
+ struct v4l2_subdev *sd;
+ struct hdmi_device *hdmi_dev = NULL;
+ struct hdmi_driver_data *drv_data;
+ int ret;
+
+ dev_dbg(dev, "probe start\n");
+
+ hdmi_dev = kzalloc(sizeof(*hdmi_dev), GFP_KERNEL);
+ if (!hdmi_dev) {
+ dev_err(dev, "out of memory\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi_dev->dev = dev;
+
+ ret = hdmi_resources_init(hdmi_dev);
+ if (ret)
+ goto fail_hdev;
+
+ /* mapping HDMI registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail_init;
+ }
+
+ hdmi_dev->regs = ioremap(res->start, resource_size(res));
+ if (hdmi_dev->regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail_hdev;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(dev, "get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto fail_regs;
+ }
+
+ ret = request_irq(res->start, hdmi_irq_handler, 0, "hdmi", hdmi_dev);
+ if (ret) {
+ dev_err(dev, "request interrupt failed.\n");
+ goto fail_regs;
+ }
+ hdmi_dev->irq = res->start;
+
+ /* setting v4l2 name to prevent WARN_ON in v4l2_device_register */
+ strlcpy(hdmi_dev->v4l2_dev.name, dev_name(dev),
+ sizeof(hdmi_dev->v4l2_dev.name));
+ /* passing NULL owner prevents driver from erasing drvdata */
+ ret = v4l2_device_register(NULL, &hdmi_dev->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "could not register v4l2 device.\n");
+ goto fail_irq;
+ }
+
+ drv_data = (struct hdmi_driver_data *)
+ platform_get_device_id(pdev)->driver_data;
+ phy_adapter = i2c_get_adapter(drv_data->hdmiphy_bus);
+ if (phy_adapter == NULL) {
+ dev_err(dev, "adapter request failed\n");
+ ret = -ENXIO;
+ goto fail_vdev;
+ }
+
+ hdmi_dev->phy_sd = v4l2_i2c_new_subdev_board(&hdmi_dev->v4l2_dev,
+ phy_adapter, &hdmiphy_info, NULL);
+	/* whether it succeeded or not, the adapter is no longer needed */
+ i2c_put_adapter(phy_adapter);
+ if (hdmi_dev->phy_sd == NULL) {
+ dev_err(dev, "missing subdev for hdmiphy\n");
+ ret = -ENODEV;
+ goto fail_vdev;
+ }
+
+ clk_enable(hdmi_dev->res.hdmi);
+
+ pm_runtime_enable(dev);
+
+ sd = &hdmi_dev->sd;
+ v4l2_subdev_init(sd, &hdmi_sd_ops);
+ sd->owner = THIS_MODULE;
+
+ strlcpy(sd->name, "s5p-hdmi", sizeof sd->name);
+ hdmi_dev->cur_preset = HDMI_DEFAULT_PRESET;
+	/* FIXME: missing failure handling if the preset is not supported */
+ hdmi_dev->cur_conf = hdmi_preset2conf(hdmi_dev->cur_preset);
+
+	/* store subdev for callbacks that only have access to struct device */
+ dev_set_drvdata(dev, sd);
+
+	dev_info(dev, "probe successful\n");
+
+ return 0;
+
+fail_vdev:
+ v4l2_device_unregister(&hdmi_dev->v4l2_dev);
+
+fail_irq:
+ free_irq(hdmi_dev->irq, hdmi_dev);
+
+fail_regs:
+ iounmap(hdmi_dev->regs);
+
+fail_init:
+ hdmi_resources_cleanup(hdmi_dev);
+
+fail_hdev:
+ kfree(hdmi_dev);
+
+fail:
+ dev_err(dev, "probe failed\n");
+ return ret;
+}
+
+static int __devexit hdmi_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct hdmi_device *hdmi_dev = sd_to_hdmi_dev(sd);
+
+ pm_runtime_disable(dev);
+ clk_disable(hdmi_dev->res.hdmi);
+ v4l2_device_unregister(&hdmi_dev->v4l2_dev);
+ disable_irq(hdmi_dev->irq);
+ free_irq(hdmi_dev->irq, hdmi_dev);
+ iounmap(hdmi_dev->regs);
+ hdmi_resources_cleanup(hdmi_dev);
+ kfree(hdmi_dev);
+	dev_info(dev, "remove successful\n");
+
+ return 0;
+}
+
+static struct platform_driver hdmi_driver __refdata = {
+ .probe = hdmi_probe,
+ .remove = __devexit_p(hdmi_remove),
+ .id_table = hdmi_driver_types,
+ .driver = {
+ .name = "s5p-hdmi",
+ .owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
+ }
+};
+
+/* D R I V E R I N I T I A L I Z A T I O N */
+
+static int __init hdmi_init(void)
+{
+ int ret;
+ static const char banner[] __initdata = KERN_INFO \
+ "Samsung HDMI output driver, "
+ "(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
+ printk(banner);
+
+ ret = platform_driver_register(&hdmi_driver);
+ if (ret)
+ printk(KERN_ERR "HDMI platform driver register failed\n");
+
+ return ret;
+}
+module_init(hdmi_init);
+
+static void __exit hdmi_exit(void)
+{
+ platform_driver_unregister(&hdmi_driver);
+}
+module_exit(hdmi_exit);
+
+
diff --git a/drivers/media/video/s5p-tv/hdmiphy_drv.c b/drivers/media/video/s5p-tv/hdmiphy_drv.c
new file mode 100644
index 0000000..6693f4a
--- /dev/null
+++ b/drivers/media/video/s5p-tv/hdmiphy_drv.c
@@ -0,0 +1,188 @@
+/*
+ * Samsung HDMI Physical interface driver
+ *
+ * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
+ * Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+
+#include <media/v4l2-subdev.h>
+
+MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung HDMI Physical interface driver");
+MODULE_LICENSE("GPL");
+
+struct hdmiphy_conf {
+ u32 preset;
+ const u8 *data;
+};
+
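+/* PHY configuration blobs, one per pixel clock implied by the array names
+ * (27, 74.175, 74.25 and 148.5 MHz); each 32-byte block is sent verbatim
+ * to the HDMIPHY over I2C in hdmiphy_s_dv_preset() */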
+static const u8 hdmiphy_conf27[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xDf, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xe3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_175[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+ 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_25[32] = {
+ 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+ 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xe0,
+ 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf148_5[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+ 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+};
+
+static const struct hdmiphy_conf hdmiphy_conf[] = {
+ { V4L2_DV_480P59_94, hdmiphy_conf27 },
+ { V4L2_DV_1080P30, hdmiphy_conf74_175 },
+ { V4L2_DV_720P59_94, hdmiphy_conf74_175 },
+ { V4L2_DV_720P60, hdmiphy_conf74_25 },
+ { V4L2_DV_1080P50, hdmiphy_conf148_5 },
+ { V4L2_DV_1080P60, hdmiphy_conf148_5 },
+};
+
+const u8 *hdmiphy_preset2conf(u32 preset)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(hdmiphy_conf); ++i)
+ if (hdmiphy_conf[i].preset == preset)
+ return hdmiphy_conf[i].data;
+ return NULL;
+}
+
+static int hdmiphy_s_power(struct v4l2_subdev *sd, int on)
+{
+ /* to be implemented */
+ return 0;
+}
+
+static int hdmiphy_s_dv_preset(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *preset)
+{
+ const u8 *data;
+ u8 buffer[32];
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+
+ dev_info(dev, "s_dv_preset(preset = %d)\n", preset->preset);
+ data = hdmiphy_preset2conf(preset->preset);
+ if (!data) {
+ dev_err(dev, "format not supported\n");
+ return -EINVAL;
+ }
+
+	/* send the configuration to the device */
+ memcpy(buffer, data, 32);
+ ret = i2c_master_send(client, buffer, 32);
+ if (ret != 32) {
+ dev_err(dev, "failed to configure HDMIPHY via I2C\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hdmiphy_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+ u8 buffer[2];
+ int ret;
+
+ dev_info(dev, "s_stream(%d)\n", enable);
+	/* switch between configuration and operation mode */
+ buffer[0] = 0x1f;
+ buffer[1] = enable ? 0x80 : 0x00;
+
+ ret = i2c_master_send(client, buffer, 2);
+ if (ret != 2) {
+ dev_err(dev, "stream (%d) failed\n", enable);
+ return -EIO;
+ }
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops hdmiphy_core_ops = {
+ .s_power = hdmiphy_s_power,
+};
+
+static const struct v4l2_subdev_video_ops hdmiphy_video_ops = {
+ .s_dv_preset = hdmiphy_s_dv_preset,
+ .s_stream = hdmiphy_s_stream,
+};
+
+static const struct v4l2_subdev_ops hdmiphy_ops = {
+ .core = &hdmiphy_core_ops,
+ .video = &hdmiphy_video_ops,
+};
+
+static int __devinit hdmiphy_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
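+	/* note: a single static subdev is used, so only one HDMIPHY
+	 * instance can be handled by this driver */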
+ static struct v4l2_subdev sd;
+
+ v4l2_i2c_subdev_init(&sd, client, &hdmiphy_ops);
+ dev_info(&client->dev, "probe successful\n");
+ return 0;
+}
+
+static int __devexit hdmiphy_remove(struct i2c_client *client)
+{
+ dev_info(&client->dev, "remove successful\n");
+ return 0;
+}
+
+static const struct i2c_device_id hdmiphy_id[] = {
+ { "hdmiphy", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, hdmiphy_id);
+
+static struct i2c_driver hdmiphy_driver = {
+ .driver = {
+ .name = "s5p-hdmiphy",
+ .owner = THIS_MODULE,
+ },
+ .probe = hdmiphy_probe,
+ .remove = __devexit_p(hdmiphy_remove),
+ .id_table = hdmiphy_id,
+};
+
+static int __init hdmiphy_init(void)
+{
+ return i2c_add_driver(&hdmiphy_driver);
+}
+module_init(hdmiphy_init);
+
+static void __exit hdmiphy_exit(void)
+{
+ i2c_del_driver(&hdmiphy_driver);
+}
+module_exit(hdmiphy_exit);
diff --git a/drivers/media/video/s5p-tv/mixer.h b/drivers/media/video/s5p-tv/mixer.h
new file mode 100644
index 0000000..e224224
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer.h
@@ -0,0 +1,354 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#ifndef SAMSUNG_MIXER_H
+#define SAMSUNG_MIXER_H
+
+#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
+ #define DEBUG
+#endif
+
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-core.h>
+
+#include "regs-mixer.h"
+
+/** maximum number of output interfaces */
+#define MXR_MAX_OUTPUTS 2
+/** maximum number of input interfaces (layers) */
+#define MXR_MAX_LAYERS 3
+#define MXR_DRIVER_NAME "s5p-mixer"
+/** maximum number of planes per layer */
+#define MXR_MAX_PLANES 2
+
+#define MXR_ENABLE 1
+#define MXR_DISABLE 0
+
+/** description of a macroblock for packed formats */
+struct mxr_block {
+	/** horizontal number of pixels in macroblock */
+	unsigned int width;
+	/** vertical number of pixels in macroblock */
+	unsigned int height;
+ /** size of block in bytes */
+ unsigned int size;
+};
+
+/** description of supported format */
+struct mxr_format {
+ /** format name/mnemonic */
+ const char *name;
+ /** fourcc identifier */
+ u32 fourcc;
+ /** colorspace identifier */
+ enum v4l2_colorspace colorspace;
+ /** number of planes in image data */
+ int num_planes;
+ /** description of block for each plane */
+ struct mxr_block plane[MXR_MAX_PLANES];
+ /** number of subframes in image data */
+ int num_subframes;
+	/** specifies to which subframe a given plane belongs */
+ int plane2subframe[MXR_MAX_PLANES];
+	/** internal code, driver dependent */
+ unsigned long cookie;
+};
+
+/** description of crop configuration for image */
+struct mxr_crop {
+ /** width of layer in pixels */
+ unsigned int full_width;
+ /** height of layer in pixels */
+ unsigned int full_height;
+ /** horizontal offset of first pixel to be displayed */
+ unsigned int x_offset;
+ /** vertical offset of first pixel to be displayed */
+ unsigned int y_offset;
+ /** width of displayed data in pixels */
+ unsigned int width;
+ /** height of displayed data in pixels */
+ unsigned int height;
+ /** indicate which fields are present in buffer */
+ unsigned int field;
+};
+
+/** description of transformation from source to destination image */
+struct mxr_geometry {
+ /** cropping for source image */
+ struct mxr_crop src;
+ /** cropping for destination image */
+ struct mxr_crop dst;
+	/** layer-dependent description of horizontal scaling */
+	unsigned int x_ratio;
+	/** layer-dependent description of vertical scaling */
+ unsigned int y_ratio;
+};
+
+/** instance of a buffer */
+struct mxr_buffer {
+ /** common v4l buffer stuff -- must be first */
+ struct vb2_buffer vb;
+ /** node for layer's lists */
+ struct list_head list;
+};
+
+
+/** internal states of layer */
+enum mxr_layer_state {
+	/** layer is not shown */
+ MXR_LAYER_IDLE = 0,
+ /** state between STREAMON and hardware start */
+ MXR_LAYER_STREAMING_START,
+ /** layer is shown */
+ MXR_LAYER_STREAMING,
+ /** state before STREAMOFF is finished */
+ MXR_LAYER_STREAMING_FINISH,
+};
+
+/** forward declarations */
+struct mxr_device;
+struct mxr_layer;
+
+/** callbacks for layer operations */
+struct mxr_layer_ops {
+ /* TODO: try to port it to subdev API */
+ /** handler for resource release function */
+ void (*release)(struct mxr_layer *);
+ /** setting buffer to HW */
+ void (*buffer_set)(struct mxr_layer *, struct mxr_buffer *);
+ /** setting format and geometry in HW */
+ void (*format_set)(struct mxr_layer *);
+ /** streaming stop/start */
+ void (*stream_set)(struct mxr_layer *, int);
+ /** adjusting geometry */
+ void (*fix_geometry)(struct mxr_layer *);
+};
+
+/** layer instance, a single window and content displayed on output */
+struct mxr_layer {
+ /** parent mixer device */
+ struct mxr_device *mdev;
+ /** layer index (unique identifier) */
+ int idx;
+ /** callbacks for layer methods */
+ struct mxr_layer_ops ops;
+ /** format array */
+ const struct mxr_format **fmt_array;
+ /** size of format array */
+ unsigned long fmt_array_size;
+
+ /** lock for protection of list and state fields */
+ spinlock_t enq_slock;
+ /** list for enqueued buffers */
+ struct list_head enq_list;
+ /** buffer currently owned by hardware in temporary registers */
+ struct mxr_buffer *update_buf;
+ /** buffer currently owned by hardware in shadow registers */
+ struct mxr_buffer *shadow_buf;
+ /** state of layer IDLE/STREAMING */
+ enum mxr_layer_state state;
+
+ /** mutex for protection of fields below */
+ struct mutex mutex;
+ /** handler for video node */
+ struct video_device vfd;
+ /** queue for output buffers */
+ struct vb2_queue vb_queue;
+ /** current image format */
+ const struct mxr_format *fmt;
+ /** current geometry of image */
+ struct mxr_geometry geo;
+};
+
+/** description of the mixer's output interface */
+struct mxr_output {
+ /** name of output */
+ char name[32];
+ /** output subdev */
+ struct v4l2_subdev *sd;
+ /** cookie used for configuration of registers */
+ int cookie;
+};
+
+/** specify source of output subdevs */
+struct mxr_output_conf {
+ /** name of output (connector) */
+ char *output_name;
+ /** name of module that generates output subdev */
+ char *module_name;
+	/** cookie needed by mixer HW */
+ int cookie;
+};
+
+struct clk;
+struct regulator;
+
+/** auxiliary resources used by the mixer */
+struct mxr_resources {
+ /** interrupt index */
+ int irq;
+ /** pointer to Mixer registers */
+ void __iomem *mxr_regs;
+ /** pointer to Video Processor registers */
+ void __iomem *vp_regs;
+	/** other resources; should be used under mxr_device.mutex */
+ struct clk *mixer;
+ struct clk *vp;
+ struct clk *sclk_mixer;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_dac;
+};
+
+/* event flags used */
+enum mxr_devide_flags {
+ MXR_EVENT_VSYNC = 0,
+};
+
+/** driver's instance */
+struct mxr_device {
+ /** master device */
+ struct device *dev;
+ /** state of each layer */
+ struct mxr_layer *layer[MXR_MAX_LAYERS];
+ /** state of each output */
+ struct mxr_output *output[MXR_MAX_OUTPUTS];
+ /** number of registered outputs */
+ int output_cnt;
+
+ /* video resources */
+
+ /** V4L2 device */
+ struct v4l2_device v4l2_dev;
+ /** context of allocator */
+ void *alloc_ctx;
+ /** event wait queue */
+ wait_queue_head_t event_queue;
+ /** state flags */
+ unsigned long event_flags;
+
+ /** spinlock for protection of registers */
+ spinlock_t reg_slock;
+
+ /** mutex for protection of fields below */
+ struct mutex mutex;
+	/** number of entities dependent on output configuration */
+ int n_output;
+ /** number of users that do streaming */
+ int n_streamer;
+ /** index of current output */
+ int current_output;
+	/** auxiliary resources used by the mixer */
+ struct mxr_resources res;
+};
+
+/** transform device structure into mixer device */
+static inline struct mxr_device *to_mdev(struct device *dev)
+{
+ struct v4l2_device *vdev = dev_get_drvdata(dev);
+ return container_of(vdev, struct mxr_device, v4l2_dev);
+}
+
+/** get current output data, should be called under mdev's mutex */
+static inline struct mxr_output *to_output(struct mxr_device *mdev)
+{
+ return mdev->output[mdev->current_output];
+}
+
+/** get current output subdev, should be called under mdev's mutex */
+static inline struct v4l2_subdev *to_outsd(struct mxr_device *mdev)
+{
+ struct mxr_output *out = to_output(mdev);
+ return out ? out->sd : NULL;
+}
+
+/** forward declaration for mixer platform data */
+struct mxr_platform_data;
+
+/** acquiring common video resources */
+int __devinit mxr_acquire_video(struct mxr_device *mdev,
+ struct mxr_output_conf *output_cont, int output_count);
+
+/** releasing common video resources */
+void __devexit mxr_release_video(struct mxr_device *mdev);
+
+struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx);
+struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx);
+struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
+ int idx, char *name, struct mxr_layer_ops *ops);
+
+void mxr_base_layer_release(struct mxr_layer *layer);
+void mxr_layer_release(struct mxr_layer *layer);
+
+int mxr_base_layer_register(struct mxr_layer *layer);
+void mxr_base_layer_unregister(struct mxr_layer *layer);
+
+unsigned long mxr_get_plane_size(const struct mxr_block *blk,
+ unsigned int width, unsigned int height);
+
+/** adds new consumer for mixer's power */
+int __must_check mxr_power_get(struct mxr_device *mdev);
+/** removes consumer for mixer's power */
+void mxr_power_put(struct mxr_device *mdev);
+/** add new client for output configuration */
+void mxr_output_get(struct mxr_device *mdev);
+/** removes a client for output configuration */
+void mxr_output_put(struct mxr_device *mdev);
+/** add new client for streaming */
+void mxr_streamer_get(struct mxr_device *mdev);
+/** removes a client for streaming */
+void mxr_streamer_put(struct mxr_device *mdev);
+/** returns format of data delivered to current output */
+void mxr_get_mbus_fmt(struct mxr_device *mdev,
+ struct v4l2_mbus_framefmt *mbus_fmt);
+
+/* Debug */
+
+#define mxr_err(mdev, fmt, ...) dev_err(mdev->dev, fmt, ##__VA_ARGS__)
+#define mxr_warn(mdev, fmt, ...) dev_warn(mdev->dev, fmt, ##__VA_ARGS__)
+#define mxr_info(mdev, fmt, ...) dev_info(mdev->dev, fmt, ##__VA_ARGS__)
+
+#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
+ #define mxr_dbg(mdev, fmt, ...) dev_dbg(mdev->dev, fmt, ##__VA_ARGS__)
+#else
+ #define mxr_dbg(mdev, fmt, ...) do { (void) mdev; } while (0)
+#endif
+
+/* accessing Mixer's and Video Processor's registers */
+
+void mxr_vsync_set_update(struct mxr_device *mdev, int en);
+void mxr_reg_reset(struct mxr_device *mdev);
+irqreturn_t mxr_irq_handler(int irq, void *dev_data);
+void mxr_reg_s_output(struct mxr_device *mdev, int cookie);
+void mxr_reg_streamon(struct mxr_device *mdev);
+void mxr_reg_streamoff(struct mxr_device *mdev);
+int mxr_reg_wait4vsync(struct mxr_device *mdev);
+void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
+ struct v4l2_mbus_framefmt *fmt);
+void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en);
+void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr);
+void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
+ const struct mxr_format *fmt, const struct mxr_geometry *geo);
+
+void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en);
+void mxr_reg_vp_buffer(struct mxr_device *mdev,
+ dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2]);
+void mxr_reg_vp_format(struct mxr_device *mdev,
+ const struct mxr_format *fmt, const struct mxr_geometry *geo);
+void mxr_reg_dump(struct mxr_device *mdev);
+
+#endif /* SAMSUNG_MIXER_H */
+
diff --git a/drivers/media/video/s5p-tv/mixer_drv.c b/drivers/media/video/s5p-tv/mixer_drv.c
new file mode 100644
index 0000000..0064309
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_drv.c
@@ -0,0 +1,487 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include "mixer.h"
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+
+MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung MIXER");
+MODULE_LICENSE("GPL");
+
+/* --------- DRIVER PARAMETERS ---------- */
+
+static struct mxr_output_conf mxr_output_conf[] = {
+ {
+ .output_name = "S5P HDMI connector",
+ .module_name = "s5p-hdmi",
+ .cookie = 1,
+ },
+ {
+ .output_name = "S5P SDO connector",
+ .module_name = "s5p-sdo",
+ .cookie = 0,
+ },
+};
+
+void mxr_get_mbus_fmt(struct mxr_device *mdev,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ struct v4l2_subdev *sd;
+ int ret;
+
+ mutex_lock(&mdev->mutex);
+ sd = to_outsd(mdev);
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, mbus_fmt);
+ WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
+ mutex_unlock(&mdev->mutex);
+}
+
+void mxr_streamer_get(struct mxr_device *mdev)
+{
+ mutex_lock(&mdev->mutex);
+ ++mdev->n_streamer;
+ mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
+ if (mdev->n_streamer == 1) {
+ struct v4l2_subdev *sd = to_outsd(mdev);
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct mxr_resources *res = &mdev->res;
+ int ret;
+
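+		/* cookie 0 selects the SDO (TV-out) path, cookie 1 the HDMI
+		 * path; see mxr_output_conf above */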
+ if (to_output(mdev)->cookie == 0)
+ clk_set_parent(res->sclk_mixer, res->sclk_dac);
+ else
+ clk_set_parent(res->sclk_mixer, res->sclk_hdmi);
+ mxr_reg_s_output(mdev, to_output(mdev)->cookie);
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mbus_fmt);
+ WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ WARN(ret, "starting stream failed for output %s\n", sd->name);
+
+ mxr_reg_set_mbus_fmt(mdev, &mbus_fmt);
+ mxr_reg_streamon(mdev);
+ ret = mxr_reg_wait4vsync(mdev);
+ WARN(ret, "failed to get vsync (%d) from output\n", ret);
+ }
+ mutex_unlock(&mdev->mutex);
+ mxr_reg_dump(mdev);
+ /* FIXME: what to do when streaming fails? */
+}
+
+void mxr_streamer_put(struct mxr_device *mdev)
+{
+ mutex_lock(&mdev->mutex);
+ --mdev->n_streamer;
+ mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
+ if (mdev->n_streamer == 0) {
+ int ret;
+ struct v4l2_subdev *sd = to_outsd(mdev);
+
+ mxr_reg_streamoff(mdev);
+		/* changes to the Mixer setup are applied on vsync */
+ ret = mxr_reg_wait4vsync(mdev);
+ WARN(ret, "failed to get vsync (%d) from output\n", ret);
+ ret = v4l2_subdev_call(sd, video, s_stream, 0);
+ WARN(ret, "stopping stream failed for output %s\n", sd->name);
+ }
+ WARN(mdev->n_streamer < 0, "negative number of streamers (%d)\n",
+ mdev->n_streamer);
+ mutex_unlock(&mdev->mutex);
+ mxr_reg_dump(mdev);
+}
+
+void mxr_output_get(struct mxr_device *mdev)
+{
+ mutex_lock(&mdev->mutex);
+ ++mdev->n_output;
+ mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
+ /* turn on auxiliary driver */
+ if (mdev->n_output == 1)
+ v4l2_subdev_call(to_outsd(mdev), core, s_power, 1);
+ mutex_unlock(&mdev->mutex);
+}
+
+void mxr_output_put(struct mxr_device *mdev)
+{
+ mutex_lock(&mdev->mutex);
+ --mdev->n_output;
+ mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
+	/* turn off auxiliary driver */
+ if (mdev->n_output == 0)
+ v4l2_subdev_call(to_outsd(mdev), core, s_power, 0);
+ WARN(mdev->n_output < 0, "negative number of output users (%d)\n",
+ mdev->n_output);
+ mutex_unlock(&mdev->mutex);
+}
+
+int mxr_power_get(struct mxr_device *mdev)
+{
+ int ret = pm_runtime_get_sync(mdev->dev);
+
+	/* pm_runtime_get_sync() returning 1 means power was already enabled,
+	 * so treat it as success and return zero */
+ if (IS_ERR_VALUE(ret))
+ return ret;
+ return 0;
+}
+
+void mxr_power_put(struct mxr_device *mdev)
+{
+ pm_runtime_put_sync(mdev->dev);
+}
+
+/* --------- RESOURCE MANAGEMENT -------------*/
+
+static int __devinit mxr_acquire_plat_resources(struct mxr_device *mdev,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
+ if (res == NULL) {
+ mxr_err(mdev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ mdev->res.mxr_regs = ioremap(res->start, resource_size(res));
+ if (mdev->res.mxr_regs == NULL) {
+ mxr_err(mdev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
+ if (res == NULL) {
+ mxr_err(mdev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail_mxr_regs;
+ }
+
+ mdev->res.vp_regs = ioremap(res->start, resource_size(res));
+ if (mdev->res.vp_regs == NULL) {
+ mxr_err(mdev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail_mxr_regs;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
+ if (res == NULL) {
+ mxr_err(mdev, "get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto fail_vp_regs;
+ }
+
+ ret = request_irq(res->start, mxr_irq_handler, 0, "s5p-mixer", mdev);
+ if (ret) {
+ mxr_err(mdev, "request interrupt failed.\n");
+ goto fail_vp_regs;
+ }
+ mdev->res.irq = res->start;
+
+ return 0;
+
+fail_vp_regs:
+ iounmap(mdev->res.vp_regs);
+
+fail_mxr_regs:
+ iounmap(mdev->res.mxr_regs);
+
+fail:
+ return ret;
+}
+
+static void mxr_release_plat_resources(struct mxr_device *mdev)
+{
+ free_irq(mdev->res.irq, mdev);
+ iounmap(mdev->res.vp_regs);
+ iounmap(mdev->res.mxr_regs);
+}
+
+static void mxr_release_clocks(struct mxr_device *mdev)
+{
+ struct mxr_resources *res = &mdev->res;
+
+ if (!IS_ERR_OR_NULL(res->sclk_dac))
+ clk_put(res->sclk_dac);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+ clk_put(res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(res->sclk_mixer))
+ clk_put(res->sclk_mixer);
+ if (!IS_ERR_OR_NULL(res->vp))
+ clk_put(res->vp);
+ if (!IS_ERR_OR_NULL(res->mixer))
+ clk_put(res->mixer);
+}
+
+static int mxr_acquire_clocks(struct mxr_device *mdev)
+{
+ struct mxr_resources *res = &mdev->res;
+ struct device *dev = mdev->dev;
+
+ res->mixer = clk_get(dev, "mixer");
+ if (IS_ERR_OR_NULL(res->mixer)) {
+ mxr_err(mdev, "failed to get clock 'mixer'\n");
+ goto fail;
+ }
+ res->vp = clk_get(dev, "vp");
+ if (IS_ERR_OR_NULL(res->vp)) {
+ mxr_err(mdev, "failed to get clock 'vp'\n");
+ goto fail;
+ }
+ res->sclk_mixer = clk_get(dev, "sclk_mixer");
+ if (IS_ERR_OR_NULL(res->sclk_mixer)) {
+ mxr_err(mdev, "failed to get clock 'sclk_mixer'\n");
+ goto fail;
+ }
+ res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ mxr_err(mdev, "failed to get clock 'sclk_hdmi'\n");
+ goto fail;
+ }
+ res->sclk_dac = clk_get(dev, "sclk_dac");
+ if (IS_ERR_OR_NULL(res->sclk_dac)) {
+ mxr_err(mdev, "failed to get clock 'sclk_dac'\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ mxr_release_clocks(mdev);
+ return -ENODEV;
+}
+
+static int __devinit mxr_acquire_resources(struct mxr_device *mdev,
+ struct platform_device *pdev)
+{
+ int ret;
+ ret = mxr_acquire_plat_resources(mdev, pdev);
+
+ if (ret)
+ goto fail;
+
+ ret = mxr_acquire_clocks(mdev);
+ if (ret)
+ goto fail_plat;
+
+ mxr_info(mdev, "resources acquired\n");
+ return 0;
+
+fail_plat:
+ mxr_release_plat_resources(mdev);
+fail:
+ mxr_err(mdev, "resources acquire failed\n");
+ return ret;
+}
+
+static void mxr_release_resources(struct mxr_device *mdev)
+{
+ mxr_release_clocks(mdev);
+ mxr_release_plat_resources(mdev);
+ memset(&mdev->res, 0, sizeof mdev->res);
+}
+
+static void mxr_release_layers(struct mxr_device *mdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdev->layer); ++i)
+ if (mdev->layer[i])
+ mxr_layer_release(mdev->layer[i]);
+}
+
+static int __devinit mxr_acquire_layers(struct mxr_device *mdev,
+ struct mxr_platform_data *pdata)
+{
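+	/* two graphic (RGB) layers plus one video processor (VP) layer */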
+ mdev->layer[0] = mxr_graph_layer_create(mdev, 0);
+ mdev->layer[1] = mxr_graph_layer_create(mdev, 1);
+ mdev->layer[2] = mxr_vp_layer_create(mdev, 0);
+
+ if (!mdev->layer[0] || !mdev->layer[1] || !mdev->layer[2]) {
+ mxr_err(mdev, "failed to acquire layers\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ mxr_release_layers(mdev);
+ return -ENODEV;
+}
+
+/* ---------- POWER MANAGEMENT ----------- */
+
+static int mxr_runtime_resume(struct device *dev)
+{
+ struct mxr_device *mdev = to_mdev(dev);
+ struct mxr_resources *res = &mdev->res;
+
+ mxr_dbg(mdev, "resume - start\n");
+ mutex_lock(&mdev->mutex);
+ /* turn clocks on */
+ clk_enable(res->mixer);
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+ /* apply default configuration */
+ mxr_reg_reset(mdev);
+ mxr_dbg(mdev, "resume - finished\n");
+
+ mutex_unlock(&mdev->mutex);
+ return 0;
+}
+
+static int mxr_runtime_suspend(struct device *dev)
+{
+ struct mxr_device *mdev = to_mdev(dev);
+ struct mxr_resources *res = &mdev->res;
+ mxr_dbg(mdev, "suspend - start\n");
+ mutex_lock(&mdev->mutex);
+ /* turn clocks off */
+ clk_disable(res->sclk_mixer);
+ clk_disable(res->vp);
+ clk_disable(res->mixer);
+ mutex_unlock(&mdev->mutex);
+ mxr_dbg(mdev, "suspend - finished\n");
+ return 0;
+}
+
+static const struct dev_pm_ops mxr_pm_ops = {
+ .runtime_suspend = mxr_runtime_suspend,
+ .runtime_resume = mxr_runtime_resume,
+};
+
+/* --------- DRIVER INITIALIZATION ---------- */
+
+static int __devinit mxr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxr_platform_data *pdata = dev->platform_data;
+ struct mxr_device *mdev;
+ int ret;
+
+ /* mdev does not exist yet so no mxr_dbg is used */
+ dev_info(dev, "probe start\n");
+
+ mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+ if (!mdev) {
+		/* mdev is still NULL here, so mxr_err() cannot be used */
+		dev_err(dev, "not enough memory\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* setup pointer to master device */
+ mdev->dev = dev;
+
+ mutex_init(&mdev->mutex);
+ spin_lock_init(&mdev->reg_slock);
+ init_waitqueue_head(&mdev->event_queue);
+
+ /* acquire resources: regs, irqs, clocks, regulators */
+ ret = mxr_acquire_resources(mdev, pdev);
+ if (ret)
+ goto fail_mem;
+
+ /* configure resources for video output */
+ ret = mxr_acquire_video(mdev, mxr_output_conf,
+ ARRAY_SIZE(mxr_output_conf));
+ if (ret)
+ goto fail_resources;
+
+ /* configure layers */
+ ret = mxr_acquire_layers(mdev, pdata);
+ if (ret)
+ goto fail_video;
+
+ pm_runtime_enable(dev);
+
+ mxr_info(mdev, "probe successful\n");
+ return 0;
+
+fail_video:
+ mxr_release_video(mdev);
+
+fail_resources:
+ mxr_release_resources(mdev);
+
+fail_mem:
+ kfree(mdev);
+
+fail:
+ dev_info(dev, "probe failed\n");
+ return ret;
+}
+
+static int __devexit mxr_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxr_device *mdev = to_mdev(dev);
+
+ pm_runtime_disable(dev);
+
+ mxr_release_layers(mdev);
+ mxr_release_video(mdev);
+ mxr_release_resources(mdev);
+
+ kfree(mdev);
+
+	dev_info(dev, "remove successful\n");
+ return 0;
+}
+
+static struct platform_driver mxr_driver __refdata = {
+ .probe = mxr_probe,
+ .remove = __devexit_p(mxr_remove),
+ .driver = {
+ .name = MXR_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &mxr_pm_ops,
+ }
+};
+
+static int __init mxr_init(void)
+{
+ int i, ret;
+ static const char banner[] __initdata = KERN_INFO
+ "Samsung TV Mixer driver, "
+ "(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
+ printk(banner);
+
+ /* Loading auxiliary modules */
+ for (i = 0; i < ARRAY_SIZE(mxr_output_conf); ++i)
+ request_module(mxr_output_conf[i].module_name);
+
+ ret = platform_driver_register(&mxr_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "registration of MIXER driver failed\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+module_init(mxr_init);
+
+static void __exit mxr_exit(void)
+{
+ platform_driver_unregister(&mxr_driver);
+}
+module_exit(mxr_exit);
diff --git a/drivers/media/video/s5p-tv/mixer_grp_layer.c b/drivers/media/video/s5p-tv/mixer_grp_layer.c
new file mode 100644
index 0000000..58f0ba4
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_grp_layer.c
@@ -0,0 +1,185 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include "mixer.h"
+
+#include <media/videobuf2-dma-contig.h>
+
+/* FORMAT DEFINITIONS */
+
+static const struct mxr_format mxr_fb_fmt_rgb565 = {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .num_planes = 1,
+ .plane = {
+ { .width = 1, .height = 1, .size = 2 },
+ },
+ .num_subframes = 1,
+ .cookie = 4,
+};
+
+static const struct mxr_format mxr_fb_fmt_argb1555 = {
+ .name = "ARGB1555",
+ .num_planes = 1,
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .plane = {
+ { .width = 1, .height = 1, .size = 2 },
+ },
+ .num_subframes = 1,
+ .cookie = 5,
+};
+
+static const struct mxr_format mxr_fb_fmt_argb4444 = {
+ .name = "ARGB4444",
+ .num_planes = 1,
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .plane = {
+ { .width = 1, .height = 1, .size = 2 },
+ },
+ .num_subframes = 1,
+ .cookie = 6,
+};
+
+static const struct mxr_format mxr_fb_fmt_argb8888 = {
+ .name = "ARGB8888",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .num_planes = 1,
+ .plane = {
+ { .width = 1, .height = 1, .size = 4 },
+ },
+ .num_subframes = 1,
+ .cookie = 7,
+};
+
+static const struct mxr_format *mxr_graph_format[] = {
+ &mxr_fb_fmt_rgb565,
+ &mxr_fb_fmt_argb1555,
+ &mxr_fb_fmt_argb4444,
+ &mxr_fb_fmt_argb8888,
+};
+
+/* AUXILIARY CALLBACKS */
+
+static void mxr_graph_layer_release(struct mxr_layer *layer)
+{
+ mxr_base_layer_unregister(layer);
+ mxr_base_layer_release(layer);
+}
+
+static void mxr_graph_buffer_set(struct mxr_layer *layer,
+ struct mxr_buffer *buf)
+{
+ dma_addr_t addr = 0;
+
+ if (buf)
+ addr = vb2_dma_contig_plane_paddr(&buf->vb, 0);
+ mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
+}
+
+static void mxr_graph_stream_set(struct mxr_layer *layer, int en)
+{
+ mxr_reg_graph_layer_stream(layer->mdev, layer->idx, en);
+}
+
+static void mxr_graph_format_set(struct mxr_layer *layer)
+{
+ mxr_reg_graph_format(layer->mdev, layer->idx,
+ layer->fmt, &layer->geo);
+}
+
+static void mxr_graph_fix_geometry(struct mxr_layer *layer)
+{
+ struct mxr_geometry *geo = &layer->geo;
+
+ /* limit to boundary size */
+ geo->src.full_width = clamp_val(geo->src.full_width, 1, 32767);
+ geo->src.full_height = clamp_val(geo->src.full_height, 1, 2047);
+ geo->src.width = clamp_val(geo->src.width, 1, geo->src.full_width);
+ geo->src.width = min(geo->src.width, 2047U);
+	/* cropping along the Y axis is not possible */
+ geo->src.y_offset = min(geo->src.y_offset, geo->src.full_height - 1);
+ geo->src.height = geo->src.full_height - geo->src.y_offset;
+	/* limiting the offset */
+ geo->src.x_offset = min(geo->src.x_offset,
+ geo->src.full_width - geo->src.width);
+
+ /* setting position in output */
+ geo->dst.width = min(geo->dst.width, geo->dst.full_width);
+ geo->dst.height = min(geo->dst.height, geo->dst.full_height);
+
+ /* Mixer supports only 1x and 2x scaling */
+ if (geo->dst.width >= 2 * geo->src.width) {
+ geo->x_ratio = 1;
+ geo->dst.width = 2 * geo->src.width;
+ } else {
+ geo->x_ratio = 0;
+ geo->dst.width = geo->src.width;
+ }
+
+ if (geo->dst.height >= 2 * geo->src.height) {
+ geo->y_ratio = 1;
+ geo->dst.height = 2 * geo->src.height;
+ } else {
+ geo->y_ratio = 0;
+ geo->dst.height = geo->src.height;
+ }
+
+ geo->dst.x_offset = min(geo->dst.x_offset,
+ geo->dst.full_width - geo->dst.width);
+ geo->dst.y_offset = min(geo->dst.y_offset,
+ geo->dst.full_height - geo->dst.height);
+}
+
+/* PUBLIC API */
+
+struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
+{
+ struct mxr_layer *layer;
+ int ret;
+ struct mxr_layer_ops ops = {
+ .release = mxr_graph_layer_release,
+ .buffer_set = mxr_graph_buffer_set,
+ .stream_set = mxr_graph_stream_set,
+ .format_set = mxr_graph_format_set,
+ .fix_geometry = mxr_graph_fix_geometry,
+ };
+ char name[32];
+
+ sprintf(name, "graph%d", idx);
+
+ layer = mxr_base_layer_create(mdev, idx, name, &ops);
+ if (layer == NULL) {
+ mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
+ goto fail;
+ }
+
+ layer->fmt_array = mxr_graph_format;
+ layer->fmt_array_size = ARRAY_SIZE(mxr_graph_format);
+
+ ret = mxr_base_layer_register(layer);
+ if (ret)
+ goto fail_layer;
+
+ return layer;
+
+fail_layer:
+ mxr_base_layer_release(layer);
+
+fail:
+ return NULL;
+}
+
diff --git a/drivers/media/video/s5p-tv/mixer_reg.c b/drivers/media/video/s5p-tv/mixer_reg.c
new file mode 100644
index 0000000..38dac67
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_reg.c
@@ -0,0 +1,541 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include "mixer.h"
+#include "regs-mixer.h"
+#include "regs-vp.h"
+
+#include <linux/delay.h>
+
+/* Register access subroutines */
+
+static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
+{
+ return readl(mdev->res.vp_regs + reg_id);
+}
+
+static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
+{
+ writel(val, mdev->res.vp_regs + reg_id);
+}
+
+static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
+ u32 val, u32 mask)
+{
+ u32 old = vp_read(mdev, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, mdev->res.vp_regs + reg_id);
+}
+
+static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
+{
+ return readl(mdev->res.mxr_regs + reg_id);
+}
+
+static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
+{
+ writel(val, mdev->res.mxr_regs + reg_id);
+}
+
+static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
+ u32 val, u32 mask)
+{
+ u32 old = mxr_read(mdev, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, mdev->res.mxr_regs + reg_id);
+}
+
+void mxr_vsync_set_update(struct mxr_device *mdev, int en)
+{
+ /* block update on vsync */
+ mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
+ MXR_STATUS_SYNC_ENABLE);
+ vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
+}
+
+static void __mxr_reg_vp_reset(struct mxr_device *mdev)
+{
+ int tries = 100;
+
+ vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
+ for (tries = 100; tries; --tries) {
+ /* waiting until VP_SRESET_PROCESSING is 0 */
+ if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
+ break;
+ mdelay(10);
+ }
+ WARN(tries == 0, "failed to reset Video Processor\n");
+}
+
+static void mxr_reg_vp_default_filter(struct mxr_device *mdev);
+
+void mxr_reg_reset(struct mxr_device *mdev)
+{
+ unsigned long flags;
+ u32 val; /* value stored to register */
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ /* set output to YUV444 mode */
+ mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_YUV444);
+
+ /* 16 beat burst in DMA */
+ mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
+ MXR_STATUS_BURST_MASK);
+
+ /* setting default layer priority: layer1 > video > layer0
+ * because typical usage scenario would be
+ * layer0 - framebuffer
+ * video - video overlay
+ * layer1 - OSD
+ */
+ val = MXR_LAYER_CFG_GRP0_VAL(1);
+ val |= MXR_LAYER_CFG_VP_VAL(2);
+ val |= MXR_LAYER_CFG_GRP1_VAL(3);
+ mxr_write(mdev, MXR_LAYER_CFG, val);
+
+ /* use dark gray background color */
+ mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
+ mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
+ mxr_write(mdev, MXR_BG_COLOR2, 0x808080);
+
+ /* setting graphical layers */
+
+ val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
+ val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+ /* the same configuration for both layers */
+ mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
+ mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);
+
+ /* configuration of Video Processor Registers */
+ __mxr_reg_vp_reset(mdev);
+ mxr_reg_vp_default_filter(mdev);
+
+ /* enable all interrupts */
+ mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
+ const struct mxr_format *fmt, const struct mxr_geometry *geo)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ /* setup format */
+ mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
+ MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);
+
+ /* setup geometry */
+ mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
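+ /* width, height and the 2x scaling flags are packed into one WH register */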
+ val = MXR_GRP_WH_WIDTH(geo->src.width);
+ val |= MXR_GRP_WH_HEIGHT(geo->src.height);
+ val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
+ val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
+ mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);
+
+ /* setup offsets in source image */
+ val = MXR_GRP_SXY_SX(geo->src.x_offset);
+ val |= MXR_GRP_SXY_SY(geo->src.y_offset);
+ mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);
+
+ /* setup offsets in display image */
+ val = MXR_GRP_DXY_DX(geo->dst.x_offset);
+ val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
+ mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_vp_format(struct mxr_device *mdev,
+ const struct mxr_format *fmt, const struct mxr_geometry *geo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);
+
+ /* setting size of input image */
+ vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
+ VP_IMG_VSIZE(geo->src.full_height));
+ /* chroma height has to be reduced by 2 to avoid chroma distortions */
+ vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
+ VP_IMG_VSIZE(geo->src.full_height / 2));
+
+ vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
+ vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
+ vp_write(mdev, VP_SRC_H_POSITION,
+ VP_SRC_H_POSITION_VAL(geo->src.x_offset));
+ vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);
+
+ vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
+ vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
+ if (geo->dst.field == V4L2_FIELD_INTERLACED) {
+ vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
+ vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
+ } else {
+ vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
+ vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
+ }
+
+ vp_write(mdev, VP_H_RATIO, geo->x_ratio);
+ vp_write(mdev, VP_V_RATIO, geo->y_ratio);
+
+ vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
+{
+ u32 val = addr ? ~0 : 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
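+ /* a zero DMA address disables the layer, a non-zero one enables it */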
+ if (idx == 0)
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+ else
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+ mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_vp_buffer(struct mxr_device *mdev,
+ dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
+{
+ u32 val = luma_addr[0] ? ~0 : 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
+ vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
+ /* TODO: fix tiled mode */
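+ /* top/bottom field pointers for the luma and chroma planes */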
+ vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
+ vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
+ vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
+ vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+static void mxr_irq_layer_handle(struct mxr_layer *layer)
+{
+ struct list_head *head = &layer->enq_list;
+ struct mxr_buffer *done;
+
+ /* skip non-existing layer */
+ if (layer == NULL)
+ return;
+
+ spin_lock(&layer->enq_slock);
+ if (layer->state == MXR_LAYER_IDLE)
+ goto done;
+
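+ /* the previously programmed buffer is now visible; the old shadow buffer is complete */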
+ done = layer->shadow_buf;
+ layer->shadow_buf = layer->update_buf;
+
+ if (list_empty(head)) {
+ if (layer->state != MXR_LAYER_STREAMING)
+ layer->update_buf = NULL;
+ } else {
+ struct mxr_buffer *next;
+ next = list_first_entry(head, struct mxr_buffer, list);
+ list_del(&next->list);
+ layer->update_buf = next;
+ }
+
+ layer->ops.buffer_set(layer, layer->update_buf);
+
+ if (done && done != layer->shadow_buf)
+ vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
+
+done:
+ spin_unlock(&layer->enq_slock);
+}
+
+irqreturn_t mxr_irq_handler(int irq, void *dev_data)
+{
+ struct mxr_device *mdev = dev_data;
+ u32 i, val;
+
+ spin_lock(&mdev->reg_slock);
+ val = mxr_read(mdev, MXR_INT_STATUS);
+
+ /* wake up process waiting for VSYNC */
+ if (val & MXR_INT_STATUS_VSYNC) {
+ set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
+ wake_up(&mdev->event_queue);
+ }
+
+ /* clear interrupts */
+ if (~val & MXR_INT_EN_VSYNC) {
+ /* the vsync interrupt uses different bits for status and clear */
+ val &= ~MXR_INT_EN_VSYNC;
+ val |= MXR_INT_CLEAR_VSYNC;
+ }
+ mxr_write(mdev, MXR_INT_STATUS, val);
+
+ spin_unlock(&mdev->reg_slock);
+ /* leave on non-vsync event */
+ if (~val & MXR_INT_CLEAR_VSYNC)
+ return IRQ_HANDLED;
+ for (i = 0; i < MXR_MAX_LAYERS; ++i)
+ mxr_irq_layer_handle(mdev->layer[i]);
+ return IRQ_HANDLED;
+}
+
+void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
+{
+ u32 val;
+
+ val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
+}
+
+void mxr_reg_streamon(struct mxr_device *mdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ /* single write -> no need to block vsync update */
+
+ /* start MIXER */
+ mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_streamoff(struct mxr_device *mdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ /* single write -> no need to block vsync update */
+
+ /* stop MIXER */
+ mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+int mxr_reg_wait4vsync(struct mxr_device *mdev)
+{
+ int ret;
+
+ clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
+ /* TODO: consider adding interruptible */
+ ret = wait_event_timeout(mdev->event_queue,
+ test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
+ msecs_to_jiffies(1000));
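+ /* wait_event_timeout() returns remaining jiffies (> 0) when the event arrived in time */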
+ if (ret > 0)
+ return 0;
+ if (ret < 0)
+ return ret;
+ mxr_warn(mdev, "no vsync detected - timeout\n");
+ return -ETIME;
+}
+
+void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ u32 val = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ /* choosing between interlace and progressive mode */
+ if (fmt->field == V4L2_FIELD_INTERLACED)
+ val |= MXR_CFG_SCAN_INTERLACE;
+ else
+ val |= MXR_CFG_SCAN_PROGRASSIVE;
+
+ /* choosing the proper HD or SD mode */
+ if (fmt->height == 480)
+ val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
+ else if (fmt->height == 576)
+ val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
+ else if (fmt->height == 720)
+ val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+ else if (fmt->height == 1080)
+ val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
+ else
+ WARN(1, "unrecognized mbus height %u!\n", fmt->height);
+
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+
+ val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
+ vp_write_mask(mdev, VP_MODE, val,
+ VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
+{
+ /* no extra actions need to be done */
+}
+
+void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
+{
+ /* no extra actions need to be done */
+}
+
+static const u8 filter_y_horiz_tap8[] = {
+ 0, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 0, 0, 0,
+ 0, 2, 4, 5, 6, 6, 6, 6,
+ 6, 5, 5, 4, 3, 2, 1, 1,
+ 0, -6, -12, -16, -18, -20, -21, -20,
+ -20, -18, -16, -13, -10, -8, -5, -2,
+ 127, 126, 125, 121, 114, 107, 99, 89,
+ 79, 68, 57, 46, 35, 25, 16, 8,
+};
+
+static const u8 filter_y_vert_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+ 0, 5, 11, 19, 27, 37, 48, 59,
+ 70, 81, 92, 102, 111, 118, 124, 126,
+ 0, 0, -1, -1, -2, -3, -4, -5,
+ -6, -7, -8, -8, -8, -8, -6, -3,
+};
+
+static const u8 filter_cr_horiz_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+};
+
+static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
+ int reg_id, const u8 *data, unsigned int size)
+{
+ /* assure 4-byte align */
+ BUG_ON(size & 3);
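+ /* pack 4 coefficients per 32-bit register, most significant byte first */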
+ for (; size; size -= 4, reg_id += 4, data += 4) {
+ u32 val = (data[0] << 24) | (data[1] << 16) |
+ (data[2] << 8) | data[3];
+ vp_write(mdev, reg_id, val);
+ }
+}
+
+static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
+{
+ mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
+ filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+ mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
+ filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+ mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
+ filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+}
+
+static void mxr_reg_mxr_dump(struct mxr_device *mdev)
+{
+#define DUMPREG(reg_id) \
+do { \
+ mxr_dbg(mdev, #reg_id " = %08x\n", \
+ (u32)readl(mdev->res.mxr_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(MXR_STATUS);
+ DUMPREG(MXR_CFG);
+ DUMPREG(MXR_INT_EN);
+ DUMPREG(MXR_INT_STATUS);
+
+ DUMPREG(MXR_LAYER_CFG);
+ DUMPREG(MXR_VIDEO_CFG);
+
+ DUMPREG(MXR_GRAPHIC0_CFG);
+ DUMPREG(MXR_GRAPHIC0_BASE);
+ DUMPREG(MXR_GRAPHIC0_SPAN);
+ DUMPREG(MXR_GRAPHIC0_WH);
+ DUMPREG(MXR_GRAPHIC0_SXY);
+ DUMPREG(MXR_GRAPHIC0_DXY);
+
+ DUMPREG(MXR_GRAPHIC1_CFG);
+ DUMPREG(MXR_GRAPHIC1_BASE);
+ DUMPREG(MXR_GRAPHIC1_SPAN);
+ DUMPREG(MXR_GRAPHIC1_WH);
+ DUMPREG(MXR_GRAPHIC1_SXY);
+ DUMPREG(MXR_GRAPHIC1_DXY);
+#undef DUMPREG
+}
+
+static void mxr_reg_vp_dump(struct mxr_device *mdev)
+{
+#define DUMPREG(reg_id) \
+do { \
+ mxr_dbg(mdev, #reg_id " = %08x\n", \
+ (u32) readl(mdev->res.vp_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(VP_ENABLE);
+ DUMPREG(VP_SRESET);
+ DUMPREG(VP_SHADOW_UPDATE);
+ DUMPREG(VP_FIELD_ID);
+ DUMPREG(VP_MODE);
+ DUMPREG(VP_IMG_SIZE_Y);
+ DUMPREG(VP_IMG_SIZE_C);
+ DUMPREG(VP_PER_RATE_CTRL);
+ DUMPREG(VP_TOP_Y_PTR);
+ DUMPREG(VP_BOT_Y_PTR);
+ DUMPREG(VP_TOP_C_PTR);
+ DUMPREG(VP_BOT_C_PTR);
+ DUMPREG(VP_ENDIAN_MODE);
+ DUMPREG(VP_SRC_H_POSITION);
+ DUMPREG(VP_SRC_V_POSITION);
+ DUMPREG(VP_SRC_WIDTH);
+ DUMPREG(VP_SRC_HEIGHT);
+ DUMPREG(VP_DST_H_POSITION);
+ DUMPREG(VP_DST_V_POSITION);
+ DUMPREG(VP_DST_WIDTH);
+ DUMPREG(VP_DST_HEIGHT);
+ DUMPREG(VP_H_RATIO);
+ DUMPREG(VP_V_RATIO);
+
+#undef DUMPREG
+}
+
+void mxr_reg_dump(struct mxr_device *mdev)
+{
+ mxr_reg_mxr_dump(mdev);
+ mxr_reg_vp_dump(mdev);
+}
+
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
new file mode 100644
index 0000000..43ac22f
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -0,0 +1,1006 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include "mixer.h"
+
+#include <media/v4l2-ioctl.h>
+#include <linux/videodev2.h>
+#include <linux/mm.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <media/videobuf2-dma-contig.h>
+
+static int find_reg_callback(struct device *dev, void *p)
+{
+ struct v4l2_subdev **sd = p;
+
+ *sd = dev_get_drvdata(dev);
+ /* non-zero value stops iteration */
+ return 1;
+}
+
+static struct v4l2_subdev *find_and_register_subdev(
+ struct mxr_device *mdev, char *module_name)
+{
+ struct device_driver *drv;
+ struct v4l2_subdev *sd = NULL;
+ int ret;
+
+ /* TODO: add waiting until probe is finished */
+ drv = driver_find(module_name, &platform_bus_type);
+ if (!drv) {
+ mxr_warn(mdev, "module %s is missing\n", module_name);
+ return NULL;
+ }
+ /* driver refcnt is increased, it is safe to iterate over devices */
+ ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
+ /* ret == 0 means that find_reg_callback was never executed */
+ if (sd == NULL) {
+ mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
+ goto done;
+ }
+ /* v4l2_device_register_subdev detects if sd is NULL */
+ ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
+ if (ret) {
+ mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
+ sd = NULL;
+ }
+
+done:
+ put_driver(drv);
+ return sd;
+}
+
+int __devinit mxr_acquire_video(struct mxr_device *mdev,
+ struct mxr_output_conf *output_conf, int output_count)
+{
+ struct device *dev = mdev->dev;
+ struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
+ int i;
+ int ret = 0;
+ struct v4l2_subdev *sd;
+
+ strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
+ /* prepare context for V4L2 device */
+ ret = v4l2_device_register(dev, v4l2_dev);
+ if (ret) {
+ mxr_err(mdev, "could not register v4l2 device.\n");
+ goto fail;
+ }
+
+ mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
+ if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
+ mxr_err(mdev, "could not acquire vb2 allocator\n");
+ goto fail_v4l2_dev;
+ }
+
+ /* registering outputs */
+ mdev->output_cnt = 0;
+ for (i = 0; i < output_count; ++i) {
+ struct mxr_output_conf *conf = &output_conf[i];
+ struct mxr_output *out;
+
+ sd = find_and_register_subdev(mdev, conf->module_name);
+ /* trying to register next output */
+ if (sd == NULL)
+ continue;
+ out = kzalloc(sizeof *out, GFP_KERNEL);
+ if (out == NULL) {
+ mxr_err(mdev, "no memory for '%s'\n",
+ conf->output_name);
+ ret = -ENOMEM;
+ /* registered subdevs are removed in fail_v4l2_dev */
+ goto fail_output;
+ }
+ strlcpy(out->name, conf->output_name, sizeof(out->name));
+ out->sd = sd;
+ out->cookie = conf->cookie;
+ mdev->output[mdev->output_cnt++] = out;
+ mxr_info(mdev, "added output '%s' from module '%s'\n",
+ conf->output_name, conf->module_name);
+ /* checking if maximal number of outputs is reached */
+ if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
+ break;
+ }
+
+ if (mdev->output_cnt == 0) {
+ mxr_err(mdev, "failed to register any output\n");
+ ret = -ENODEV;
+ /* skipping fail_output because there is nothing to free */
+ goto fail_vb2_allocator;
+ }
+
+ return 0;
+
+fail_output:
+ /* kfree is NULL-safe */
+ for (i = 0; i < mdev->output_cnt; ++i)
+ kfree(mdev->output[i]);
+ memset(mdev->output, 0, sizeof mdev->output);
+
+fail_vb2_allocator:
+ /* freeing allocator context */
+ vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
+
+fail_v4l2_dev:
+ /* NOTE: v4l2_device_unregister() automatically unregisters all subdevs */
+ v4l2_device_unregister(v4l2_dev);
+
+fail:
+ return ret;
+}
+
+void __devexit mxr_release_video(struct mxr_device *mdev)
+{
+ int i;
+
+ /* kfree is NULL-safe */
+ for (i = 0; i < mdev->output_cnt; ++i)
+ kfree(mdev->output[i]);
+
+ vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
+ v4l2_device_unregister(&mdev->v4l2_dev);
+}
+
+static int mxr_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+ strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
+ strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
+ sprintf(cap->bus_info, "%d", layer->idx);
+ cap->version = KERNEL_VERSION(0, 1, 0);
+ cap->capabilities = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+ return 0;
+}
+
+/* Geometry handling */
+static void mxr_layer_geo_fix(struct mxr_layer *layer)
+{
+ struct mxr_device *mdev = layer->mdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+
+ /* TODO: add some dirty flag to avoid unnecessary adjustments */
+ mxr_get_mbus_fmt(mdev, &mbus_fmt);
+ layer->geo.dst.full_width = mbus_fmt.width;
+ layer->geo.dst.full_height = mbus_fmt.height;
+ layer->geo.dst.field = mbus_fmt.field;
+ layer->ops.fix_geometry(layer);
+}
+
+static void mxr_layer_default_geo(struct mxr_layer *layer)
+{
+ struct mxr_device *mdev = layer->mdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+
+ memset(&layer->geo, 0, sizeof layer->geo);
+
+ mxr_get_mbus_fmt(mdev, &mbus_fmt);
+
+ layer->geo.dst.full_width = mbus_fmt.width;
+ layer->geo.dst.full_height = mbus_fmt.height;
+ layer->geo.dst.width = layer->geo.dst.full_width;
+ layer->geo.dst.height = layer->geo.dst.full_height;
+ layer->geo.dst.field = mbus_fmt.field;
+
+ layer->geo.src.full_width = mbus_fmt.width;
+ layer->geo.src.full_height = mbus_fmt.height;
+ layer->geo.src.width = layer->geo.src.full_width;
+ layer->geo.src.height = layer->geo.src.full_height;
+
+ layer->ops.fix_geometry(layer);
+}
+
+static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
+{
+ mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
+ geo->src.full_width, geo->src.full_height);
+ mxr_dbg(mdev, "src.size = (%u, %u)\n",
+ geo->src.width, geo->src.height);
+ mxr_dbg(mdev, "src.offset = (%u, %u)\n",
+ geo->src.x_offset, geo->src.y_offset);
+ mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
+ geo->dst.full_width, geo->dst.full_height);
+ mxr_dbg(mdev, "dst.size = (%u, %u)\n",
+ geo->dst.width, geo->dst.height);
+ mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
+ geo->dst.x_offset, geo->dst.y_offset);
+ mxr_dbg(mdev, "ratio = (%u, %u)\n",
+ geo->x_ratio, geo->y_ratio);
+}
+
+
+static const struct mxr_format *find_format_by_fourcc(
+ struct mxr_layer *layer, unsigned long fourcc);
+static const struct mxr_format *find_format_by_index(
+ struct mxr_layer *layer, unsigned long index);
+
+static int mxr_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ const struct mxr_format *fmt;
+
+ mxr_dbg(mdev, "%s\n", __func__);
+ fmt = find_format_by_index(layer, f->index);
+ if (fmt == NULL)
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int mxr_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ const struct mxr_format *fmt;
+ struct v4l2_pix_format_mplane *pix;
+ struct mxr_device *mdev = layer->mdev;
+ struct mxr_geometry *geo = &layer->geo;
+
+ mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
+
+ pix = &f->fmt.pix_mp;
+ fmt = find_format_by_fourcc(layer, pix->pixelformat);
+ if (fmt == NULL) {
+ mxr_warn(mdev, "not recognized fourcc: %08x\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+ layer->fmt = fmt;
+ geo->src.full_width = pix->width;
+ geo->src.width = pix->width;
+ geo->src.full_height = pix->height;
+ geo->src.height = pix->height;
+ /* assure consistency of geometry */
+ mxr_layer_geo_fix(layer);
+ mxr_dbg(mdev, "width=%u height=%u span=%u\n",
+ geo->src.width, geo->src.height, geo->src.full_width);
+
+ return 0;
+}
+
+static unsigned int divup(unsigned int dividend, unsigned int divisor)
+{
+ return (dividend + divisor - 1) / divisor;
+}
+
+unsigned long mxr_get_plane_size(const struct mxr_block *blk,
+ unsigned int width, unsigned int height)
+{
+ unsigned int bl_width = divup(width, blk->width);
+ unsigned int bl_height = divup(height, blk->height);
+
+ return bl_width * bl_height * blk->size;
+}
+
+static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
+ const struct mxr_format *fmt, u32 width, u32 height)
+{
+ int i;
+
+ memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
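+ /* accumulate the sizes of all planes that share the same subframe (memory buffer) */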
+ for (i = 0; i < fmt->num_planes; ++i) {
+ struct v4l2_plane_pix_format *plane = planes
+ + fmt->plane2subframe[i];
+ const struct mxr_block *blk = &fmt->plane[i];
+ u32 bl_width = divup(width, blk->width);
+ u32 bl_height = divup(height, blk->height);
+ u32 sizeimage = bl_width * bl_height * blk->size;
+ u16 bytesperline = bl_width * blk->size / blk->height;
+
+ plane->sizeimage += sizeimage;
+ plane->bytesperline = max(plane->bytesperline, bytesperline);
+ }
+}
+
+static int mxr_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+ pix->width = layer->geo.src.full_width;
+ pix->height = layer->geo.src.full_height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = layer->fmt->fourcc;
+ pix->colorspace = layer->fmt->colorspace;
+ mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);
+
+ return 0;
+}
+
+static inline struct mxr_crop *choose_crop_by_type(struct mxr_geometry *geo,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return &geo->dst;
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ return &geo->src;
+ default:
+ return NULL;
+ }
+}
+
+static int mxr_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_crop *crop;
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ crop = choose_crop_by_type(&layer->geo, a->type);
+ if (crop == NULL)
+ return -EINVAL;
+ mxr_layer_geo_fix(layer);
+ a->c.left = crop->x_offset;
+ a->c.top = crop->y_offset;
+ a->c.width = crop->width;
+ a->c.height = crop->height;
+ return 0;
+}
+
+static int mxr_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_crop *crop;
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ crop = choose_crop_by_type(&layer->geo, a->type);
+ if (crop == NULL)
+ return -EINVAL;
+ crop->x_offset = a->c.left;
+ crop->y_offset = a->c.top;
+ crop->width = a->c.width;
+ crop->height = a->c.height;
+ mxr_layer_geo_fix(layer);
+ return 0;
+}
+
+static int mxr_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_crop *crop;
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ crop = choose_crop_by_type(&layer->geo, a->type);
+ if (crop == NULL)
+ return -EINVAL;
+ mxr_layer_geo_fix(layer);
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = crop->full_width;
+ a->bounds.height = crop->full_height;
+ a->defrect = a->bounds;
+ /* setting pixel aspect to 1/1 */
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+ return 0;
+}
+
+static int mxr_enum_dv_presets(struct file *file, void *fh,
+ struct v4l2_dv_enum_preset *preset)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ /* lock protects from changing sd_out */
+ mutex_lock(&mdev->mutex);
+ ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
+ mutex_unlock(&mdev->mutex);
+
+ return ret ? -EINVAL : 0;
+}
+
+static int mxr_s_dv_preset(struct file *file, void *fh,
+ struct v4l2_dv_preset *preset)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ /* lock protects from changing sd_out */
+ mutex_lock(&mdev->mutex);
+
+ /* preset change cannot be done while there is an entity
+ * dependent on output configuration
+ */
+ if (mdev->n_output > 0) {
+ mutex_unlock(&mdev->mutex);
+ return -EBUSY;
+ }
+
+ ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);
+
+ mutex_unlock(&mdev->mutex);
+
+ /* any failure should return EINVAL according to V4L2 doc */
+ return ret ? -EINVAL : 0;
+}
+
+static int mxr_g_dv_preset(struct file *file, void *fh,
+ struct v4l2_dv_preset *preset)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ /* lock protects from changing sd_out */
+ mutex_lock(&mdev->mutex);
+ ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
+ mutex_unlock(&mdev->mutex);
+
+ return ret ? -EINVAL : 0;
+}
+
+static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ /* lock protects from changing sd_out */
+ mutex_lock(&mdev->mutex);
+
+ /* standard change cannot be done while there is an entity
+ * dependent on output configuration
+ */
+ if (mdev->n_output > 0) {
+ mutex_unlock(&mdev->mutex);
+ return -EBUSY;
+ }
+
+ ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);
+
+ mutex_unlock(&mdev->mutex);
+
+ return ret ? -EINVAL : 0;
+}
+
+static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ /* lock protects from changing sd_out */
+ mutex_lock(&mdev->mutex);
+ ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
+ mutex_unlock(&mdev->mutex);
+
+ return ret ? -EINVAL : 0;
+}
+
+static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ struct mxr_output *out;
+ struct v4l2_subdev *sd;
+
+ if (a->index >= mdev->output_cnt)
+ return -EINVAL;
+ out = mdev->output[a->index];
+ BUG_ON(out == NULL);
+ sd = out->sd;
+ strlcpy(a->name, out->name, sizeof(a->name));
+
+ /* try to obtain supported tv norms */
+ v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
+ a->capabilities = 0;
+ if (sd->ops->video && sd->ops->video->s_dv_preset)
+ a->capabilities |= V4L2_OUT_CAP_PRESETS;
+ if (sd->ops->video && sd->ops->video->s_std_output)
+ a->capabilities |= V4L2_OUT_CAP_STD;
+ a->type = V4L2_OUTPUT_TYPE_ANALOG;
+
+ return 0;
+}
+
+static int mxr_s_output(struct file *file, void *fh, unsigned int i)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret = 0;
+
+ if (i >= mdev->output_cnt || mdev->output[i] == NULL)
+ return -EINVAL;
+
+ mutex_lock(&mdev->mutex);
+ if (mdev->n_output > 0) {
+ ret = -EBUSY;
+ goto done;
+ }
+ mdev->current_output = i;
+ vfd->tvnorms = 0;
+ v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
+ &vfd->tvnorms);
+ mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);
+
+done:
+ mutex_unlock(&mdev->mutex);
+ return ret;
+}
+
+static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+
+ mutex_lock(&mdev->mutex);
+ *p = mdev->current_output;
+ mutex_unlock(&mdev->mutex);
+
+ return 0;
+}
+
+static int mxr_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_reqbufs(&layer->vb_queue, p);
+}
+
+static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_querybuf(&layer->vb_queue, p);
+}
+
+static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
+ return vb2_qbuf(&layer->vb_queue, p);
+}
+
+static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
+}
+
+static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_streamon(&layer->vb_queue, i);
+}
+
+static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_streamoff(&layer->vb_queue, i);
+}
+
+static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
+ .vidioc_querycap = mxr_querycap,
+ /* format handling */
+ .vidioc_enum_fmt_vid_out = mxr_enum_fmt,
+ .vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
+ .vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
+ /* buffer control */
+ .vidioc_reqbufs = mxr_reqbufs,
+ .vidioc_querybuf = mxr_querybuf,
+ .vidioc_qbuf = mxr_qbuf,
+ .vidioc_dqbuf = mxr_dqbuf,
+ /* Streaming control */
+ .vidioc_streamon = mxr_streamon,
+ .vidioc_streamoff = mxr_streamoff,
+ /* Preset functions */
+ .vidioc_enum_dv_presets = mxr_enum_dv_presets,
+ .vidioc_s_dv_preset = mxr_s_dv_preset,
+ .vidioc_g_dv_preset = mxr_g_dv_preset,
+ /* analog TV standard functions */
+ .vidioc_s_std = mxr_s_std,
+ .vidioc_g_std = mxr_g_std,
+ /* Output handling */
+ .vidioc_enum_output = mxr_enum_output,
+ .vidioc_s_output = mxr_s_output,
+ .vidioc_g_output = mxr_g_output,
+ /* Crop ioctls */
+ .vidioc_g_crop = mxr_g_crop,
+ .vidioc_s_crop = mxr_s_crop,
+ .vidioc_cropcap = mxr_cropcap,
+};
+
+static int mxr_video_open(struct file *file)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret = 0;
+
+ mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
+ /* assure device probe is finished */
+ wait_for_device_probe();
+ /* creating context for file descriptor */
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ mxr_err(mdev, "v4l2_fh_open failed\n");
+ return ret;
+ }
+
+ /* leaving if layer is already initialized */
+ if (!v4l2_fh_is_singular_file(file))
+ return 0;
+
+ /* FIXME: should power be enabled on open? */
+ ret = mxr_power_get(mdev);
+ if (ret) {
+ mxr_err(mdev, "power on failed\n");
+ goto fail_fh_open;
+ }
+
+ ret = vb2_queue_init(&layer->vb_queue);
+ if (ret != 0) {
+ mxr_err(mdev, "failed to initialize vb2 queue\n");
+ goto fail_power;
+ }
+ /* set default format, first on the list */
+ layer->fmt = layer->fmt_array[0];
+ /* setup default geometry */
+ mxr_layer_default_geo(layer);
+
+ return 0;
+
+fail_power:
+ mxr_power_put(mdev);
+
+fail_fh_open:
+ v4l2_fh_release(file);
+
+ return ret;
+}
+
+static unsigned int
+mxr_video_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+ return vb2_poll(&layer->vb_queue, file, wait);
+}
+
+static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+ return vb2_mmap(&layer->vb_queue, vma);
+}
+
+static int mxr_video_release(struct file *file)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ if (v4l2_fh_is_singular_file(file)) {
+ vb2_queue_release(&layer->vb_queue);
+ mxr_power_put(layer->mdev);
+ }
+ v4l2_fh_release(file);
+ return 0;
+}
+
+static const struct v4l2_file_operations mxr_fops = {
+ .owner = THIS_MODULE,
+ .open = mxr_video_open,
+ .poll = mxr_video_poll,
+ .mmap = mxr_video_mmap,
+ .release = mxr_video_release,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ struct mxr_layer *layer = vb2_get_drv_priv(vq);
+ const struct mxr_format *fmt = layer->fmt;
+ int i;
+ struct mxr_device *mdev = layer->mdev;
+ struct v4l2_plane_pix_format planes[3];
+
+ mxr_dbg(mdev, "%s\n", __func__);
+ /* checking if format was configured */
+ if (fmt == NULL)
+ return -EINVAL;
+ mxr_dbg(mdev, "fmt = %s\n", fmt->name);
+ mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
+ layer->geo.src.full_height);
+
+ *nplanes = fmt->num_subframes;
+ for (i = 0; i < fmt->num_subframes; ++i) {
+ alloc_ctxs[i] = layer->mdev->alloc_ctx;
+ sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
+ mxr_dbg(mdev, "size[%d] = %08lx\n", i, sizes[i]);
+ }
+
+ if (*nbuffers == 0)
+ *nbuffers = 1;
+
+ return 0;
+}
+
+static void buf_queue(struct vb2_buffer *vb)
+{
+ struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
+ struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
+ struct mxr_device *mdev = layer->mdev;
+ unsigned long flags;
+ int must_start = 0;
+
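+ /* the first buffer queued after streamon starts the hardware */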
+ spin_lock_irqsave(&layer->enq_slock, flags);
+ if (layer->state == MXR_LAYER_STREAMING_START) {
+ layer->state = MXR_LAYER_STREAMING;
+ must_start = 1;
+ }
+ list_add_tail(&buffer->list, &layer->enq_list);
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+ if (must_start) {
+ layer->ops.stream_set(layer, MXR_ENABLE);
+ mxr_streamer_get(mdev);
+ }
+
+ mxr_dbg(mdev, "queuing buffer\n");
+}
+
+static void wait_lock(struct vb2_queue *vq)
+{
+ struct mxr_layer *layer = vb2_get_drv_priv(vq);
+
+ mxr_dbg(layer->mdev, "%s\n", __func__);
+ mutex_lock(&layer->mutex);
+}
+
+static void wait_unlock(struct vb2_queue *vq)
+{
+ struct mxr_layer *layer = vb2_get_drv_priv(vq);
+
+ mxr_dbg(layer->mdev, "%s\n", __func__);
+ mutex_unlock(&layer->mutex);
+}
+
+static int start_streaming(struct vb2_queue *vq)
+{
+ struct mxr_layer *layer = vb2_get_drv_priv(vq);
+ struct mxr_device *mdev = layer->mdev;
+ unsigned long flags;
+
+ mxr_dbg(mdev, "%s\n", __func__);
+ /* block any changes in output configuration */
+ mxr_output_get(mdev);
+
+ /* update layers geometry */
+ mxr_layer_geo_fix(layer);
+ mxr_geometry_dump(mdev, &layer->geo);
+
+ layer->ops.format_set(layer);
+ /* enabling layer in hardware */
+ spin_lock_irqsave(&layer->enq_slock, flags);
+ layer->state = MXR_LAYER_STREAMING_START;
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+
+ return 0;
+}
+
+static void mxr_watchdog(unsigned long arg)
+{
+ struct mxr_layer *layer = (struct mxr_layer *) arg;
+ struct mxr_device *mdev = layer->mdev;
+ unsigned long flags;
+
+ mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);
+
+ spin_lock_irqsave(&layer->enq_slock, flags);
+
+ if (layer->update_buf == layer->shadow_buf)
+ layer->update_buf = NULL;
+ if (layer->update_buf) {
+ vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
+ layer->update_buf = NULL;
+ }
+ if (layer->shadow_buf) {
+ vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
+ layer->shadow_buf = NULL;
+ }
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+}
+
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct mxr_layer *layer = vb2_get_drv_priv(vq);
+ struct mxr_device *mdev = layer->mdev;
+ unsigned long flags;
+ struct timer_list watchdog;
+ struct mxr_buffer *buf, *buf_tmp;
+
+ mxr_dbg(mdev, "%s\n", __func__);
+
+ spin_lock_irqsave(&layer->enq_slock, flags);
+
+ /* reset list */
+ layer->state = MXR_LAYER_STREAMING_FINISH;
+
+ /* return all queued buffers with the error state */
+ list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+
+ /* give the last buffers 1 second to complete */
+ setup_timer_on_stack(&watchdog, mxr_watchdog,
+ (unsigned long)layer);
+ mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));
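+ /* if the hardware never completes, mxr_watchdog() returns the buffers with an error */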
+
+ /* wait until all buffers have reached the done state */
+ vb2_wait_for_all_buffers(vq);
+
+ /* stop timer if all synchronization is done */
+ del_timer_sync(&watchdog);
+ destroy_timer_on_stack(&watchdog);
+
+ /* stopping hardware */
+ spin_lock_irqsave(&layer->enq_slock, flags);
+ layer->state = MXR_LAYER_IDLE;
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+
+ /* disabling layer in hardware */
+ layer->ops.stream_set(layer, MXR_DISABLE);
+ /* remove one streamer */
+ mxr_streamer_put(mdev);
+ /* allow changes in output configuration */
+ mxr_output_put(mdev);
+ return 0;
+}
+
+static struct vb2_ops mxr_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buf_queue,
+ .wait_prepare = wait_unlock,
+ .wait_finish = wait_lock,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
+/* FIXME: try to move these functions into mxr_base_layer_create */
+int mxr_base_layer_register(struct mxr_layer *layer)
+{
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ mxr_err(mdev, "failed to register video device\n");
+ else
+ mxr_info(mdev, "registered layer %s as /dev/video%d\n",
+ layer->vfd.name, layer->vfd.num);
+ return ret;
+}
+
+void mxr_base_layer_unregister(struct mxr_layer *layer)
+{
+ video_unregister_device(&layer->vfd);
+}
+
+void mxr_layer_release(struct mxr_layer *layer)
+{
+ if (layer->ops.release)
+ layer->ops.release(layer);
+}
+
+void mxr_base_layer_release(struct mxr_layer *layer)
+{
+ kfree(layer);
+}
+
+static void mxr_vfd_release(struct video_device *vdev)
+{
+ printk(KERN_INFO "video device release\n");
+}
+
+struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
+ int idx, char *name, struct mxr_layer_ops *ops)
+{
+ struct mxr_layer *layer;
+
+ layer = kzalloc(sizeof *layer, GFP_KERNEL);
+ if (layer == NULL) {
+ mxr_err(mdev, "not enough memory for layer.\n");
+ goto fail;
+ }
+
+ layer->mdev = mdev;
+ layer->idx = idx;
+ layer->ops = *ops;
+
+ spin_lock_init(&layer->enq_slock);
+ INIT_LIST_HEAD(&layer->enq_list);
+ mutex_init(&layer->mutex);
+
+ layer->vfd = (struct video_device) {
+ .minor = -1,
+ .release = mxr_vfd_release,
+ .fops = &mxr_fops,
+ .ioctl_ops = &mxr_ioctl_ops,
+ };
+ strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
+ /* let framework control PRIORITY */
+ set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);
+
+ video_set_drvdata(&layer->vfd, layer);
+ layer->vfd.lock = &layer->mutex;
+ layer->vfd.v4l2_dev = &mdev->v4l2_dev;
+
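+ /* output queue backed by the DMA-contig videobuf2 allocator */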
+ layer->vb_queue = (struct vb2_queue) {
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .io_modes = VB2_MMAP | VB2_USERPTR,
+ .drv_priv = layer,
+ .buf_struct_size = sizeof(struct mxr_buffer),
+ .ops = &mxr_video_qops,
+ .mem_ops = &vb2_dma_contig_memops,
+ };
+
+ return layer;
+
+fail:
+ return NULL;
+}
+
+static const struct mxr_format *find_format_by_fourcc(
+ struct mxr_layer *layer, unsigned long fourcc)
+{
+ int i;
+
+ for (i = 0; i < layer->fmt_array_size; ++i)
+ if (layer->fmt_array[i]->fourcc == fourcc)
+ return layer->fmt_array[i];
+ return NULL;
+}
+
+static const struct mxr_format *find_format_by_index(
+ struct mxr_layer *layer, unsigned long index)
+{
+ if (index >= layer->fmt_array_size)
+ return NULL;
+ return layer->fmt_array[index];
+}
+
diff --git a/drivers/media/video/s5p-tv/mixer_vp_layer.c b/drivers/media/video/s5p-tv/mixer_vp_layer.c
new file mode 100644
index 0000000..6950ed8
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_vp_layer.c
@@ -0,0 +1,211 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include "mixer.h"
+
+#include "regs-vp.h"
+
+#include <media/videobuf2-dma-contig.h>
+
+/* FORMAT DEFINITIONS */
+static const struct mxr_format mxr_fmt_nv12 = {
+ .name = "NV12",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .num_planes = 2,
+ .plane = {
+ { .width = 1, .height = 1, .size = 1 },
+ { .width = 2, .height = 2, .size = 2 },
+ },
+ .num_subframes = 1,
+ .cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
+};
+
+static const struct mxr_format mxr_fmt_nv21 = {
+ .name = "NV21",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .num_planes = 2,
+ .plane = {
+ { .width = 1, .height = 1, .size = 1 },
+ { .width = 2, .height = 2, .size = 2 },
+ },
+ .num_subframes = 1,
+ .cookie = VP_MODE_NV21 | VP_MODE_MEM_LINEAR,
+};
+
+static const struct mxr_format mxr_fmt_nv12m = {
+ .name = "NV12 (mplane)",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .num_planes = 2,
+ .plane = {
+ { .width = 1, .height = 1, .size = 1 },
+ { .width = 2, .height = 2, .size = 2 },
+ },
+ .num_subframes = 2,
+ .plane2subframe = {0, 1},
+ .cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
+};
+
+static const struct mxr_format mxr_fmt_nv12mt = {
+ .name = "NV12 tiled (mplane)",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .num_planes = 2,
+ .plane = {
+ { .width = 128, .height = 32, .size = 4096 },
+ { .width = 128, .height = 32, .size = 2048 },
+ },
+ .num_subframes = 2,
+ .plane2subframe = {0, 1},
+ .cookie = VP_MODE_NV12 | VP_MODE_MEM_TILED,
+};
+
+static const struct mxr_format *mxr_video_format[] = {
+ &mxr_fmt_nv12,
+ &mxr_fmt_nv21,
+ &mxr_fmt_nv12m,
+ &mxr_fmt_nv12mt,
+};
+
+/* AUXILIARY CALLBACKS */
+
+static void mxr_vp_layer_release(struct mxr_layer *layer)
+{
+ mxr_base_layer_unregister(layer);
+ mxr_base_layer_release(layer);
+}
+
+static void mxr_vp_buffer_set(struct mxr_layer *layer,
+ struct mxr_buffer *buf)
+{
+ dma_addr_t luma_addr[2] = {0, 0};
+ dma_addr_t chroma_addr[2] = {0, 0};
+
+ if (buf == NULL) {
+ mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
+ return;
+ }
+ luma_addr[0] = vb2_dma_contig_plane_paddr(&buf->vb, 0);
+ if (layer->fmt->num_subframes == 2) {
+ chroma_addr[0] = vb2_dma_contig_plane_paddr(&buf->vb, 1);
+ } else {
+ /* FIXME: mxr_get_plane_size() computes an integer division,
+ * which is slow and should not be performed in interrupt context */
+ chroma_addr[0] = luma_addr[0] + mxr_get_plane_size(
+ &layer->fmt->plane[0], layer->geo.src.full_width,
+ layer->geo.src.full_height);
+ }
+ if (layer->fmt->cookie & VP_MODE_MEM_TILED) {
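+ /* in tiled mode the bottom field starts at a fixed 0x40 byte offset */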
+ luma_addr[1] = luma_addr[0] + 0x40;
+ chroma_addr[1] = chroma_addr[0] + 0x40;
+ } else {
+ luma_addr[1] = luma_addr[0] + layer->geo.src.full_width;
+ chroma_addr[1] = chroma_addr[0];
+ }
+ mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
+}
+
+static void mxr_vp_stream_set(struct mxr_layer *layer, int en)
+{
+ mxr_reg_vp_layer_stream(layer->mdev, en);
+}
+
+static void mxr_vp_format_set(struct mxr_layer *layer)
+{
+ mxr_reg_vp_format(layer->mdev, layer->fmt, &layer->geo);
+}
+
+static void mxr_vp_fix_geometry(struct mxr_layer *layer)
+{
+ struct mxr_geometry *geo = &layer->geo;
+
+ /* align horizontal size to 8 pixels */
+ geo->src.full_width = ALIGN(geo->src.full_width, 8);
+ /* limit to boundary size */
+ geo->src.full_width = clamp_val(geo->src.full_width, 8, 8192);
+ geo->src.full_height = clamp_val(geo->src.full_height, 1, 8192);
+ geo->src.width = clamp_val(geo->src.width, 32, geo->src.full_width);
+ geo->src.width = min(geo->src.width, 2047U);
+ geo->src.height = clamp_val(geo->src.height, 4, geo->src.full_height);
+ geo->src.height = min(geo->src.height, 2047U);
+
+ /* setting size of output window */
+ geo->dst.width = clamp_val(geo->dst.width, 8, geo->dst.full_width);
+ geo->dst.height = clamp_val(geo->dst.height, 1, geo->dst.full_height);
+
+ /* ensure that scaling is in range 1/4x to 16x */
+ if (geo->src.width >= 4 * geo->dst.width)
+ geo->src.width = 4 * geo->dst.width;
+ if (geo->dst.width >= 16 * geo->src.width)
+ geo->dst.width = 16 * geo->src.width;
+ if (geo->src.height >= 4 * geo->dst.height)
+ geo->src.height = 4 * geo->dst.height;
+ if (geo->dst.height >= 16 * geo->src.height)
+ geo->dst.height = 16 * geo->src.height;
+
+ /* scaling ratio in 16.16 fixed-point format */
+ geo->x_ratio = (geo->src.width << 16) / geo->dst.width;
+ geo->y_ratio = (geo->src.height << 16) / geo->dst.height;
+
+ /* adjust offsets */
+ geo->src.x_offset = min(geo->src.x_offset,
+ geo->src.full_width - geo->src.width);
+ geo->src.y_offset = min(geo->src.y_offset,
+ geo->src.full_height - geo->src.height);
+ geo->dst.x_offset = min(geo->dst.x_offset,
+ geo->dst.full_width - geo->dst.width);
+ geo->dst.y_offset = min(geo->dst.y_offset,
+ geo->dst.full_height - geo->dst.height);
+}
+
+/* PUBLIC API */
+
+struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
+{
+ struct mxr_layer *layer;
+ int ret;
+ struct mxr_layer_ops ops = {
+ .release = mxr_vp_layer_release,
+ .buffer_set = mxr_vp_buffer_set,
+ .stream_set = mxr_vp_stream_set,
+ .format_set = mxr_vp_format_set,
+ .fix_geometry = mxr_vp_fix_geometry,
+ };
+ char name[32];
+
+ sprintf(name, "video%d", idx);
+
+ layer = mxr_base_layer_create(mdev, idx, name, &ops);
+ if (layer == NULL) {
+ mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
+ goto fail;
+ }
+
+ layer->fmt_array = mxr_video_format;
+ layer->fmt_array_size = ARRAY_SIZE(mxr_video_format);
+
+ ret = mxr_base_layer_register(layer);
+ if (ret)
+ goto fail_layer;
+
+ return layer;
+
+fail_layer:
+ mxr_base_layer_release(layer);
+
+fail:
+ return NULL;
+}
+
diff --git a/drivers/media/video/s5p-tv/regs-hdmi.h b/drivers/media/video/s5p-tv/regs-hdmi.h
new file mode 100644
index 0000000..ac93ad6
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-hdmi.h
@@ -0,0 +1,141 @@
+/* drivers/media/video/s5p-tv/regs-hdmi.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * HDMI register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef SAMSUNG_REGS_HDMI_H
+#define SAMSUNG_REGS_HDMI_H
+
+/*
+ * Register part
+*/
+
+#define HDMI_CTRL_BASE(x) ((x) + 0x00000000)
+#define HDMI_CORE_BASE(x) ((x) + 0x00010000)
+#define HDMI_TG_BASE(x) ((x) + 0x00050000)
+
+/* Control registers */
+#define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000)
+#define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004)
+#define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C)
+#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0018)
+#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x001C)
+#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
+
+/* Core registers */
+#define HDMI_CON_0 HDMI_CORE_BASE(0x0000)
+#define HDMI_CON_1 HDMI_CORE_BASE(0x0004)
+#define HDMI_CON_2 HDMI_CORE_BASE(0x0008)
+#define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010)
+#define HDMI_PHY_STATUS HDMI_CORE_BASE(0x0014)
+#define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020)
+#define HDMI_HPD HDMI_CORE_BASE(0x0030)
+#define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040)
+#define HDMI_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
+#define HDMI_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
+#define HDMI_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
+#define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0)
+#define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4)
+#define HDMI_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
+#define HDMI_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
+#define HDMI_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
+#define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4)
+#define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8)
+#define HDMI_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
+#define HDMI_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
+#define HDMI_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
+#define HDMI_AVI_CON HDMI_CORE_BASE(0x0300)
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x05C0)
+#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
+#define HDMI_HPD_GEN HDMI_CORE_BASE(0x05C8)
+
+/* Timing generator registers */
+#define HDMI_TG_CMD HDMI_TG_BASE(0x0000)
+#define HDMI_TG_H_FSZ_L HDMI_TG_BASE(0x0018)
+#define HDMI_TG_H_FSZ_H HDMI_TG_BASE(0x001C)
+#define HDMI_TG_HACT_ST_L HDMI_TG_BASE(0x0020)
+#define HDMI_TG_HACT_ST_H HDMI_TG_BASE(0x0024)
+#define HDMI_TG_HACT_SZ_L HDMI_TG_BASE(0x0028)
+#define HDMI_TG_HACT_SZ_H HDMI_TG_BASE(0x002C)
+#define HDMI_TG_V_FSZ_L HDMI_TG_BASE(0x0030)
+#define HDMI_TG_V_FSZ_H HDMI_TG_BASE(0x0034)
+#define HDMI_TG_VSYNC_L HDMI_TG_BASE(0x0038)
+#define HDMI_TG_VSYNC_H HDMI_TG_BASE(0x003C)
+#define HDMI_TG_VSYNC2_L HDMI_TG_BASE(0x0040)
+#define HDMI_TG_VSYNC2_H HDMI_TG_BASE(0x0044)
+#define HDMI_TG_VACT_ST_L HDMI_TG_BASE(0x0048)
+#define HDMI_TG_VACT_ST_H HDMI_TG_BASE(0x004C)
+#define HDMI_TG_VACT_SZ_L HDMI_TG_BASE(0x0050)
+#define HDMI_TG_VACT_SZ_H HDMI_TG_BASE(0x0054)
+#define HDMI_TG_FIELD_CHG_L HDMI_TG_BASE(0x0058)
+#define HDMI_TG_FIELD_CHG_H HDMI_TG_BASE(0x005C)
+#define HDMI_TG_VACT_ST2_L HDMI_TG_BASE(0x0060)
+#define HDMI_TG_VACT_ST2_H HDMI_TG_BASE(0x0064)
+#define HDMI_TG_VSYNC_TOP_HDMI_L HDMI_TG_BASE(0x0078)
+#define HDMI_TG_VSYNC_TOP_HDMI_H HDMI_TG_BASE(0x007C)
+#define HDMI_TG_VSYNC_BOT_HDMI_L HDMI_TG_BASE(0x0080)
+#define HDMI_TG_VSYNC_BOT_HDMI_H HDMI_TG_BASE(0x0084)
+#define HDMI_TG_FIELD_TOP_HDMI_L HDMI_TG_BASE(0x0088)
+#define HDMI_TG_FIELD_TOP_HDMI_H HDMI_TG_BASE(0x008C)
+#define HDMI_TG_FIELD_BOT_HDMI_L HDMI_TG_BASE(0x0090)
+#define HDMI_TG_FIELD_BOT_HDMI_H HDMI_TG_BASE(0x0094)
+
+/*
+ * Bit definition part
+ */
+
+/* HDMI_INTC_CON */
+#define HDMI_INTC_EN_GLOBAL (1 << 6)
+#define HDMI_INTC_EN_HPD_PLUG (1 << 3)
+#define HDMI_INTC_EN_HPD_UNPLUG (1 << 2)
+
+/* HDMI_INTC_FLAG */
+#define HDMI_INTC_FLAG_HPD_PLUG (1 << 3)
+#define HDMI_INTC_FLAG_HPD_UNPLUG (1 << 2)
+
+/* HDMI_PHY_RSTOUT */
+#define HDMI_PHY_SW_RSTOUT (1 << 0)
+
+/* HDMI_CORE_RSTOUT */
+#define HDMI_CORE_SW_RSTOUT (1 << 0)
+
+/* HDMI_CON_0 */
+#define HDMI_BLUE_SCR_EN (1 << 5)
+#define HDMI_EN (1 << 0)
+
+/* HDMI_PHY_STATUS */
+#define HDMI_PHY_STATUS_READY (1 << 0)
+
+/* HDMI_MODE_SEL */
+#define HDMI_MODE_HDMI_EN (1 << 1)
+#define HDMI_MODE_DVI_EN (1 << 0)
+#define HDMI_MODE_MASK (3 << 0)
+
+/* HDMI_TG_CMD */
+#define HDMI_TG_EN (1 << 0)
+
+#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/media/video/s5p-tv/regs-mixer.h b/drivers/media/video/s5p-tv/regs-mixer.h
new file mode 100644
index 0000000..3c84426
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-mixer.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Mixer register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#ifndef SAMSUNG_REGS_MIXER_H
+#define SAMSUNG_REGS_MIXER_H
+
+/*
+ * Register part
+ */
+#define MXR_STATUS 0x0000
+#define MXR_CFG 0x0004
+#define MXR_INT_EN 0x0008
+#define MXR_INT_STATUS 0x000C
+#define MXR_LAYER_CFG 0x0010
+#define MXR_VIDEO_CFG 0x0014
+#define MXR_GRAPHIC0_CFG 0x0020
+#define MXR_GRAPHIC0_BASE 0x0024
+#define MXR_GRAPHIC0_SPAN 0x0028
+#define MXR_GRAPHIC0_SXY 0x002C
+#define MXR_GRAPHIC0_WH 0x0030
+#define MXR_GRAPHIC0_DXY 0x0034
+#define MXR_GRAPHIC0_BLANK 0x0038
+#define MXR_GRAPHIC1_CFG 0x0040
+#define MXR_GRAPHIC1_BASE 0x0044
+#define MXR_GRAPHIC1_SPAN 0x0048
+#define MXR_GRAPHIC1_SXY 0x004C
+#define MXR_GRAPHIC1_WH 0x0050
+#define MXR_GRAPHIC1_DXY 0x0054
+#define MXR_GRAPHIC1_BLANK 0x0058
+#define MXR_BG_CFG 0x0060
+#define MXR_BG_COLOR0 0x0064
+#define MXR_BG_COLOR1 0x0068
+#define MXR_BG_COLOR2 0x006C
+
+/* for parametrized access to layer registers */
+#define MXR_GRAPHIC_CFG(i) (0x0020 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE(i) (0x0024 + (i) * 0x20)
+#define MXR_GRAPHIC_SPAN(i) (0x0028 + (i) * 0x20)
+#define MXR_GRAPHIC_SXY(i) (0x002C + (i) * 0x20)
+#define MXR_GRAPHIC_WH(i) (0x0030 + (i) * 0x20)
+#define MXR_GRAPHIC_DXY(i) (0x0034 + (i) * 0x20)
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+#define MXR_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define MXR_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
+
+/* bits for MXR_STATUS */
+#define MXR_STATUS_16_BURST (1 << 7)
+#define MXR_STATUS_BURST_MASK (1 << 7)
+#define MXR_STATUS_SYNC_ENABLE (1 << 2)
+#define MXR_STATUS_REG_RUN (1 << 0)
+
+/* bits for MXR_CFG */
+#define MXR_CFG_OUT_YUV444 (0 << 8)
+#define MXR_CFG_OUT_RGB888 (1 << 8)
+#define MXR_CFG_DST_SDO (0 << 7)
+#define MXR_CFG_DST_HDMI (1 << 7)
+#define MXR_CFG_DST_MASK (1 << 7)
+#define MXR_CFG_SCAN_HD_720 (0 << 6)
+#define MXR_CFG_SCAN_HD_1080 (1 << 6)
+#define MXR_CFG_GRP1_ENABLE (1 << 5)
+#define MXR_CFG_GRP0_ENABLE (1 << 4)
+#define MXR_CFG_VP_ENABLE (1 << 3)
+#define MXR_CFG_SCAN_INTERLACE (0 << 2)
+#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
+#define MXR_CFG_SCAN_NTSC (0 << 1)
+#define MXR_CFG_SCAN_PAL (1 << 1)
+#define MXR_CFG_SCAN_SD (0 << 0)
+#define MXR_CFG_SCAN_HD (1 << 0)
+#define MXR_CFG_SCAN_MASK 0x47
+
+/* bits for MXR_GRAPHICn_CFG */
+#define MXR_GRP_CFG_COLOR_KEY_DISABLE (1 << 21)
+#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
+#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
+#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
+
+/* bits for MXR_GRAPHICn_WH */
+#define MXR_GRP_WH_H_SCALE(x) MXR_MASK_VAL(x, 28, 28)
+#define MXR_GRP_WH_V_SCALE(x) MXR_MASK_VAL(x, 12, 12)
+#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_SXY */
+#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_DXY */
+#define MXR_GRP_DXY_DX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_DXY_DY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_INT_EN */
+#define MXR_INT_EN_VSYNC (1 << 11)
+#define MXR_INT_EN_ALL (0x0f << 8)
+
+/* bit for MXR_INT_STATUS */
+#define MXR_INT_CLEAR_VSYNC (1 << 11)
+#define MXR_INT_STATUS_VSYNC (1 << 0)
+
+/* bit for MXR_LAYER_CFG */
+#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
+#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
+
+#endif /* SAMSUNG_REGS_MIXER_H */
+
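A rough usage sketch, not part of the patch: configuring the mixer for progressive 1080-line RGB output towards HDMI with both graphic layers enabled might combine the MXR_CFG bits as below; the helper name and 'mxr_base' pointer are hypothetical, and the identifier spelling follows the header as defined above.

#include <linux/io.h>

static void mxr_cfg_hdmi_1080p_rgb(void __iomem *mxr_base)
{
	u32 val = readl(mxr_base + MXR_CFG);

	/* clear the scan and destination fields before picking the new mode */
	val &= ~(MXR_CFG_SCAN_MASK | MXR_CFG_DST_MASK);
	val |= MXR_CFG_OUT_RGB888 | MXR_CFG_DST_HDMI |
	       MXR_CFG_SCAN_HD | MXR_CFG_SCAN_HD_1080 |
	       MXR_CFG_SCAN_PROGRASSIVE |
	       MXR_CFG_GRP0_ENABLE | MXR_CFG_GRP1_ENABLE;
	writel(val, mxr_base + MXR_CFG);
}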
diff --git a/drivers/media/video/s5p-tv/regs-sdo.h b/drivers/media/video/s5p-tv/regs-sdo.h
new file mode 100644
index 0000000..7f7c2b8
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-sdo.h
@@ -0,0 +1,63 @@
+/* drivers/media/video/s5p-tv/regs-sdo.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * SDO register description file
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_SDO_H
+#define SAMSUNG_REGS_SDO_H
+
+/*
+ * Register part
+ */
+
+#define SDO_CLKCON 0x0000
+#define SDO_CONFIG 0x0008
+#define SDO_VBI 0x0014
+#define SDO_DAC 0x003C
+#define SDO_CCCON 0x0180
+#define SDO_IRQ 0x0280
+#define SDO_IRQMASK 0x0284
+#define SDO_VERSION 0x03D8
+
+/*
+ * Bit definition part
+ */
+
+/* SDO Clock Control Register (SDO_CLKCON) */
+#define SDO_TVOUT_SW_RESET (1 << 4)
+#define SDO_TVOUT_CLOCK_READY (1 << 1)
+#define SDO_TVOUT_CLOCK_ON (1 << 0)
+
+/* SDO Video Standard Configuration Register (SDO_CONFIG) */
+#define SDO_PROGRESSIVE (1 << 4)
+#define SDO_NTSC_M 0
+#define SDO_PAL_M 1
+#define SDO_PAL_BGHID 2
+#define SDO_PAL_N 3
+#define SDO_PAL_NC 4
+#define SDO_NTSC_443 8
+#define SDO_PAL_60 9
+#define SDO_STANDARD_MASK 0xf
+
+/* SDO VBI Configuration Register (SDO_VBI) */
+#define SDO_CVBS_WSS_INS (1 << 14)
+#define SDO_CVBS_CLOSED_CAPTION_MASK (3 << 12)
+
+/* SDO DAC Configuration Register (SDO_DAC) */
+#define SDO_POWER_ON_DAC (1 << 0)
+
+/* SDO Color Compensation On/Off Control (SDO_CCCON) */
+#define SDO_COMPENSATION_BHS_ADJ_OFF (1 << 4)
+#define SDO_COMPENSATION_CVBS_COMP_OFF (1 << 0)
+
+/* SDO Interrupt Request Register (SDO_IRQ) */
+#define SDO_VSYNC_IRQ_PEND (1 << 0)
+
+#endif /* SAMSUNG_REGS_SDO_H */
diff --git a/drivers/media/video/s5p-tv/regs-vp.h b/drivers/media/video/s5p-tv/regs-vp.h
new file mode 100644
index 0000000..6c63984
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-vp.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Video processor register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_VP_H
+#define SAMSUNG_REGS_VP_H
+
+/*
+ * Register part
+ */
+
+#define VP_ENABLE 0x0000
+#define VP_SRESET 0x0004
+#define VP_SHADOW_UPDATE 0x0008
+#define VP_FIELD_ID 0x000C
+#define VP_MODE 0x0010
+#define VP_IMG_SIZE_Y 0x0014
+#define VP_IMG_SIZE_C 0x0018
+#define VP_PER_RATE_CTRL 0x001C
+#define VP_TOP_Y_PTR 0x0028
+#define VP_BOT_Y_PTR 0x002C
+#define VP_TOP_C_PTR 0x0030
+#define VP_BOT_C_PTR 0x0034
+#define VP_ENDIAN_MODE 0x03CC
+#define VP_SRC_H_POSITION 0x0044
+#define VP_SRC_V_POSITION 0x0048
+#define VP_SRC_WIDTH 0x004C
+#define VP_SRC_HEIGHT 0x0050
+#define VP_DST_H_POSITION 0x0054
+#define VP_DST_V_POSITION 0x0058
+#define VP_DST_WIDTH 0x005C
+#define VP_DST_HEIGHT 0x0060
+#define VP_H_RATIO 0x0064
+#define VP_V_RATIO 0x0068
+#define VP_POLY8_Y0_LL 0x006C
+#define VP_POLY4_Y0_LL 0x00EC
+#define VP_POLY4_C0_LL 0x012C
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+
+#define VP_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define VP_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
+
+/* VP_ENABLE */
+#define VP_ENABLE_ON (1 << 0)
+
+/* VP_SRESET */
+#define VP_SRESET_PROCESSING (1 << 0)
+
+/* VP_SHADOW_UPDATE */
+#define VP_SHADOW_UPDATE_ENABLE (1 << 0)
+
+/* VP_MODE */
+#define VP_MODE_NV12 (0 << 6)
+#define VP_MODE_NV21 (1 << 6)
+#define VP_MODE_LINE_SKIP (1 << 5)
+#define VP_MODE_MEM_LINEAR (0 << 4)
+#define VP_MODE_MEM_TILED (1 << 4)
+#define VP_MODE_FMT_MASK (5 << 4)
+#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
+#define VP_MODE_2D_IPC (1 << 1)
+
+/* VP_IMG_SIZE_Y */
+/* VP_IMG_SIZE_C */
+#define VP_IMG_HSIZE(x) VP_MASK_VAL(x, 29, 16)
+#define VP_IMG_VSIZE(x) VP_MASK_VAL(x, 13, 0)
+
+/* VP_SRC_H_POSITION */
+#define VP_SRC_H_POSITION_VAL(x) VP_MASK_VAL(x, 14, 4)
+
+/* VP_ENDIAN_MODE */
+#define VP_ENDIAN_MODE_LITTLE (1 << 0)
+
+#endif /* SAMSUNG_REGS_VP_H */
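A short illustration of how the VP_MODE bits compose (hypothetical helper, not part of the patch); note that VP_MODE_FMT_MASK, defined as (5 << 4), deliberately covers both the NV12/NV21 selection bit and the linear/tiled memory-layout bit.

#include <linux/io.h>

static void vp_set_nv12_tiled(void __iomem *vp_base)
{
	u32 val = readl(vp_base + VP_MODE);

	/* clear colour-order and memory-layout bits, then pick NV12 tiled */
	val &= ~VP_MODE_FMT_MASK;
	val |= VP_MODE_NV12 | VP_MODE_MEM_TILED;
	writel(val, vp_base + VP_MODE);
}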
diff --git a/drivers/media/video/s5p-tv/sdo_drv.c b/drivers/media/video/s5p-tv/sdo_drv.c
new file mode 100644
index 0000000..4dddd6b
--- /dev/null
+++ b/drivers/media/video/s5p-tv/sdo_drv.c
@@ -0,0 +1,479 @@
+/*
+ * Samsung Standard Definition Output (SDO) driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "regs-sdo.h"
+
+MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung Standard Definition Output (SDO)");
+MODULE_LICENSE("GPL");
+
+#define SDO_DEFAULT_STD V4L2_STD_PAL
+
+struct sdo_format {
+ v4l2_std_id id;
+ /* all modes are 720 pixels wide */
+ unsigned int height;
+ unsigned int cookie;
+};
+
+struct sdo_device {
+ /** pointer to device parent */
+ struct device *dev;
+ /** base address of SDO registers */
+ void __iomem *regs;
+ /** SDO interrupt */
+ unsigned int irq;
+ /** DAC source clock */
+ struct clk *sclk_dac;
+ /** DAC clock */
+ struct clk *dac;
+ /** DAC physical interface */
+ struct clk *dacphy;
+ /** clock for control of VPLL */
+ struct clk *fout_vpll;
+ /** regulator for SDO IP power */
+ struct regulator *vdac;
+ /** regulator for SDO plug detection */
+ struct regulator *vdet;
+ /** subdev used as device interface */
+ struct v4l2_subdev sd;
+ /** current format */
+ const struct sdo_format *fmt;
+};
+
+static inline struct sdo_device *sd_to_sdev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct sdo_device, sd);
+}
+
+static inline
+void sdo_write_mask(struct sdo_device *sdev, u32 reg_id, u32 value, u32 mask)
+{
+ u32 old = readl(sdev->regs + reg_id);
+ value = (value & mask) | (old & ~mask);
+ writel(value, sdev->regs + reg_id);
+}
+
+static inline
+void sdo_write(struct sdo_device *sdev, u32 reg_id, u32 value)
+{
+ writel(value, sdev->regs + reg_id);
+}
+
+static inline
+u32 sdo_read(struct sdo_device *sdev, u32 reg_id)
+{
+ return readl(sdev->regs + reg_id);
+}
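For readers skimming the register helpers: sdo_write_mask() is a plain read-modify-write, so only the bits selected by 'mask' change. A hedged illustration with made-up register contents:

/*
 * Assume SDO_CONFIG currently reads 0x13 (standard PAL_N, SDO_PROGRESSIVE set).
 *
 *   sdo_write_mask(sdev, SDO_CONFIG, SDO_NTSC_M, SDO_STANDARD_MASK);
 *
 * rewrites only the low nibble selected by SDO_STANDARD_MASK and leaves
 * bit 4 (SDO_PROGRESSIVE) untouched, so the register ends up as 0x10.
 */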
+
+static irqreturn_t sdo_irq_handler(int irq, void *dev_data)
+{
+ struct sdo_device *sdev = dev_data;
+
+ /* clear interrupt */
+ sdo_write_mask(sdev, SDO_IRQ, ~0, SDO_VSYNC_IRQ_PEND);
+ return IRQ_HANDLED;
+}
+
+static void sdo_reg_debug(struct sdo_device *sdev)
+{
+#define DBGREG(reg_id) \
+ dev_info(sdev->dev, #reg_id " = %08x\n", \
+ sdo_read(sdev, reg_id))
+
+ DBGREG(SDO_CLKCON);
+ DBGREG(SDO_CONFIG);
+ DBGREG(SDO_VBI);
+ DBGREG(SDO_DAC);
+ DBGREG(SDO_IRQ);
+ DBGREG(SDO_IRQMASK);
+ DBGREG(SDO_VERSION);
+}
+
+static const struct sdo_format sdo_format[] = {
+ { V4L2_STD_PAL_N, .height = 576, .cookie = SDO_PAL_N },
+ { V4L2_STD_PAL_Nc, .height = 576, .cookie = SDO_PAL_NC },
+ { V4L2_STD_PAL_M, .height = 480, .cookie = SDO_PAL_M },
+ { V4L2_STD_PAL_60, .height = 480, .cookie = SDO_PAL_60 },
+ { V4L2_STD_NTSC_443, .height = 480, .cookie = SDO_NTSC_443 },
+ { V4L2_STD_PAL, .height = 576, .cookie = SDO_PAL_BGHID },
+ { V4L2_STD_NTSC_M, .height = 480, .cookie = SDO_NTSC_M },
+};
+
+static const struct sdo_format *sdo_find_format(v4l2_std_id id)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(sdo_format); ++i)
+ if (sdo_format[i].id & id)
+ return &sdo_format[i];
+ return NULL;
+}
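sdo_find_format() relies on v4l2_std_id being a bitmask: the first table entry whose bits intersect the requested standard wins, which is why the table is ordered from more specific to more generic entries. A small, hedged illustration:

/*
 * V4L2_STD_PAL is the union of the PAL B/B1/G/H/I/D/D1/K bits, so
 * sdo_find_format(V4L2_STD_PAL_G) skips the PAL_N/PAL_Nc/PAL_M/PAL_60
 * entries and matches the V4L2_STD_PAL entry (cookie SDO_PAL_BGHID).
 * Passing V4L2_STD_SECAM matches nothing and the function returns NULL.
 */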
+
+static int sdo_g_tvnorms_output(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ *std = V4L2_STD_NTSC_M | V4L2_STD_PAL_M | V4L2_STD_PAL |
+ V4L2_STD_PAL_N | V4L2_STD_PAL_Nc |
+ V4L2_STD_NTSC_443 | V4L2_STD_PAL_60;
+ return 0;
+}
+
+static int sdo_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct sdo_device *sdev = sd_to_sdev(sd);
+ const struct sdo_format *fmt;
+ fmt = sdo_find_format(std);
+ if (fmt == NULL)
+ return -EINVAL;
+ sdev->fmt = fmt;
+ return 0;
+}
+
+static int sdo_g_std_output(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ *std = sd_to_sdev(sd)->fmt->id;
+ return 0;
+}
+
+static int sdo_g_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct sdo_device *sdev = sd_to_sdev(sd);
+
+ if (!sdev->fmt)
+ return -ENXIO;
+ /* all modes are 720 pixels wide */
+ fmt->width = 720;
+ fmt->height = sdev->fmt->height;
+ fmt->code = V4L2_MBUS_FMT_FIXED;
+ fmt->field = V4L2_FIELD_INTERLACED;
+ return 0;
+}
+
+static int sdo_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct sdo_device *sdev = sd_to_sdev(sd);
+ struct device *dev = sdev->dev;
+ int ret;
+
+ dev_info(dev, "sdo_s_power(%d)\n", on);
+
+ if (on)
+ ret = pm_runtime_get_sync(dev);
+ else
+ ret = pm_runtime_put_sync(dev);
+
+ /* only values < 0 indicate errors */
+ return IS_ERR_VALUE(ret) ? ret : 0;
+}
+
+static int sdo_streamon(struct sdo_device *sdev)
+{
+ /* set proper clock for Timing Generator */
+ clk_set_rate(sdev->fout_vpll, 54000000);
+ dev_info(sdev->dev, "fout_vpll.rate = %lu\n",
+ clk_get_rate(sdev->fout_vpll));
+ /* enable clock in SDO */
+ sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_CLOCK_ON);
+ clk_enable(sdev->dacphy);
+ /* enable DAC */
+ sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC);
+ sdo_reg_debug(sdev);
+ return 0;
+}
+
+static int sdo_streamoff(struct sdo_device *sdev)
+{
+ int tries;
+
+ sdo_write_mask(sdev, SDO_DAC, 0, SDO_POWER_ON_DAC);
+ clk_disable(sdev->dacphy);
+ sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
+ for (tries = 100; tries; --tries) {
+ if (sdo_read(sdev, SDO_CLKCON) & SDO_TVOUT_CLOCK_READY)
+ break;
+ mdelay(1);
+ }
+ if (tries == 0)
+ dev_err(sdev->dev, "failed to stop streaming\n");
+ return tries ? 0 : -EIO;
+}
+
+static int sdo_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct sdo_device *sdev = sd_to_sdev(sd);
+ return on ? sdo_streamon(sdev) : sdo_streamoff(sdev);
+}
+
+static const struct v4l2_subdev_core_ops sdo_sd_core_ops = {
+ .s_power = sdo_s_power,
+};
+
+static const struct v4l2_subdev_video_ops sdo_sd_video_ops = {
+ .s_std_output = sdo_s_std_output,
+ .g_std_output = sdo_g_std_output,
+ .g_tvnorms_output = sdo_g_tvnorms_output,
+ .g_mbus_fmt = sdo_g_mbus_fmt,
+ .s_stream = sdo_s_stream,
+};
+
+static const struct v4l2_subdev_ops sdo_sd_ops = {
+ .core = &sdo_sd_core_ops,
+ .video = &sdo_sd_video_ops,
+};
+
+static int sdo_runtime_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct sdo_device *sdev = sd_to_sdev(sd);
+
+ dev_info(dev, "suspend\n");
+ regulator_disable(sdev->vdet);
+ regulator_disable(sdev->vdac);
+ clk_disable(sdev->sclk_dac);
+ return 0;
+}
+
+static int sdo_runtime_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct sdo_device *sdev = sd_to_sdev(sd);
+
+ dev_info(dev, "resume\n");
+ clk_enable(sdev->sclk_dac);
+ regulator_enable(sdev->vdac);
+ regulator_enable(sdev->vdet);
+
+ /* software reset */
+ sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_SW_RESET);
+ mdelay(10);
+ sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_SW_RESET);
+
+ /* setting TV mode */
+ sdo_write_mask(sdev, SDO_CONFIG, sdev->fmt->cookie, SDO_STANDARD_MASK);
+ /* XXX: forcing interlaced mode using undocumented bit */
+ sdo_write_mask(sdev, SDO_CONFIG, 0, SDO_PROGRESSIVE);
+ /* turn all VBI off */
+ sdo_write_mask(sdev, SDO_VBI, 0, SDO_CVBS_WSS_INS |
+ SDO_CVBS_CLOSED_CAPTION_MASK);
+ /* turn all post processing off */
+ sdo_write_mask(sdev, SDO_CCCON, ~0, SDO_COMPENSATION_BHS_ADJ_OFF |
+ SDO_COMPENSATION_CVBS_COMP_OFF);
+ sdo_reg_debug(sdev);
+ return 0;
+}
+
+static const struct dev_pm_ops sdo_pm_ops = {
+ .runtime_suspend = sdo_runtime_suspend,
+ .runtime_resume = sdo_runtime_resume,
+};
+
+static int __devinit sdo_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdo_device *sdev;
+ struct resource *res;
+ int ret = 0;
+ struct clk *sclk_vpll;
+
+ dev_info(dev, "probe start\n");
+ sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+ if (!sdev) {
+ dev_err(dev, "not enough memory.\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ sdev->dev = dev;
+
+ /* mapping registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail_sdev;
+ }
+
+ sdev->regs = ioremap(res->start, resource_size(res));
+ if (sdev->regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail_sdev;
+ }
+
+ /* acquiring interrupt */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(dev, "get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto fail_regs;
+ }
+ ret = request_irq(res->start, sdo_irq_handler, 0, "s5p-sdo", sdev);
+ if (ret) {
+ dev_err(dev, "request interrupt failed.\n");
+ goto fail_regs;
+ }
+ sdev->irq = res->start;
+
+ /* acquire clocks */
+ sdev->sclk_dac = clk_get(dev, "sclk_dac");
+ if (IS_ERR_OR_NULL(sdev->sclk_dac)) {
+ dev_err(dev, "failed to get clock 'sclk_dac'\n");
+ ret = -ENXIO;
+ goto fail_irq;
+ }
+ sdev->dac = clk_get(dev, "dac");
+ if (IS_ERR_OR_NULL(sdev->dac)) {
+ dev_err(dev, "failed to get clock 'dac'\n");
+ ret = -ENXIO;
+ goto fail_sclk_dac;
+ }
+ sdev->dacphy = clk_get(dev, "dacphy");
+ if (IS_ERR_OR_NULL(sdev->dacphy)) {
+ dev_err(dev, "failed to get clock 'dacphy'\n");
+ ret = -ENXIO;
+ goto fail_dac;
+ }
+ sclk_vpll = clk_get(dev, "sclk_vpll");
+ if (IS_ERR_OR_NULL(sclk_vpll)) {
+ dev_err(dev, "failed to get clock 'sclk_vpll'\n");
+ ret = -ENXIO;
+ goto fail_dacphy;
+ }
+ clk_set_parent(sdev->sclk_dac, sclk_vpll);
+ clk_put(sclk_vpll);
+ sdev->fout_vpll = clk_get(dev, "fout_vpll");
+ if (IS_ERR_OR_NULL(sdev->fout_vpll)) {
+ dev_err(dev, "failed to get clock 'fout_vpll'\n");
+ goto fail_dacphy;
+ }
+ dev_info(dev, "fout_vpll.rate = %lu\n", clk_get_rate(sclk_vpll));
+
+ /* acquire regulator */
+ sdev->vdac = regulator_get(dev, "vdd33a_dac");
+ if (IS_ERR_OR_NULL(sdev->vdac)) {
+ dev_err(dev, "failed to get regulator 'vdac'\n");
+ goto fail_fout_vpll;
+ }
+ sdev->vdet = regulator_get(dev, "vdet");
+ if (IS_ERR_OR_NULL(sdev->vdet)) {
+ dev_err(dev, "failed to get regulator 'vdet'\n");
+ goto fail_vdac;
+ }
+
+ /* enable gate for dac clock, because mixer uses it */
+ clk_enable(sdev->dac);
+
+ /* configure power management */
+ pm_runtime_enable(dev);
+
+ /* configuration of interface subdevice */
+ v4l2_subdev_init(&sdev->sd, &sdo_sd_ops);
+ sdev->sd.owner = THIS_MODULE;
+ strlcpy(sdev->sd.name, "s5p-sdo", sizeof sdev->sd.name);
+
+ /* set default format */
+ sdev->fmt = sdo_find_format(SDO_DEFAULT_STD);
+ BUG_ON(sdev->fmt == NULL);
+
+ /* keep the subdev in the device's drvdata for use by other drivers */
+ dev_set_drvdata(dev, &sdev->sd);
+
+ dev_info(dev, "probe succeeded\n");
+ return 0;
+
+fail_vdac:
+ regulator_put(sdev->vdac);
+fail_fout_vpll:
+ clk_put(sdev->fout_vpll);
+fail_dacphy:
+ clk_put(sdev->dacphy);
+fail_dac:
+ clk_put(sdev->dac);
+fail_sclk_dac:
+ clk_put(sdev->sclk_dac);
+fail_irq:
+ free_irq(sdev->irq, sdev);
+fail_regs:
+ iounmap(sdev->regs);
+fail_sdev:
+ kfree(sdev);
+fail:
+ dev_info(dev, "probe failed\n");
+ return ret;
+}
+
+static int __devexit sdo_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
+ struct sdo_device *sdev = sd_to_sdev(sd);
+
+ pm_runtime_disable(&pdev->dev);
+ clk_disable(sdev->dac);
+ regulator_put(sdev->vdet);
+ regulator_put(sdev->vdac);
+ clk_put(sdev->fout_vpll);
+ clk_put(sdev->dacphy);
+ clk_put(sdev->dac);
+ clk_put(sdev->sclk_dac);
+ free_irq(sdev->irq, sdev);
+ iounmap(sdev->regs);
+ kfree(sdev);
+
+ dev_info(&pdev->dev, "remove successful\n");
+ return 0;
+}
+
+static struct platform_driver sdo_driver __refdata = {
+ .probe = sdo_probe,
+ .remove = __devexit_p(sdo_remove),
+ .driver = {
+ .name = "s5p-sdo",
+ .owner = THIS_MODULE,
+ .pm = &sdo_pm_ops,
+ }
+};
+
+static int __init sdo_init(void)
+{
+ int ret;
+ static const char banner[] __initdata = KERN_INFO \
+ "Samsung Standard Definition Output (SDO) driver, "
+ "(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
+ printk(banner);
+
+ ret = platform_driver_register(&sdo_driver);
+ if (ret)
+ printk(KERN_ERR "SDO platform driver register failed\n");
+
+ return ret;
+}
+module_init(sdo_init);
+
+static void __exit sdo_exit(void)
+{
+ platform_driver_unregister(&sdo_driver);
+}
+module_exit(sdo_exit);
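As sdo_probe() notes, the subdev is stashed in the platform device's drvdata so that another driver (the mixer) can pick it up. A minimal consumer-side sketch, assuming the caller already holds a 'struct device *sdo_dev' for the SDO platform device (the helper name is hypothetical):

#include <linux/device.h>
#include <media/v4l2-subdev.h>

/* Hypothetical consumer helper: power up the SDO subdev and start output. */
static int start_sd_output(struct device *sdo_dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(sdo_dev);
	int ret;

	ret = v4l2_subdev_call(sd, core, s_power, 1);
	if (ret && ret != -ENOIOCTLCMD)
		return ret;

	return v4l2_subdev_call(sd, video, s_stream, 1);
}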
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index 0db9092..f2ae405 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -757,8 +757,8 @@ static int saa711x_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_CHROMA_AGC:
/* chroma gain cluster */
- if (state->agc->cur.val)
- state->gain->cur.val =
+ if (state->agc->val)
+ state->gain->val =
saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f;
break;
}
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index e2062b2..0f9fb99 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -4951,8 +4951,9 @@ struct saa7134_board saa7134_boards[] = {
.audio_clock = 0x00187de7,
.tuner_type = TUNER_XC2028,
.radio_type = UNSET,
- .tuner_addr = ADDR_UNSET,
+ .tuner_addr = 0x61,
.radio_addr = ADDR_UNSET,
+ .mpeg = SAA7134_MPEG_DVB,
.inputs = {{
.name = name_tv,
.vmux = 3,
@@ -6992,6 +6993,11 @@ static int saa7134_xc2028_callback(struct saa7134_dev *dev,
msleep(10);
saa7134_set_gpio(dev, 18, 1);
break;
+ case SAA7134_BOARD_VIDEOMATE_T750:
+ saa7134_set_gpio(dev, 20, 0);
+ msleep(10);
+ saa7134_set_gpio(dev, 20, 1);
+ break;
}
return 0;
}
@@ -7451,6 +7457,11 @@ int saa7134_board_init1(struct saa7134_dev *dev)
saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x0e050000, 0x0c050000);
saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0e050000, 0x0c050000);
break;
+ case SAA7134_BOARD_VIDEOMATE_T750:
+ /* enable the analog tuner */
+ saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x00008000, 0x00008000);
+ saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x00008000, 0x00008000);
+ break;
}
return 0;
}
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index f9be737..ca65cda 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -39,6 +39,8 @@
MODULE_DESCRIPTION("v4l2 driver module for saa7130/34 based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
+MODULE_VERSION(SAA7134_VERSION);
+
/* ------------------------------------------------------------------ */
@@ -1332,14 +1334,8 @@ static struct pci_driver saa7134_pci_driver = {
static int __init saa7134_init(void)
{
INIT_LIST_HEAD(&saa7134_devlist);
- printk(KERN_INFO "saa7130/34: v4l2 driver version %d.%d.%d loaded\n",
- (SAA7134_VERSION_CODE >> 16) & 0xff,
- (SAA7134_VERSION_CODE >> 8) & 0xff,
- SAA7134_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "saa7130/34: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "saa7130/34: v4l2 driver version %s loaded\n",
+ SAA7134_VERSION);
return pci_register_driver(&saa7134_pci_driver);
}
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 996a206..1e4ef16 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -56,6 +56,7 @@
#include "lgs8gxx.h"
#include "zl10353.h"
+#include "qt1010.h"
#include "zl10036.h"
#include "zl10039.h"
@@ -939,6 +940,18 @@ static struct zl10353_config behold_x7_config = {
.disable_i2c_gate_ctrl = 1,
};
+static struct zl10353_config videomate_t750_zl10353_config = {
+ .demod_address = 0x0f,
+ .no_tuner = 1,
+ .parallel_ts = 1,
+ .disable_i2c_gate_ctrl = 1,
+};
+
+static struct qt1010_config videomate_t750_qt1010_config = {
+ .i2c_address = 0x62
+};
+
+
/* ==================================================================
* tda10086 based DVB-S cards, helper functions
*/
@@ -1650,6 +1663,18 @@ static int dvb_init(struct saa7134_dev *dev)
__func__);
break;
+ case SAA7134_BOARD_VIDEOMATE_T750:
+ fe0->dvb.frontend = dvb_attach(zl10353_attach,
+ &videomate_t750_zl10353_config,
+ &dev->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ if (dvb_attach(qt1010_attach,
+ fe0->dvb.frontend,
+ &dev->i2c_adap,
+ &videomate_t750_qt1010_config) == NULL)
+ wprintk("error attaching QT1010\n");
+ }
+ break;
case SAA7134_BOARD_ZOLID_HYBRID_PCI:
fe0->dvb.frontend = dvb_attach(tda10048_attach,
&zolid_tda10048_config,
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 18294db..dde361a 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -172,7 +172,6 @@ static int empress_querycap(struct file *file, void *priv,
strlcpy(cap->card, saa7134_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->version = SAA7134_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 776ba2d..9cf7914f 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1810,7 +1810,6 @@ static int saa7134_querycap(struct file *file, void *priv,
strlcpy(cap->card, saa7134_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->version = SAA7134_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VBI_CAPTURE |
@@ -2307,7 +2306,6 @@ static int radio_querycap(struct file *file, void *priv,
strcpy(cap->driver, "saa7134");
strlcpy(cap->card, saa7134_boards[dev->board].name, sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->version = SAA7134_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
}
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 28eb103..bc8d6bb 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -19,8 +19,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/version.h>
-#define SAA7134_VERSION_CODE KERNEL_VERSION(0, 2, 16)
+#define SAA7134_VERSION "0, 2, 17"
#include <linux/pci.h>
#include <linux/i2c.h>
diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c
index 4003645..2fd38a0 100644
--- a/drivers/media/video/saa7164/saa7164-encoder.c
+++ b/drivers/media/video/saa7164/saa7164-encoder.c
@@ -1246,7 +1246,6 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
struct saa7164_encoder_fh *fh =
(struct saa7164_encoder_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- struct saa7164_user_buffer *ubuf;
unsigned int mask = 0;
port->last_poll_msecs_diff = port->last_poll_msecs;
@@ -1278,10 +1277,7 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
}
/* Pull the first buffer from the used list */
- ubuf = list_first_entry(&port->list_buf_used.list,
- struct saa7164_user_buffer, list);
-
- if (ubuf)
+ if (!list_empty(&port->list_buf_used.list))
mask |= POLLIN | POLLRDNORM;
return mask;
diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
index bc1fced..e2e0341 100644
--- a/drivers/media/video/saa7164/saa7164-vbi.c
+++ b/drivers/media/video/saa7164/saa7164-vbi.c
@@ -1192,7 +1192,6 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
{
struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- struct saa7164_user_buffer *ubuf;
unsigned int mask = 0;
port->last_poll_msecs_diff = port->last_poll_msecs;
@@ -1224,10 +1223,7 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
}
/* Pull the first buffer from the used list */
- ubuf = list_first_entry(&port->list_buf_used.list,
- struct saa7164_user_buffer, list);
-
- if (ubuf)
+ if (!list_empty(&port->list_buf_used.list))
mask |= POLLIN | POLLRDNORM;
return mask;
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h
index 16745d2..6678bf1 100644
--- a/drivers/media/video/saa7164/saa7164.h
+++ b/drivers/media/video/saa7164/saa7164.h
@@ -48,7 +48,6 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/kdev_t.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
#include <linux/kthread.h>
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 3ae5c9c..e540898 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -27,7 +27,6 @@
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -39,6 +38,7 @@
#include <media/v4l2-dev.h>
#include <media/soc_camera.h>
#include <media/sh_mobile_ceu.h>
+#include <media/sh_mobile_csi2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-mediabus.h>
#include <media/soc_mediabus.h>
@@ -96,6 +96,7 @@ struct sh_mobile_ceu_buffer {
struct sh_mobile_ceu_dev {
struct soc_camera_host ici;
struct soc_camera_device *icd;
+ struct platform_device *csi2_pdev;
unsigned int irq;
void __iomem *base;
@@ -205,7 +206,7 @@ static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
if (2 != success) {
- dev_warn(&icd->dev, "soft reset time out\n");
+ dev_warn(icd->pdev, "soft reset time out\n");
return -EIO;
}
@@ -220,7 +221,7 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
unsigned long sizes[], void *alloc_ctxs[])
{
struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
@@ -242,7 +243,7 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
*count = pcdev->video_limit / PAGE_ALIGN(sizes[0]);
}
- dev_dbg(icd->dev.parent, "count=%d, size=%lu\n", *count, sizes[0]);
+ dev_dbg(icd->parent, "count=%d, size=%lu\n", *count, sizes[0]);
return 0;
}
@@ -351,7 +352,7 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
buf = to_ceu_vb(vb);
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
/* Added list head initialization on alloc */
@@ -371,7 +372,7 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
size = icd->user_height * bytes_per_line;
if (vb2_plane_size(vb, 0) < size) {
- dev_err(icd->dev.parent, "Buffer too small (%lu < %lu)\n",
+ dev_err(icd->parent, "Buffer too small (%lu < %lu)\n",
vb2_plane_size(vb, 0), size);
return -ENOBUFS;
}
@@ -384,11 +385,11 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
spin_lock_irq(&pcdev->lock);
@@ -409,7 +410,7 @@ static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -421,8 +422,12 @@ static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
pcdev->active = NULL;
}
- /* Doesn't hurt also if the list is empty */
- list_del_init(&buf->queue);
+ /*
+ * Doesn't hurt also if the list is empty, but it hurts, if queuing the
+ * buffer failed, and .buf_init() hasn't been called
+ */
+ if (buf->queue.next)
+ list_del_init(&buf->queue);
spin_unlock_irq(&pcdev->lock);
}
@@ -437,7 +442,7 @@ static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
static int sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
{
struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct list_head *buf_head, *tmp;
@@ -499,25 +504,48 @@ out:
return IRQ_HANDLED;
}
+static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
+{
+ struct v4l2_subdev *sd;
+
+ if (!pcdev->csi2_pdev)
+ return NULL;
+
+ v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev)
+ if (&pcdev->csi2_pdev->dev == v4l2_get_subdevdata(sd))
+ return sd;
+
+ return NULL;
+}
+
/* Called with .video_lock held */
static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_subdev *csi2_sd;
int ret;
if (pcdev->icd)
return -EBUSY;
- dev_info(icd->dev.parent,
+ dev_info(icd->parent,
"SuperH Mobile CEU driver attached to camera %d\n",
icd->devnum);
pm_runtime_get_sync(ici->v4l2_dev.dev);
ret = sh_mobile_ceu_soft_reset(pcdev);
- if (!ret)
+
+ csi2_sd = find_csi2(pcdev);
+
+ ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
+ if (ret != -ENODEV && ret != -ENOIOCTLCMD && ret < 0) {
+ pm_runtime_put_sync(ici->v4l2_dev.dev);
+ } else {
pcdev->icd = icd;
+ ret = 0;
+ }
return ret;
}
@@ -525,11 +553,13 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
/* Called with .video_lock held */
static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
BUG_ON(icd != pcdev->icd);
+ v4l2_subdev_call(csi2_sd, core, s_power, 0);
/* disable capture, disable interrupts */
ceu_write(pcdev, CEIER, 0);
sh_mobile_ceu_soft_reset(pcdev);
@@ -545,7 +575,7 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
pm_runtime_put_sync(ici->v4l2_dev.dev);
- dev_info(icd->dev.parent,
+ dev_info(icd->parent,
"SuperH Mobile CEU driver detached from camera %d\n",
icd->devnum);
@@ -585,14 +615,14 @@ static u16 calc_scale(unsigned int src, unsigned int *dst)
/* rect is guaranteed to not exceed the scaled camera rectangle */
static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned int height, width, cdwdr_width, in_width, in_height;
unsigned int left_offset, top_offset;
u32 camor;
- dev_geo(icd->dev.parent, "Crop %ux%u@%u:%u\n",
+ dev_geo(icd->parent, "Crop %ux%u@%u:%u\n",
icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
left_offset = cam->ceu_left;
@@ -641,7 +671,7 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
}
/* CSI2 special configuration */
- if (pcdev->pdata->csi2_dev) {
+ if (pcdev->pdata->csi2) {
in_width = ((in_width - 2) * 2);
left_offset *= 2;
}
@@ -649,7 +679,7 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
/* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
camor = left_offset | (top_offset << 16);
- dev_geo(icd->dev.parent,
+ dev_geo(icd->parent,
"CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
(in_height << 16) | in_width, (height << 16) | width,
cdwdr_width);
@@ -697,7 +727,7 @@ static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
__u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
int ret;
unsigned long camera_flags, common_flags, value;
@@ -783,7 +813,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
value |= pcdev->is_16bit ? 1 << 12 : 0;
/* CSI2 mode */
- if (pcdev->pdata->csi2_dev)
+ if (pcdev->pdata->csi2)
value |= 3 << 12;
ceu_write(pcdev, CAMCR, value);
@@ -806,7 +836,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
sh_mobile_ceu_set_rect(icd);
mdelay(1);
- dev_geo(icd->dev.parent, "CFLCR 0x%x\n", pcdev->cflcr);
+ dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr);
ceu_write(pcdev, CFLCR, pcdev->cflcr);
/*
@@ -829,7 +859,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
ceu_write(pcdev, CDOCR, value);
ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
- dev_dbg(icd->dev.parent, "S_FMT successful for %c%c%c%c %ux%u\n",
+ dev_dbg(icd->parent, "S_FMT successful for %c%c%c%c %ux%u\n",
pixfmt & 0xff, (pixfmt >> 8) & 0xff,
(pixfmt >> 16) & 0xff, (pixfmt >> 24) & 0xff,
icd->user_width, icd->user_height);
@@ -843,7 +873,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
unsigned char buswidth)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned long camera_flags, common_flags;
@@ -901,7 +931,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
struct soc_camera_format_xlate *xlate)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
struct soc_camera_host *ici = to_soc_camera_host(dev);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
int ret, k, n;
@@ -921,7 +951,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
return 0;
}
- if (!pcdev->pdata->csi2_dev) {
+ if (!pcdev->pdata->csi2) {
ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
if (ret < 0)
return 0;
@@ -1244,7 +1274,7 @@ static int client_s_fmt(struct soc_camera_device *icd,
{
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
unsigned int max_width, max_height;
struct v4l2_cropcap cap;
@@ -1313,7 +1343,7 @@ static int client_scale(struct soc_camera_device *icd,
bool ceu_can_scale)
{
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
struct v4l2_mbus_framefmt mf_tmp = *mf;
unsigned int scale_h, scale_v;
int ret;
@@ -1363,13 +1393,13 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
struct v4l2_crop *a)
{
struct v4l2_rect *rect = &a->c;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct v4l2_crop cam_crop;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_rect *cam_rect = &cam_crop.c;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf;
unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
out_width, out_height;
@@ -1511,7 +1541,7 @@ static void calculate_client_output(struct soc_camera_device *icd,
struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf)
{
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
struct v4l2_rect *cam_subrect = &cam->subrect;
unsigned int scale_v, scale_h;
@@ -1555,12 +1585,12 @@ static void calculate_client_output(struct soc_camera_device *icd,
static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_mbus_framefmt mf;
- struct device *dev = icd->dev.parent;
__u32 pixfmt = pix->pixelformat;
const struct soc_camera_format_xlate *xlate;
/* Keep Compiler Happy */
@@ -1684,12 +1714,12 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
int width, height;
int ret;
- dev_geo(icd->dev.parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
+ dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
pixfmt, pix->width, pix->height);
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -1701,11 +1731,6 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
width = pix->width;
height = pix->height;
- pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt);
- if ((int)pix->bytesperline < 0)
- return pix->bytesperline;
- pix->sizeimage = height * pix->bytesperline;
-
/* limit to sensor capabilities */
mf.width = pix->width;
mf.height = pix->height;
@@ -1741,7 +1766,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
try_mbus_fmt, &mf);
if (ret < 0) {
/* Shouldn't actually happen... */
- dev_err(icd->dev.parent,
+ dev_err(icd->parent,
"FIXME: client try_fmt() = %d\n", ret);
return ret;
}
@@ -1753,7 +1778,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
pix->height = height;
}
- dev_geo(icd->dev.parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
+ dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
__func__, ret, pix->pixelformat, pix->width, pix->height);
return ret;
@@ -1763,7 +1788,7 @@ static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
struct v4l2_crop *a)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
u32 out_width = icd->user_width, out_height = icd->user_height;
int ret;
@@ -1775,13 +1800,13 @@ static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
/* Stop the client */
ret = v4l2_subdev_call(sd, video, s_stream, 0);
if (ret < 0)
- dev_warn(icd->dev.parent,
+ dev_warn(icd->parent,
"Client failed to stop the stream: %d\n", ret);
else
/* Do the crop, if it fails, there's nothing more we can do */
sh_mobile_ceu_set_crop(icd, a);
- dev_geo(icd->dev.parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
+ dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
if (icd->user_width != out_width || icd->user_height != out_height) {
struct v4l2_format f = {
@@ -1827,7 +1852,6 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
- cap->version = KERNEL_VERSION(0, 0, 5);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
}
@@ -1848,7 +1872,7 @@ static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd,
struct v4l2_control *ctrl)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
u32 val;
@@ -1864,7 +1888,7 @@ static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd,
static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd,
struct v4l2_control *ctrl)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
switch (ctrl->id) {
@@ -1950,7 +1974,7 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
.completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
.notifier.notifier_call = bus_notify,
};
- struct device *csi2;
+ struct sh_mobile_ceu_companion *csi2;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -2023,26 +2047,61 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
pcdev->ici.drv_name = dev_name(&pdev->dev);
pcdev->ici.ops = &sh_mobile_ceu_host_ops;
+ pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(pcdev->alloc_ctx)) {
+ err = PTR_ERR(pcdev->alloc_ctx);
+ goto exit_free_clk;
+ }
+
+ err = soc_camera_host_register(&pcdev->ici);
+ if (err)
+ goto exit_free_ctx;
+
/* CSI2 interfacing */
- csi2 = pcdev->pdata->csi2_dev;
+ csi2 = pcdev->pdata->csi2;
if (csi2) {
- wait.dev = csi2;
+ struct platform_device *csi2_pdev =
+ platform_device_alloc("sh-mobile-csi2", csi2->id);
+ struct sh_csi2_pdata *csi2_pdata = csi2->platform_data;
+
+ if (!csi2_pdev) {
+ err = -ENOMEM;
+ goto exit_host_unregister;
+ }
+
+ pcdev->csi2_pdev = csi2_pdev;
+
+ err = platform_device_add_data(csi2_pdev, csi2_pdata, sizeof(*csi2_pdata));
+ if (err < 0)
+ goto exit_pdev_put;
+
+ csi2_pdata = csi2_pdev->dev.platform_data;
+ csi2_pdata->v4l2_dev = &pcdev->ici.v4l2_dev;
+
+ csi2_pdev->resource = csi2->resource;
+ csi2_pdev->num_resources = csi2->num_resources;
+
+ err = platform_device_add(csi2_pdev);
+ if (err < 0)
+ goto exit_pdev_put;
+
+ wait.dev = &csi2_pdev->dev;
err = bus_register_notifier(&platform_bus_type, &wait.notifier);
if (err < 0)
- goto exit_free_clk;
+ goto exit_pdev_unregister;
/*
* From this point the driver module will not unload, until
* we complete the completion.
*/
- if (!csi2->driver) {
+ if (!csi2_pdev->dev.driver) {
complete(&wait.completion);
/* Either too late, or probing failed */
bus_unregister_notifier(&platform_bus_type, &wait.notifier);
err = -ENXIO;
- goto exit_free_clk;
+ goto exit_pdev_unregister;
}
/*
@@ -2051,34 +2110,28 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
* the "owner" is safe!
*/
- err = try_module_get(csi2->driver->owner);
+ err = try_module_get(csi2_pdev->dev.driver->owner);
/* Let notifier complete, if it has been locked */
complete(&wait.completion);
bus_unregister_notifier(&platform_bus_type, &wait.notifier);
if (!err) {
err = -ENODEV;
- goto exit_free_clk;
+ goto exit_pdev_unregister;
}
}
- pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(pcdev->alloc_ctx)) {
- err = PTR_ERR(pcdev->alloc_ctx);
- goto exit_module_put;
- }
-
- err = soc_camera_host_register(&pcdev->ici);
- if (err)
- goto exit_free_ctx;
-
return 0;
+exit_pdev_unregister:
+ platform_device_del(pcdev->csi2_pdev);
+exit_pdev_put:
+ pcdev->csi2_pdev->resource = NULL;
+ platform_device_put(pcdev->csi2_pdev);
+exit_host_unregister:
+ soc_camera_host_unregister(&pcdev->ici);
exit_free_ctx:
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
-exit_module_put:
- if (csi2 && csi2->driver)
- module_put(csi2->driver->owner);
exit_free_clk:
pm_runtime_disable(&pdev->dev);
free_irq(pcdev->irq, pcdev);
@@ -2098,7 +2151,7 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
struct sh_mobile_ceu_dev, ici);
- struct device *csi2 = pcdev->pdata->csi2_dev;
+ struct platform_device *csi2_pdev = pcdev->csi2_pdev;
soc_camera_host_unregister(soc_host);
pm_runtime_disable(&pdev->dev);
@@ -2107,8 +2160,13 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
dma_release_declared_memory(&pdev->dev);
iounmap(pcdev->base);
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
- if (csi2 && csi2->driver)
- module_put(csi2->driver->owner);
+ if (csi2_pdev && csi2_pdev->dev.driver) {
+ struct module *csi2_drv = csi2_pdev->dev.driver->owner;
+ platform_device_del(csi2_pdev);
+ csi2_pdev->resource = NULL;
+ platform_device_put(csi2_pdev);
+ module_put(csi2_drv);
+ }
kfree(pcdev);
return 0;
@@ -2158,4 +2216,5 @@ module_exit(sh_mobile_ceu_exit);
MODULE_DESCRIPTION("SuperH Mobile CEU driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.6");
MODULE_ALIAS("platform:sh_mobile_ceu");
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index 98b8748..2893a01 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <media/sh_mobile_ceu.h>
#include <media/sh_mobile_csi2.h>
#include <media/soc_camera.h>
#include <media/v4l2-common.h>
@@ -33,7 +34,6 @@
struct sh_csi2 {
struct v4l2_subdev subdev;
struct list_head list;
- struct notifier_block notifier;
unsigned int irq;
void __iomem *base;
struct platform_device *pdev;
@@ -132,13 +132,6 @@ static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = {
.try_mbus_fmt = sh_csi2_try_fmt,
};
-static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops;
-
-static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
- .core = &sh_csi2_subdev_core_ops,
- .video = &sh_csi2_subdev_video_ops,
-};
-
static void sh_csi2_hwinit(struct sh_csi2 *priv)
{
struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
@@ -186,65 +179,84 @@ static unsigned long sh_csi2_query_bus_param(struct soc_camera_device *icd)
return soc_camera_apply_sensor_flags(icl, flags);
}
-static int sh_csi2_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static int sh_csi2_client_connect(struct sh_csi2 *priv)
{
- struct device *dev = data;
- struct soc_camera_device *icd = to_soc_camera_dev(dev);
- struct v4l2_device *v4l2_dev = dev_get_drvdata(dev->parent);
- struct sh_csi2 *priv =
- container_of(nb, struct sh_csi2, notifier);
struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- int ret, i;
+ struct v4l2_subdev *sd, *csi2_sd = &priv->subdev;
+ struct soc_camera_device *icd = NULL;
+ struct device *dev = v4l2_get_subdevdata(&priv->subdev);
+ int i;
+
+ v4l2_device_for_each_subdev(sd, csi2_sd->v4l2_dev)
+ if (sd->grp_id) {
+ icd = (struct soc_camera_device *)sd->grp_id;
+ break;
+ }
+
+ if (!icd)
+ return -EINVAL;
for (i = 0; i < pdata->num_clients; i++)
if (&pdata->clients[i].pdev->dev == icd->pdev)
break;
- dev_dbg(dev, "%s(%p): action = %lu, found #%d\n", __func__, dev, action, i);
+ dev_dbg(dev, "%s(%p): found #%d\n", __func__, dev, i);
if (i == pdata->num_clients)
- return NOTIFY_DONE;
+ return -ENODEV;
- switch (action) {
- case BUS_NOTIFY_BOUND_DRIVER:
- snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s%s",
- dev_name(v4l2_dev->dev), ".mipi-csi");
- priv->subdev.grp_id = (long)icd;
- ret = v4l2_device_register_subdev(v4l2_dev, &priv->subdev);
- dev_dbg(dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret);
- if (ret < 0)
- return NOTIFY_DONE;
+ priv->client = pdata->clients + i;
- priv->client = pdata->clients + i;
+ priv->set_bus_param = icd->ops->set_bus_param;
+ priv->query_bus_param = icd->ops->query_bus_param;
+ icd->ops->set_bus_param = sh_csi2_set_bus_param;
+ icd->ops->query_bus_param = sh_csi2_query_bus_param;
- priv->set_bus_param = icd->ops->set_bus_param;
- priv->query_bus_param = icd->ops->query_bus_param;
- icd->ops->set_bus_param = sh_csi2_set_bus_param;
- icd->ops->query_bus_param = sh_csi2_query_bus_param;
+ csi2_sd->grp_id = (long)icd;
- pm_runtime_get_sync(v4l2_get_subdevdata(&priv->subdev));
+ pm_runtime_get_sync(dev);
- sh_csi2_hwinit(priv);
- break;
- case BUS_NOTIFY_UNBIND_DRIVER:
- priv->client = NULL;
+ sh_csi2_hwinit(priv);
- /* Driver is about to be unbound */
- icd->ops->set_bus_param = priv->set_bus_param;
- icd->ops->query_bus_param = priv->query_bus_param;
- priv->set_bus_param = NULL;
- priv->query_bus_param = NULL;
+ return 0;
+}
- v4l2_device_unregister_subdev(&priv->subdev);
+static void sh_csi2_client_disconnect(struct sh_csi2 *priv)
+{
+ struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
- pm_runtime_put(v4l2_get_subdevdata(&priv->subdev));
- break;
- }
+ priv->client = NULL;
+ priv->subdev.grp_id = 0;
- return NOTIFY_OK;
+ /* Driver is about to be unbound */
+ icd->ops->set_bus_param = priv->set_bus_param;
+ icd->ops->query_bus_param = priv->query_bus_param;
+ priv->set_bus_param = NULL;
+ priv->query_bus_param = NULL;
+
+ pm_runtime_put(v4l2_get_subdevdata(&priv->subdev));
}
+static int sh_csi2_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
+
+ if (on)
+ return sh_csi2_client_connect(priv);
+
+ sh_csi2_client_disconnect(priv);
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops = {
+ .s_power = sh_csi2_s_power,
+};
+
+static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
+ .core = &sh_csi2_subdev_core_ops,
+ .video = &sh_csi2_subdev_video_ops,
+};
+
static __devinit int sh_csi2_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -274,14 +286,6 @@ static __devinit int sh_csi2_probe(struct platform_device *pdev)
return -ENOMEM;
priv->irq = irq;
- priv->notifier.notifier_call = sh_csi2_notify;
-
- /* We MUST attach after the MIPI sensor */
- ret = bus_register_notifier(&soc_camera_bus_type, &priv->notifier);
- if (ret < 0) {
- dev_err(&pdev->dev, "CSI2 cannot register notifier\n");
- goto ernotify;
- }
if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
dev_err(&pdev->dev, "CSI2 register region already claimed\n");
@@ -297,11 +301,17 @@ static __devinit int sh_csi2_probe(struct platform_device *pdev)
}
priv->pdev = pdev;
+ platform_set_drvdata(pdev, priv);
v4l2_subdev_init(&priv->subdev, &sh_csi2_subdev_ops);
v4l2_set_subdevdata(&priv->subdev, &pdev->dev);
- platform_set_drvdata(pdev, priv);
+ snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.mipi-csi",
+ dev_name(pdata->v4l2_dev->dev));
+ ret = v4l2_device_register_subdev(pdata->v4l2_dev, &priv->subdev);
+ dev_dbg(&pdev->dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret);
+ if (ret < 0)
+ goto esdreg;
pm_runtime_enable(&pdev->dev);
@@ -309,11 +319,11 @@ static __devinit int sh_csi2_probe(struct platform_device *pdev)
return 0;
+esdreg:
+ iounmap(priv->base);
eremap:
release_mem_region(res->start, resource_size(res));
ereqreg:
- bus_unregister_notifier(&soc_camera_bus_type, &priv->notifier);
-ernotify:
kfree(priv);
return ret;
@@ -324,7 +334,7 @@ static __devexit int sh_csi2_remove(struct platform_device *pdev)
struct sh_csi2 *priv = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bus_unregister_notifier(&soc_camera_bus_type, &priv->notifier);
+ v4l2_device_unregister_subdev(&priv->subdev);
pm_runtime_disable(&pdev->dev);
iounmap(priv->base);
release_mem_region(res->start, resource_size(res));
@@ -335,8 +345,9 @@ static __devexit int sh_csi2_remove(struct platform_device *pdev)
}
static struct platform_driver __refdata sh_csi2_pdrv = {
- .remove = __devexit_p(sh_csi2_remove),
- .driver = {
+ .remove = __devexit_p(sh_csi2_remove),
+ .probe = sh_csi2_probe,
+ .driver = {
.name = "sh-mobile-csi2",
.owner = THIS_MODULE,
},
@@ -344,7 +355,7 @@ static struct platform_driver __refdata sh_csi2_pdrv = {
static int __init sh_csi2_init(void)
{
- return platform_driver_probe(&sh_csi2_pdrv, sh_csi2_probe);
+ return platform_driver_register(&sh_csi2_pdrv);
}
static void __exit sh_csi2_exit(void)
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
index 07cf0c6..6a72987 100644
--- a/drivers/media/video/sh_vou.c
+++ b/drivers/media/video/sh_vou.c
@@ -19,7 +19,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/sh_vou.h>
@@ -393,7 +392,6 @@ static int sh_vou_querycap(struct file *file, void *priv,
dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
- cap->version = KERNEL_VERSION(0, 1, 0);
cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
return 0;
}
@@ -1490,4 +1488,5 @@ module_exit(sh_vou_exit);
MODULE_DESCRIPTION("SuperH VOU driver");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1.0");
MODULE_ALIAS("platform:sh-vou");
diff --git a/drivers/media/video/sn9c102/sn9c102.h b/drivers/media/video/sn9c102/sn9c102.h
index cbfc444..22ea211 100644
--- a/drivers/media/video/sn9c102/sn9c102.h
+++ b/drivers/media/video/sn9c102/sn9c102.h
@@ -21,7 +21,6 @@
#ifndef _SN9C102_H_
#define _SN9C102_H_
-#include <linux/version.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 0e07c49..16cb07c5 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -33,6 +33,7 @@
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <linux/version.h>
#include <linux/page-flags.h>
#include <asm/byteorder.h>
#include <asm/page.h>
@@ -47,8 +48,7 @@
#define SN9C102_MODULE_AUTHOR "(C) 2004-2007 Luca Risolia"
#define SN9C102_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
#define SN9C102_MODULE_LICENSE "GPL"
-#define SN9C102_MODULE_VERSION "1:1.47pre49"
-#define SN9C102_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 47)
+#define SN9C102_MODULE_VERSION "1:1.48"
/*****************************************************************************/
@@ -2158,7 +2158,7 @@ sn9c102_vidioc_querycap(struct sn9c102_device* cam, void __user * arg)
{
struct v4l2_capability cap = {
.driver = "sn9c102",
- .version = SN9C102_MODULE_VERSION_CODE,
+ .version = LINUX_VERSION_CODE,
.capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING,
};
@@ -3187,16 +3187,8 @@ static long sn9c102_ioctl_v4l2(struct file *filp,
case VIDIOC_S_AUDIO:
return sn9c102_vidioc_s_audio(cam, arg);
- case VIDIOC_G_STD:
- case VIDIOC_S_STD:
- case VIDIOC_QUERYSTD:
- case VIDIOC_ENUMSTD:
- case VIDIOC_QUERYMENU:
- case VIDIOC_ENUM_FRAMEINTERVALS:
- return -EINVAL;
-
default:
- return -EINVAL;
+ return -ENOTTY;
}
}
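The default case above is part of a wider cleanup: an ioctl the driver does not implement now fails with ENOTTY instead of EINVAL, so applications can tell "not supported" apart from "bad parameters". A small userspace sketch (the /dev/video0 path is an assumption) probing one of the ioctls this driver just stopped claiming:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            v4l2_std_id std;
            int fd = open("/dev/video0", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, VIDIOC_G_STD, &std) < 0) {
                    if (errno == ENOTTY)
                            printf("VIDIOC_G_STD is not implemented by this driver\n");
                    else
                            printf("VIDIOC_G_STD failed: %s\n", strerror(errno));
            }
            close(fd);
            return 0;
    }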
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 4e4d412..5bdfe7e 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -60,14 +60,14 @@ static int soc_camera_power_set(struct soc_camera_device *icd,
ret = regulator_bulk_enable(icl->num_regulators,
icl->regulators);
if (ret < 0) {
- dev_err(&icd->dev, "Cannot enable regulators\n");
+ dev_err(icd->pdev, "Cannot enable regulators\n");
return ret;
}
if (icl->power)
ret = icl->power(icd->pdev, power_on);
if (ret < 0) {
- dev_err(&icd->dev,
+ dev_err(icd->pdev,
"Platform failed to power-on the camera.\n");
regulator_bulk_disable(icl->num_regulators,
@@ -79,7 +79,7 @@ static int soc_camera_power_set(struct soc_camera_device *icd,
if (icl->power)
ret = icl->power(icd->pdev, 0);
if (ret < 0) {
- dev_err(&icd->dev,
+ dev_err(icd->pdev,
"Platform failed to power-off the camera.\n");
return ret;
}
@@ -87,7 +87,7 @@ static int soc_camera_power_set(struct soc_camera_device *icd,
ret = regulator_bulk_disable(icl->num_regulators,
icl->regulators);
if (ret < 0) {
- dev_err(&icd->dev, "Cannot disable regulators\n");
+ dev_err(icd->pdev, "Cannot disable regulators\n");
return ret;
}
}
@@ -147,11 +147,11 @@ EXPORT_SYMBOL(soc_camera_apply_sensor_flags);
static int soc_camera_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_pix_format *pix = &f->fmt.pix;
int ret;
- dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
+ dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
pixfmtstr(pix->pixelformat), pix->width, pix->height);
pix->bytesperline = 0;
@@ -199,22 +199,15 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
static int soc_camera_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
- struct soc_camera_device *icd = file->private_data;
- int ret = 0;
-
if (inp->index != 0)
return -EINVAL;
- if (icd->ops->enum_input)
- ret = icd->ops->enum_input(icd, inp);
- else {
- /* default is camera */
- inp->type = V4L2_INPUT_TYPE_CAMERA;
- inp->std = V4L2_STD_UNKNOWN;
- strcpy(inp->name, "Camera");
- }
+ /* default is camera */
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = V4L2_STD_UNKNOWN;
+ strcpy(inp->name, "Camera");
- return ret;
+ return 0;
}
static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i)
@@ -244,7 +237,7 @@ static int soc_camera_enum_fsizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
return ici->ops->enum_fsizes(icd, fsize);
}
@@ -254,7 +247,7 @@ static int soc_camera_reqbufs(struct file *file, void *priv,
{
int ret;
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -281,7 +274,7 @@ static int soc_camera_querybuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -295,7 +288,7 @@ static int soc_camera_qbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -312,7 +305,7 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -329,7 +322,7 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
static int soc_camera_init_user_formats(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
unsigned int i, fmts = 0, raw_fmts = 0;
int ret;
enum v4l2_mbus_pixelcode code;
@@ -363,7 +356,7 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
if (!icd->user_formats)
return -ENOMEM;
- dev_dbg(&icd->dev, "Found %d supported formats.\n", fmts);
+ dev_dbg(icd->pdev, "Found %d supported formats.\n", fmts);
/* Second pass - actually fill data formats */
fmts = 0;
@@ -395,7 +388,7 @@ egfmt:
/* Always entered with .video_lock held */
static void soc_camera_free_user_formats(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
if (ici->ops->put_formats)
ici->ops->put_formats(icd);
@@ -409,11 +402,11 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
static int soc_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_pix_format *pix = &f->fmt.pix;
int ret;
- dev_dbg(&icd->dev, "S_FMT(%c%c%c%c, %ux%u)\n",
+ dev_dbg(icd->pdev, "S_FMT(%c%c%c%c, %ux%u)\n",
pixfmtstr(pix->pixelformat), pix->width, pix->height);
/* We always call try_fmt() before set_fmt() or set_crop() */
@@ -426,7 +419,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
return ret;
} else if (!icd->current_fmt ||
icd->current_fmt->host_fmt->fourcc != pix->pixelformat) {
- dev_err(&icd->dev,
+ dev_err(icd->pdev,
"Host driver hasn't set up current format correctly!\n");
return -EINVAL;
}
@@ -440,7 +433,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
if (ici->ops->init_videobuf)
icd->vb_vidq.field = pix->field;
- dev_dbg(&icd->dev, "set width: %d height: %d\n",
+ dev_dbg(icd->pdev, "set width: %d height: %d\n",
icd->user_width, icd->user_height);
/* set physical bus parameters */
@@ -450,9 +443,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
static int soc_camera_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
- struct soc_camera_device *icd = container_of(vdev->parent,
- struct soc_camera_device,
- dev);
+ struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
struct soc_camera_link *icl = to_soc_camera_link(icd);
struct soc_camera_host *ici;
int ret;
@@ -461,10 +452,10 @@ static int soc_camera_open(struct file *file)
/* No device driver attached */
return -ENODEV;
- ici = to_soc_camera_host(icd->dev.parent);
+ ici = to_soc_camera_host(icd->parent);
if (!try_module_get(ici->ops->owner)) {
- dev_err(&icd->dev, "Couldn't lock capture bus driver.\n");
+ dev_err(icd->pdev, "Couldn't lock capture bus driver.\n");
return -EINVAL;
}
@@ -495,7 +486,7 @@ static int soc_camera_open(struct file *file)
ret = ici->ops->add(icd);
if (ret < 0) {
- dev_err(&icd->dev, "Couldn't activate the camera: %d\n", ret);
+ dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
goto eiciadd;
}
@@ -524,7 +515,7 @@ static int soc_camera_open(struct file *file)
}
file->private_data = icd;
- dev_dbg(&icd->dev, "camera device open\n");
+ dev_dbg(icd->pdev, "camera device open\n");
return 0;
@@ -549,7 +540,7 @@ epower:
static int soc_camera_close(struct file *file)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
icd->use_count--;
if (!icd->use_count) {
@@ -570,7 +561,7 @@ static int soc_camera_close(struct file *file)
module_put(ici->ops->owner);
- dev_dbg(&icd->dev, "camera device close\n");
+ dev_dbg(icd->pdev, "camera device close\n");
return 0;
}
@@ -581,7 +572,7 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
struct soc_camera_device *icd = file->private_data;
int err = -EINVAL;
- dev_err(&icd->dev, "camera device read not implemented\n");
+ dev_err(icd->pdev, "camera device read not implemented\n");
return err;
}
@@ -589,10 +580,10 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
int err;
- dev_dbg(&icd->dev, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
+ dev_dbg(icd->pdev, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
if (icd->streamer != file)
return -EBUSY;
@@ -602,7 +593,7 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
else
err = vb2_mmap(&icd->vb2_vidq, vma);
- dev_dbg(&icd->dev, "vma start=0x%08lx, size=%ld, ret=%d\n",
+ dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
(unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
err);
@@ -613,13 +604,13 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
if (icd->streamer != file)
return -EBUSY;
if (ici->ops->init_videobuf && list_empty(&icd->vb_vidq.stream)) {
- dev_err(&icd->dev, "Trying to poll with no queued buffers!\n");
+ dev_err(icd->pdev, "Trying to poll with no queued buffers!\n");
return POLLERR;
}
@@ -659,15 +650,15 @@ static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
WARN_ON(priv != file->private_data);
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- dev_warn(&icd->dev, "Wrong buf-type %d\n", f->type);
+ dev_warn(icd->pdev, "Wrong buf-type %d\n", f->type);
return -EINVAL;
}
if (icd->streamer && icd->streamer != file)
return -EBUSY;
- if (is_streaming(to_soc_camera_host(icd->dev.parent), icd)) {
- dev_err(&icd->dev, "S_FMT denied: queue initialised\n");
+ if (is_streaming(to_soc_camera_host(icd->parent), icd)) {
+ dev_err(icd->pdev, "S_FMT denied: queue initialised\n");
return -EBUSY;
}
@@ -716,7 +707,7 @@ static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
pix->field = icd->field;
pix->pixelformat = icd->current_fmt->host_fmt->fourcc;
pix->colorspace = icd->colorspace;
- dev_dbg(&icd->dev, "current_fmt->fourcc: 0x%08x\n",
+ dev_dbg(icd->pdev, "current_fmt->fourcc: 0x%08x\n",
icd->current_fmt->host_fmt->fourcc);
return 0;
}
@@ -725,7 +716,7 @@ static int soc_camera_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -737,7 +728,7 @@ static int soc_camera_streamon(struct file *file, void *priv,
enum v4l2_buf_type i)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -766,7 +757,7 @@ static int soc_camera_streamoff(struct file *file, void *priv,
{
struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -794,7 +785,7 @@ static int soc_camera_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
int i;
WARN_ON(priv != file->private_data);
@@ -825,7 +816,7 @@ static int soc_camera_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -844,7 +835,7 @@ static int soc_camera_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -863,7 +854,7 @@ static int soc_camera_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *a)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
return ici->ops->cropcap(icd, a);
}
@@ -872,7 +863,7 @@ static int soc_camera_g_crop(struct file *file, void *fh,
struct v4l2_crop *a)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
int ret;
ret = ici->ops->get_crop(icd, a);
@@ -889,7 +880,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
struct v4l2_crop *a)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_rect *rect = &a->c;
struct v4l2_crop current_crop;
int ret;
@@ -897,7 +888,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- dev_dbg(&icd->dev, "S_CROP(%ux%u@%u:%u)\n",
+ dev_dbg(icd->pdev, "S_CROP(%ux%u@%u:%u)\n",
rect->width, rect->height, rect->left, rect->top);
/* If get_crop fails, we'll let host and / or client drivers decide */
@@ -905,7 +896,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
/* Prohibit window size change with initialised buffers */
if (ret < 0) {
- dev_err(&icd->dev,
+ dev_err(icd->pdev,
"S_CROP denied: getting current crop failed\n");
} else if ((a->c.width == current_crop.c.width &&
a->c.height == current_crop.c.height) ||
@@ -915,7 +906,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
} else if (ici->ops->set_livecrop) {
ret = ici->ops->set_livecrop(icd, a);
} else {
- dev_err(&icd->dev,
+ dev_err(icd->pdev,
"S_CROP denied: queue initialised and sizes differ\n");
ret = -EBUSY;
}
@@ -927,7 +918,7 @@ static int soc_camera_g_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
if (ici->ops->get_parm)
return ici->ops->get_parm(icd, a);
@@ -939,7 +930,7 @@ static int soc_camera_s_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
if (ici->ops->set_parm)
return ici->ops->set_parm(icd, a);
@@ -976,6 +967,8 @@ static int soc_camera_s_register(struct file *file, void *fh,
}
#endif
+static int soc_camera_probe(struct soc_camera_device *icd);
+
/* So far this function cannot fail */
static void scan_add_host(struct soc_camera_host *ici)
{
@@ -986,15 +979,9 @@ static void scan_add_host(struct soc_camera_host *ici)
list_for_each_entry(icd, &devices, list) {
if (icd->iface == ici->nr) {
int ret;
- icd->dev.parent = ici->v4l2_dev.dev;
- dev_set_name(&icd->dev, "%u-%u", icd->iface,
- icd->devnum);
- ret = device_register(&icd->dev);
- if (ret < 0) {
- icd->dev.parent = NULL;
- dev_err(&icd->dev,
- "Cannot register device: %d\n", ret);
- }
+
+ icd->parent = ici->v4l2_dev.dev;
+ ret = soc_camera_probe(icd);
}
}
@@ -1006,12 +993,12 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
struct soc_camera_link *icl)
{
struct i2c_client *client;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id);
struct v4l2_subdev *subdev;
if (!adap) {
- dev_err(&icd->dev, "Cannot get I2C adapter #%d. No driver?\n",
+ dev_err(icd->pdev, "Cannot get I2C adapter #%d. No driver?\n",
icl->i2c_adapter_id);
goto ei2cga;
}
@@ -1026,7 +1013,7 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
client = v4l2_get_subdevdata(subdev);
/* Use to_i2c_client(dev) to recover the i2c client */
- dev_set_drvdata(&icd->dev, &client->dev);
+ icd->control = &client->dev;
return 0;
ei2cnd:
@@ -1040,7 +1027,8 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd)
struct i2c_client *client =
to_i2c_client(to_soc_camera_control(icd));
struct i2c_adapter *adap = client->adapter;
- dev_set_drvdata(&icd->dev, NULL);
+
+ icd->control = NULL;
v4l2_device_unregister_subdev(i2c_get_clientdata(client));
i2c_unregister_device(client);
i2c_put_adapter(adap);
@@ -1053,17 +1041,16 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd)
static int soc_camera_video_start(struct soc_camera_device *icd);
static int video_dev_create(struct soc_camera_device *icd);
/* Called during host-driver probe */
-static int soc_camera_probe(struct device *dev)
+static int soc_camera_probe(struct soc_camera_device *icd)
{
- struct soc_camera_device *icd = to_soc_camera_dev(dev);
- struct soc_camera_host *ici = to_soc_camera_host(dev->parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct soc_camera_link *icl = to_soc_camera_link(icd);
struct device *control = NULL;
struct v4l2_subdev *sd;
struct v4l2_mbus_framefmt mf;
int ret;
- dev_info(dev, "Probing %s\n", dev_name(dev));
+ dev_info(icd->pdev, "Probing %s\n", dev_name(icd->pdev));
ret = regulator_bulk_get(icd->pdev, icl->num_regulators,
icl->regulators);
@@ -1099,7 +1086,7 @@ static int soc_camera_probe(struct device *dev)
if (icl->module_name)
ret = request_module(icl->module_name);
- ret = icl->add_device(icl, &icd->dev);
+ ret = icl->add_device(icd);
if (ret < 0)
goto eadddev;
@@ -1110,7 +1097,7 @@ static int soc_camera_probe(struct device *dev)
control = to_soc_camera_control(icd);
if (!control || !control->driver || !dev_get_drvdata(control) ||
!try_module_get(control->driver->owner)) {
- icl->del_device(icl);
+ icl->del_device(icd);
goto enodrv;
}
}
@@ -1125,8 +1112,6 @@ static int soc_camera_probe(struct device *dev)
icd->field = V4L2_FIELD_ANY;
- icd->vdev->lock = &icd->video_lock;
-
/*
* ..._video_start() will create a device node, video_register_device()
* itself is protected against concurrent open() calls, but we also have
@@ -1146,11 +1131,6 @@ static int soc_camera_probe(struct device *dev)
icd->field = mf.field;
}
- /* Do we have to sysfs_remove_link() before device_unregister()? */
- if (sysfs_create_link(&icd->dev.kobj, &to_soc_camera_control(icd)->kobj,
- "control"))
- dev_warn(&icd->dev, "Failed creating the control symlink\n");
-
ici->ops->remove(icd);
soc_camera_power_set(icd, icl, 0);
@@ -1166,7 +1146,7 @@ eiufmt:
if (icl->board_info) {
soc_camera_free_i2c(icd);
} else {
- icl->del_device(icl);
+ icl->del_device(icd);
module_put(control->driver->owner);
}
enodrv:
@@ -1186,13 +1166,12 @@ ereg:
* This is called on device_unregister, which only means we have to disconnect
* from the host, but not remove ourselves from the device list
*/
-static int soc_camera_remove(struct device *dev)
+static int soc_camera_remove(struct soc_camera_device *icd)
{
- struct soc_camera_device *icd = to_soc_camera_dev(dev);
struct soc_camera_link *icl = to_soc_camera_link(icd);
struct video_device *vdev = icd->vdev;
- BUG_ON(!dev->parent);
+ BUG_ON(!icd->parent);
if (vdev) {
video_unregister_device(vdev);
@@ -1202,10 +1181,9 @@ static int soc_camera_remove(struct device *dev)
if (icl->board_info) {
soc_camera_free_i2c(icd);
} else {
- struct device_driver *drv = to_soc_camera_control(icd) ?
- to_soc_camera_control(icd)->driver : NULL;
+ struct device_driver *drv = to_soc_camera_control(icd)->driver;
if (drv) {
- icl->del_device(icl);
+ icl->del_device(icd);
module_put(drv->owner);
}
}
@@ -1216,49 +1194,6 @@ static int soc_camera_remove(struct device *dev)
return 0;
}
-static int soc_camera_suspend(struct device *dev, pm_message_t state)
-{
- struct soc_camera_device *icd = to_soc_camera_dev(dev);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- int ret = 0;
-
- if (ici->ops->suspend)
- ret = ici->ops->suspend(icd, state);
-
- return ret;
-}
-
-static int soc_camera_resume(struct device *dev)
-{
- struct soc_camera_device *icd = to_soc_camera_dev(dev);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- int ret = 0;
-
- if (ici->ops->resume)
- ret = ici->ops->resume(icd);
-
- return ret;
-}
-
-struct bus_type soc_camera_bus_type = {
- .name = "soc-camera",
- .probe = soc_camera_probe,
- .remove = soc_camera_remove,
- .suspend = soc_camera_suspend,
- .resume = soc_camera_resume,
-};
-EXPORT_SYMBOL_GPL(soc_camera_bus_type);
-
-static struct device_driver ic_drv = {
- .name = "camera",
- .bus = &soc_camera_bus_type,
- .owner = THIS_MODULE,
-};
-
-static void dummy_release(struct device *dev)
-{
-}
-
static int default_cropcap(struct soc_camera_device *icd,
struct v4l2_cropcap *a)
{
@@ -1317,13 +1252,6 @@ static int default_enum_fsizes(struct soc_camera_device *icd,
return 0;
}
-static void soc_camera_device_init(struct device *dev, void *pdata)
-{
- dev->platform_data = pdata;
- dev->bus = &soc_camera_bus_type;
- dev->release = dummy_release;
-}
-
int soc_camera_host_register(struct soc_camera_host *ici)
{
struct soc_camera_host *ix;
@@ -1389,24 +1317,9 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
mutex_lock(&list_lock);
list_del(&ici->list);
-
- list_for_each_entry(icd, &devices, list) {
- if (icd->iface == ici->nr) {
- void *pdata = icd->dev.platform_data;
- /* The bus->remove will be called */
- device_unregister(&icd->dev);
- /*
- * Not before device_unregister(), .remove
- * needs parent to call ici->ops->remove().
- * If the host module is loaded again, device_register()
- * would complain "already initialised," since 2.6.32
- * this is also needed to prevent use-after-free of the
- * device private data.
- */
- memset(&icd->dev, 0, sizeof(icd->dev));
- soc_camera_device_init(&icd->dev, pdata);
- }
- }
+ list_for_each_entry(icd, &devices, list)
+ if (icd->iface == ici->nr && to_soc_camera_control(icd))
+ soc_camera_remove(icd);
mutex_unlock(&list_lock);
@@ -1448,11 +1361,6 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
return 0;
}
-static void soc_camera_device_unregister(struct soc_camera_device *icd)
-{
- list_del(&icd->list);
-}
-
static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
.vidioc_querycap = soc_camera_querycap,
.vidioc_g_fmt_vid_cap = soc_camera_g_fmt_vid_cap,
@@ -1487,7 +1395,7 @@ static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
static int video_dev_create(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct video_device *vdev = video_device_alloc();
if (!vdev)
@@ -1495,12 +1403,13 @@ static int video_dev_create(struct soc_camera_device *icd)
strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name));
- vdev->parent = &icd->dev;
+ vdev->parent = icd->pdev;
vdev->current_norm = V4L2_STD_UNKNOWN;
vdev->fops = &soc_camera_fops;
vdev->ioctl_ops = &soc_camera_ioctl_ops;
vdev->release = video_device_release;
vdev->tvnorms = V4L2_STD_UNKNOWN;
+ vdev->lock = &icd->video_lock;
icd->vdev = vdev;
@@ -1515,7 +1424,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
const struct device_type *type = icd->vdev->dev.type;
int ret;
- if (!icd->dev.parent)
+ if (!icd->parent)
return -ENODEV;
if (!icd->ops ||
@@ -1525,7 +1434,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
- dev_err(&icd->dev, "video_register_device failed: %d\n", ret);
+ dev_err(icd->pdev, "video_register_device failed: %d\n", ret);
return ret;
}
@@ -1549,6 +1458,7 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
return -ENOMEM;
icd->iface = icl->bus_id;
+ icd->link = icl;
icd->pdev = &pdev->dev;
platform_set_drvdata(pdev, icd);
@@ -1556,8 +1466,6 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
if (ret < 0)
goto escdevreg;
- soc_camera_device_init(&icd->dev, icl);
-
icd->user_width = DEFAULT_WIDTH;
icd->user_height = DEFAULT_HEIGHT;
@@ -1581,7 +1489,7 @@ static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
if (!icd)
return -EINVAL;
- soc_camera_device_unregister(icd);
+ list_del(&icd->list);
kfree(icd);
@@ -1598,31 +1506,12 @@ static struct platform_driver __refdata soc_camera_pdrv = {
static int __init soc_camera_init(void)
{
- int ret = bus_register(&soc_camera_bus_type);
- if (ret)
- return ret;
- ret = driver_register(&ic_drv);
- if (ret)
- goto edrvr;
-
- ret = platform_driver_probe(&soc_camera_pdrv, soc_camera_pdrv_probe);
- if (ret)
- goto epdr;
-
- return 0;
-
-epdr:
- driver_unregister(&ic_drv);
-edrvr:
- bus_unregister(&soc_camera_bus_type);
- return ret;
+ return platform_driver_probe(&soc_camera_pdrv, soc_camera_pdrv_probe);
}
static void __exit soc_camera_exit(void)
{
platform_driver_unregister(&soc_camera_pdrv);
- driver_unregister(&ic_drv);
- bus_unregister(&soc_camera_bus_type);
}
module_init(soc_camera_init);
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index bf406e8..8069cd6 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -146,7 +146,7 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
if (!p)
return -EINVAL;
- if (!p->dev) {
+ if (!p->icd) {
dev_err(&pdev->dev,
"Platform has not set soc_camera_device pointer!\n");
return -EINVAL;
@@ -156,16 +156,16 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- icd = to_soc_camera_dev(p->dev);
+ icd = p->icd;
/* soc-camera convention: control's drvdata points to the subdev */
platform_set_drvdata(pdev, &priv->subdev);
/* Set the control device reference */
- dev_set_drvdata(&icd->dev, &pdev->dev);
+ icd->control = &pdev->dev;
icd->ops = &soc_camera_platform_ops;
- ici = to_soc_camera_host(icd->dev.parent);
+ ici = to_soc_camera_host(icd->parent);
v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
v4l2_set_subdevdata(&priv->subdev, p);
@@ -188,7 +188,7 @@ static int soc_camera_platform_remove(struct platform_device *pdev)
{
struct soc_camera_platform_priv *priv = get_priv(pdev);
struct soc_camera_platform_info *p = pdev->dev.platform_data;
- struct soc_camera_device *icd = to_soc_camera_dev(p->dev);
+ struct soc_camera_device *icd = p->icd;
v4l2_device_unregister_subdev(&priv->subdev);
icd->ops = NULL;
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
index c901721..8afb0e8 100644
--- a/drivers/media/video/sr030pc30.c
+++ b/drivers/media/video/sr030pc30.c
@@ -726,8 +726,10 @@ static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
const struct sr030pc30_platform_data *pdata = info->pdata;
int ret;
- if (WARN(pdata == NULL, "No platform data!\n"))
- return -ENOMEM;
+ if (pdata == NULL) {
+ WARN(1, "No platform data!\n");
+ return -EINVAL;
+ }
/*
* Put sensor into power sleep mode before switching off
@@ -746,6 +748,7 @@ static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
if (on) {
ret = sr030pc30_base_config(sd);
} else {
+ ret = 0;
info->curr_win = NULL;
info->curr_fmt = NULL;
}
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index 3941f95..bd21854 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -49,10 +49,11 @@ static int maxvol;
static int loudness; /* disable loudness by default */
static int debug; /* insmod parameter */
module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Set debugging level from 0 to 3. Default is off(0).");
module_param(loudness, int, S_IRUGO);
-MODULE_PARM_DESC(maxvol,"Set maximium volume to +20db (0), default is 0db(1)");
+MODULE_PARM_DESC(loudness, "Turn loudness on(1) else off(0). Default is off(0).");
module_param(maxvol, int, S_IRUGO | S_IWUSR);
-
+MODULE_PARM_DESC(maxvol, "Set maximum volume to +20dB(0) else +0dB(1). Default is +20dB(0).");
/* Structure of address and subaddresses for the tda7432 */
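For context, the descriptions added above are what modinfo prints for each parameter, and because debug and maxvol are declared with S_IRUGO | S_IWUSR they are also exposed under /sys/module/tda7432/parameters/. A small sketch reading one of them back from userspace (assumes the module is loaded and sysfs is mounted):

    #include <stdio.h>

    int main(void)
    {
            char buf[16];
            FILE *f = fopen("/sys/module/tda7432/parameters/debug", "r");

            if (!f)
                    return 1;
            if (fgets(buf, sizeof(buf), f))
                    printf("tda7432 debug level: %s", buf);
            fclose(f);
            return 0;
    }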
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index fc611eb..84cd1b6 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -20,7 +20,6 @@
* Timberdale FPGA LogiWin Video In
*/
-#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
diff --git a/drivers/media/video/tlg2300/pd-common.h b/drivers/media/video/tlg2300/pd-common.h
index 46066bd..56564e6 100644
--- a/drivers/media/video/tlg2300/pd-common.h
+++ b/drivers/media/video/tlg2300/pd-common.h
@@ -1,7 +1,6 @@
#ifndef PD_COMMON_H
#define PD_COMMON_H
-#include <linux/version.h>
#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/list.h>
diff --git a/drivers/media/video/tlg2300/pd-dvb.c b/drivers/media/video/tlg2300/pd-dvb.c
index edd78f8..d0da11a 100644
--- a/drivers/media/video/tlg2300/pd-dvb.c
+++ b/drivers/media/video/tlg2300/pd-dvb.c
@@ -7,7 +7,7 @@
#include "vendorcmds.h"
#include <linux/sched.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
static void dvb_urb_cleanup(struct pd_dvb_adapter *pd_dvb);
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 99c81a9..129f135 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -531,3 +531,4 @@ module_exit(poseidon_exit);
MODULE_AUTHOR("Telegent Systems");
MODULE_DESCRIPTION("For tlg2300-based USB device ");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.2");
diff --git a/drivers/media/video/tlg2300/pd-radio.c b/drivers/media/video/tlg2300/pd-radio.c
index fae84c2..4fad1df 100644
--- a/drivers/media/video/tlg2300/pd-radio.c
+++ b/drivers/media/video/tlg2300/pd-radio.c
@@ -6,7 +6,6 @@
#include <linux/usb.h>
#include <linux/i2c.h>
#include <media/v4l2-dev.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <media/v4l2-ioctl.h>
@@ -149,7 +148,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "tele-radio", sizeof(v->driver));
strlcpy(v->card, "Telegent Poseidon", sizeof(v->card));
usb_make_path(p->udev, v->bus_info, sizeof(v->bus_info));
- v->version = KERNEL_VERSION(0, 0, 1);
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index a03945a..11cc980 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -39,6 +39,7 @@
#include "tda9887.h"
#include "xc5000.h"
#include "tda18271.h"
+#include "xc4000.h"
#define UNSET (-1U)
@@ -391,6 +392,23 @@ static void set_type(struct i2c_client *c, unsigned int type,
tune_now = 0;
break;
}
+ case TUNER_XC4000:
+ {
+ struct xc4000_config xc4000_cfg = {
+ .i2c_address = t->i2c->addr,
+ /* FIXME: the correct parameters will be set */
+ /* only when the digital dvb_attach() occurs */
+ .default_pm = 0,
+ .dvb_amplitude = 0,
+ .set_smoothedcvbs = 0,
+ .if_khz = 0
+ };
+ if (!dvb_attach(xc4000_attach,
+ &t->fe, t->i2c->adapter, &xc4000_cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
default:
if (!dvb_attach(simple_tuner_attach, &t->fe,
t->i2c->adapter, t->i2c->addr, t->type))
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index 0347bbe..742482e 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -552,16 +552,6 @@ static int tw9910_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
return ret;
}
-static int tw9910_enum_input(struct soc_camera_device *icd,
- struct v4l2_input *inp)
-{
- inp->type = V4L2_INPUT_TYPE_TUNER;
- inp->std = V4L2_STD_UNKNOWN;
- strcpy(inp->name, "Video");
-
- return 0;
-}
-
static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
@@ -846,13 +836,9 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
struct tw9910_priv *priv = to_tw9910(client);
s32 id;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* tw9910 only use 8 or 16 bit bus width
@@ -891,7 +877,6 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
static struct soc_camera_ops tw9910_ops = {
.set_bus_param = tw9910_set_bus_param,
.query_bus_param = tw9910_query_bus_param,
- .enum_input = tw9910_enum_input,
};
static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index ea8ea8a..5a74f5e 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -45,7 +45,6 @@
*
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
@@ -77,15 +76,7 @@
#define DRIVER_ALIAS "USBVision"
#define DRIVER_DESC "USBVision USB Video Device Driver for Linux"
#define DRIVER_LICENSE "GPL"
-#define USBVISION_DRIVER_VERSION_MAJOR 0
-#define USBVISION_DRIVER_VERSION_MINOR 9
-#define USBVISION_DRIVER_VERSION_PATCHLEVEL 10
-#define USBVISION_DRIVER_VERSION KERNEL_VERSION(USBVISION_DRIVER_VERSION_MAJOR,\
-USBVISION_DRIVER_VERSION_MINOR,\
-USBVISION_DRIVER_VERSION_PATCHLEVEL)
-#define USBVISION_VERSION_STRING __stringify(USBVISION_DRIVER_VERSION_MAJOR) \
-"." __stringify(USBVISION_DRIVER_VERSION_MINOR) \
-"." __stringify(USBVISION_DRIVER_VERSION_PATCHLEVEL)
+#define USBVISION_VERSION_STRING "0.9.11"
#define ENABLE_HEXDUMP 0 /* Enable if you need it */
@@ -516,7 +507,6 @@ static int vidioc_querycap(struct file *file, void *priv,
usbvision_device_data[usbvision->dev_model].model_string,
sizeof(vc->card));
usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info));
- vc->version = USBVISION_DRIVER_VERSION;
vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_AUDIO |
V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index a4db26f..10c2364 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -20,7 +20,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "uvcvideo.h"
@@ -1664,8 +1664,8 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
return -EINVAL;
}
- /* Search for the matching (GUID/CS) control in the given device */
- list_for_each_entry(entity, &dev->entities, list) {
+ /* Search for the matching (GUID/CS) control on the current chain */
+ list_for_each_entry(entity, &chain->entities, chain) {
unsigned int i;
if (UVC_ENTITY_TYPE(entity) != UVC_VC_EXTENSION_UNIT ||
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index b6eae48..d29f9c2 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -31,6 +31,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
+#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/unaligned.h>
@@ -1857,7 +1858,7 @@ static int uvc_probe(struct usb_interface *intf,
sizeof(dev->mdev.serial));
strcpy(dev->mdev.bus_info, udev->devpath);
dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- dev->mdev.driver_version = DRIVER_VERSION_NUMBER;
+ dev->mdev.driver_version = LINUX_VERSION_CODE;
if (media_device_register(&dev->mdev) < 0)
goto error;
@@ -2130,6 +2131,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_BUILTIN_ISIGHT },
+ /* Foxlink ("HP Webcam" on HP Mini 5103) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x05c8,
+ .idProduct = 0x0403,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_FIX_BANDWIDTH },
/* Genesys Logic USB 2.0 PC Camera */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index f90ce9f..677691c 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -19,7 +19,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "uvcvideo.h"
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 543a803..ea71d5f 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -21,7 +21,7 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -83,7 +83,7 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
default:
uvc_trace(UVC_TRACE_CONTROL, "Unsupported V4L2 control type "
"%u.\n", xmap->v4l2_type);
- ret = -EINVAL;
+ ret = -ENOTTY;
goto done;
}
@@ -571,7 +571,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
strlcpy(cap->card, vdev->name, sizeof cap->card);
usb_make_path(stream->dev->udev,
cap->bus_info, sizeof(cap->bus_info));
- cap->version = DRIVER_VERSION_NUMBER;
+ cap->version = LINUX_VERSION_CODE;
if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
| V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 4999479..8244167 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -19,7 +19,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 20107fd..df32a43 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -183,8 +183,7 @@ struct uvc_xu_control {
* Driver specific constants.
*/
-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(1, 1, 0)
-#define DRIVER_VERSION "v1.1.0"
+#define DRIVER_VERSION "1.1.1"
/* Number of isochronous URBs. */
#define UVC_URBS 5
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 06b9f9f..5c6100f 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -105,6 +105,9 @@ int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
menu_items[ctrl->value][0] == '\0')
return -EINVAL;
}
+ if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
+ (ctrl->value & ~qctrl->maximum))
+ return -ERANGE;
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_check);
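The new V4L2_CTRL_TYPE_BITMASK check rejects any value whose bits fall outside the control's maximum, which acts as the mask of implemented bits (the flash FAULT control added later in this series is one user). A standalone sketch of the same test with made-up constants:

    #include <stdio.h>
    #include <stdint.h>

    static int bitmask_check(uint32_t value, uint32_t maximum)
    {
            if (value & ~maximum)
                    return -1;      /* the kernel returns -ERANGE here */
            return 0;
    }

    int main(void)
    {
            uint32_t max = 0x7;     /* pretend only three fault bits exist */

            printf("0x3 -> %d\n", bitmask_check(0x3, max));   /* accepted */
            printf("0x9 -> %d\n", bitmask_check(0x9, max));   /* rejected */
            return 0;
    }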
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 7c26947..61979b7 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -662,6 +662,32 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
return 0;
}
+struct v4l2_event32 {
+ __u32 type;
+ union {
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct compat_timespec timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
+static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
+ put_user(kp->type, &up->type) ||
+ copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
+ put_user(kp->pending, &up->pending) ||
+ put_user(kp->sequence, &up->sequence) ||
+ put_compat_timespec(&kp->timestamp, &up->timestamp) ||
+ put_user(kp->id, &up->id) ||
+ copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
+ return -EFAULT;
+ return 0;
+}
+
#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32)
#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32)
#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32)
@@ -675,6 +701,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
+#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32)
#define VIDIOC_OVERLAY32 _IOW ('V', 14, s32)
#define VIDIOC_STREAMON32 _IOW ('V', 18, s32)
@@ -693,6 +720,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
struct v4l2_input v2i;
struct v4l2_standard v2s;
struct v4l2_ext_controls v2ecs;
+ struct v4l2_event v2ev;
unsigned long vx;
int vi;
} karg;
@@ -715,6 +743,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
+ case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break;
case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
@@ -778,6 +807,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
err = get_v4l2_ext_controls32(&karg.v2ecs, up);
compatible_arg = 0;
break;
+ case VIDIOC_DQEVENT:
+ compatible_arg = 0;
+ break;
}
if (err)
return err;
@@ -818,6 +850,10 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
err = put_v4l2_framebuffer32(&karg.v2fb, up);
break;
+ case VIDIOC_DQEVENT:
+ err = put_v4l2_event32(&karg.v2ev, up);
+ break;
+
case VIDIOC_G_FMT:
case VIDIOC_S_FMT:
case VIDIOC_TRY_FMT:
@@ -920,6 +956,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_S_DV_TIMINGS:
case VIDIOC_G_DV_TIMINGS:
case VIDIOC_DQEVENT:
+ case VIDIOC_DQEVENT32:
case VIDIOC_SUBSCRIBE_EVENT:
case VIDIOC_UNSUBSCRIBE_EVENT:
ret = do_video_ioctl(file, cmd, arg);
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 2412f08..06b6014 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -23,17 +23,39 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-dev.h>
+#define has_op(master, op) \
+ (master->ops && master->ops->op)
+#define call_op(master, op) \
+ (has_op(master, op) ? master->ops->op(master) : 0)
+
/* Internal temporary helper struct, one for each v4l2_ext_control */
-struct ctrl_helper {
+struct v4l2_ctrl_helper {
+ /* Pointer to the control reference of the master control */
+ struct v4l2_ctrl_ref *mref;
/* The control corresponding to the v4l2_ext_control ID field. */
struct v4l2_ctrl *ctrl;
- /* Used internally to mark whether this control was already
- processed. */
- bool handled;
+ /* v4l2_ext_control index of the next control belonging to the
+ same cluster, or 0 if there isn't any. */
+ u32 next;
};
+/* Small helper function to determine if the autocluster is set to manual
+ mode. In that case the is_volatile flag should be ignored. */
+static bool is_cur_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->cur.val == master->manual_mode_value;
+}
+
+/* Same as above, but this checks against the new value instead of the
+   current value. */
+static bool is_new_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->val == master->manual_mode_value;
+}
+
/* Returns NULL or a character pointer array containing the menu for
the given control ID. The pointer array ends with a NULL pointer.
An empty string signifies a menu entry that is invalid. This allows
@@ -181,7 +203,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
};
static const char * const mpeg_stream_vbi_fmt[] = {
"No VBI",
- "Private packet, IVTV format",
+ "Private Packet, IVTV Format",
NULL
};
static const char * const camera_power_line_frequency[] = {
@@ -204,18 +226,130 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Negative",
"Emboss",
"Sketch",
- "Sky blue",
- "Grass green",
- "Skin whiten",
+ "Sky Blue",
+ "Grass Green",
+ "Skin Whiten",
"Vivid",
NULL
};
static const char * const tune_preemphasis[] = {
- "No preemphasis",
+ "No Preemphasis",
"50 useconds",
"75 useconds",
NULL,
};
+ static const char * const header_mode[] = {
+ "Separate Buffer",
+ "Joined With 1st Frame",
+ NULL,
+ };
+ static const char * const multi_slice[] = {
+ "Single",
+ "Max Macroblocks",
+ "Max Bytes",
+ NULL,
+ };
+ static const char * const entropy_mode[] = {
+ "CAVLC",
+ "CABAC",
+ NULL,
+ };
+ static const char * const mpeg_h264_level[] = {
+ "1",
+ "1b",
+ "1.1",
+ "1.2",
+ "1.3",
+ "2",
+ "2.1",
+ "2.2",
+ "3",
+ "3.1",
+ "3.2",
+ "4",
+ "4.1",
+ "4.2",
+ "5",
+ "5.1",
+ NULL,
+ };
+ static const char * const h264_loop_filter[] = {
+ "Enabled",
+ "Disabled",
+ "Disabled at Slice Boundary",
+ NULL,
+ };
+ static const char * const h264_profile[] = {
+ "Baseline",
+ "Constrained Baseline",
+ "Main",
+ "Extended",
+ "High",
+ "High 10",
+ "High 422",
+ "High 444 Predictive",
+ "High 10 Intra",
+ "High 422 Intra",
+ "High 444 Intra",
+ "CAVLC 444 Intra",
+ "Scalable Baseline",
+ "Scalable High",
+ "Scalable High Intra",
+ "Multiview High",
+ NULL,
+ };
+ static const char * const vui_sar_idc[] = {
+ "Unspecified",
+ "1:1",
+ "12:11",
+ "10:11",
+ "16:11",
+ "40:33",
+ "24:11",
+ "20:11",
+ "32:11",
+ "80:33",
+ "18:11",
+ "15:11",
+ "64:33",
+ "160:99",
+ "4:3",
+ "3:2",
+ "2:1",
+ "Extended SAR",
+ NULL,
+ };
+ static const char * const mpeg_mpeg4_level[] = {
+ "0",
+ "0b",
+ "1",
+ "2",
+ "3",
+ "3b",
+ "4",
+ "5",
+ NULL,
+ };
+ static const char * const mpeg4_profile[] = {
+ "Simple",
+ "Adcanved Simple",
+ "Core",
+ "Simple Scalable",
+ "Advanced Coding Efficency",
+ NULL,
+ };
+
+ static const char * const flash_led_mode[] = {
+ "Off",
+ "Flash",
+ "Torch",
+ NULL,
+ };
+ static const char * const flash_strobe_source[] = {
+ "Software",
+ "External",
+ NULL,
+ };
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -256,6 +390,28 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return colorfx;
case V4L2_CID_TUNE_PREEMPHASIS:
return tune_preemphasis;
+ case V4L2_CID_FLASH_LED_MODE:
+ return flash_led_mode;
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ return flash_strobe_source;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ return header_mode;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ return multi_slice;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ return entropy_mode;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ return mpeg_h264_level;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ return h264_loop_filter;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ return h264_profile;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ return vui_sar_idc;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ return mpeg_mpeg4_level;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ return mpeg4_profile;
default:
return NULL;
}
@@ -307,6 +463,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Minimum Number of Capture Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Minimum Number of Output Buffers";
/* MPEG controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -343,6 +501,48 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "The Number of Intra Refresh MBs";
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
+ case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "The Max Number of Reference Pictures";
+ case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I Period";
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "The Maximum Bytes Per Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "The Number of MB in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "The Slice Partitioning Method";
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -389,6 +589,21 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
+ /* Flash controls */
+ case V4L2_CID_FLASH_CLASS: return "Flash controls";
+ case V4L2_CID_FLASH_LED_MODE: return "LED mode";
+ case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe source";
+ case V4L2_CID_FLASH_STROBE: return "Strobe";
+ case V4L2_CID_FLASH_STROBE_STOP: return "Stop strobe";
+ case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe status";
+ case V4L2_CID_FLASH_TIMEOUT: return "Strobe timeout";
+ case V4L2_CID_FLASH_INTENSITY: return "Intensity, flash mode";
+ case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, torch mode";
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator";
+ case V4L2_CID_FLASH_FAULT: return "Faults";
+ case V4L2_CID_FLASH_CHARGE: return "Charge";
+ case V4L2_CID_FLASH_READY: return "Ready to strobe";
+
default:
return NULL;
}
@@ -423,12 +638,24 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_PILOT_TONE_ENABLED:
case V4L2_CID_ILLUMINATORS_1:
case V4L2_CID_ILLUMINATORS_2:
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_FLASH_CHARGE:
+ case V4L2_CID_FLASH_READY:
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
*type = V4L2_CTRL_TYPE_BOOLEAN;
*min = 0;
*max = *step = 1;
break;
case V4L2_CID_PAN_RESET:
case V4L2_CID_TILT_RESET:
+ case V4L2_CID_FLASH_STROBE:
+ case V4L2_CID_FLASH_STROBE_STOP:
*type = V4L2_CTRL_TYPE_BUTTON;
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
*min = *max = *step = *def = 0;
@@ -452,6 +679,17 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_EXPOSURE_AUTO:
case V4L2_CID_COLORFX:
case V4L2_CID_TUNE_PREEMPHASIS:
+ case V4L2_CID_FLASH_LED_MODE:
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_RDS_TX_PS_NAME:
@@ -462,6 +700,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_CAMERA_CLASS:
case V4L2_CID_MPEG_CLASS:
case V4L2_CID_FM_TX_CLASS:
+ case V4L2_CID_FLASH_CLASS:
*type = V4L2_CTRL_TYPE_CTRL_CLASS;
/* You can neither read not write these */
*flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
@@ -474,6 +713,14 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
/* Max is calculated as RGB888 that is 2^24 */
*max = 0xFFFFFF;
break;
+ case V4L2_CID_FLASH_FAULT:
+ *type = V4L2_CTRL_TYPE_BITMASK;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -519,6 +766,10 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_ZOOM_RELATIVE:
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
break;
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_FLASH_READY:
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
}
}
EXPORT_SYMBOL(v4l2_ctrl_fill);
@@ -537,6 +788,42 @@ static bool type_is_int(const struct v4l2_ctrl *ctrl)
}
}
+static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
+{
+ memset(ev->reserved, 0, sizeof(ev->reserved));
+ ev->type = V4L2_EVENT_CTRL;
+ ev->id = ctrl->id;
+ ev->u.ctrl.changes = changes;
+ ev->u.ctrl.type = ctrl->type;
+ ev->u.ctrl.flags = ctrl->flags;
+ if (ctrl->type == V4L2_CTRL_TYPE_STRING)
+ ev->u.ctrl.value64 = 0;
+ else
+ ev->u.ctrl.value64 = ctrl->cur.val64;
+ ev->u.ctrl.minimum = ctrl->minimum;
+ ev->u.ctrl.maximum = ctrl->maximum;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU)
+ ev->u.ctrl.step = 1;
+ else
+ ev->u.ctrl.step = ctrl->step;
+ ev->u.ctrl.default_value = ctrl->default_value;
+}
+
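+/* Send the event to all subscribers of this control, skipping the
+   originating filehandle unless it set V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK. */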
+static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
+{
+ struct v4l2_event ev;
+ struct v4l2_subscribed_event *sev;
+
+ if (list_empty(&ctrl->ev_subs))
+ return;
+ fill_event(&ev, ctrl, changes);
+
+ list_for_each_entry(sev, &ctrl->ev_subs, node)
+ if (sev->fh && (sev->fh != fh ||
+ (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)))
+ v4l2_event_queue_fh(sev->fh, &ev);
+}
+
/* Helper function: copy the current control value back to the caller */
static int cur_to_user(struct v4l2_ext_control *c,
struct v4l2_ctrl *ctrl)
@@ -624,22 +911,45 @@ static int new_to_user(struct v4l2_ext_control *c,
}
/* Copy the new value to the current value. */
-static void new_to_cur(struct v4l2_ctrl *ctrl)
+static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
+ bool update_inactive)
{
+ bool changed = false;
+
if (ctrl == NULL)
return;
switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_BUTTON:
+ changed = true;
+ break;
case V4L2_CTRL_TYPE_STRING:
/* strings are always 0-terminated */
+ changed = strcmp(ctrl->string, ctrl->cur.string);
strcpy(ctrl->cur.string, ctrl->string);
break;
case V4L2_CTRL_TYPE_INTEGER64:
+ changed = ctrl->val64 != ctrl->cur.val64;
ctrl->cur.val64 = ctrl->val64;
break;
default:
+ changed = ctrl->val != ctrl->cur.val;
ctrl->cur.val = ctrl->val;
break;
}
+ if (update_inactive) {
+ ctrl->flags &= ~V4L2_CTRL_FLAG_INACTIVE;
+ if (!is_cur_manual(ctrl->cluster[0]))
+ ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ }
+ if (changed || update_inactive) {
+ /* If a control was changed that was not one of the controls
+ modified by the application, then send the event to all filehandles. */
+ if (!ctrl->is_new)
+ fh = NULL;
+ send_event(fh, ctrl,
+ (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) |
+ (update_inactive ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ }
}
/* Copy the current value to the new value */
@@ -692,13 +1002,11 @@ static int cluster_changed(struct v4l2_ctrl *master)
return diff;
}
-/* Validate a new control */
-static int validate_new(struct v4l2_ctrl *ctrl)
+/* Validate integer-type control */
+static int validate_new_int(const struct v4l2_ctrl *ctrl, s32 *pval)
{
- s32 val = ctrl->val;
- char *s = ctrl->string;
+ s32 val = *pval;
u32 offset;
- size_t len;
switch (ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER:
@@ -711,11 +1019,11 @@ static int validate_new(struct v4l2_ctrl *ctrl)
offset = val - ctrl->minimum;
offset = ctrl->step * (offset / ctrl->step);
val = ctrl->minimum + offset;
- ctrl->val = val;
+ *pval = val;
return 0;
case V4L2_CTRL_TYPE_BOOLEAN:
- ctrl->val = !!ctrl->val;
+ *pval = !!val;
return 0;
case V4L2_CTRL_TYPE_MENU:
@@ -726,11 +1034,35 @@ static int validate_new(struct v4l2_ctrl *ctrl)
return -EINVAL;
return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ *pval &= ctrl->maximum;
+ return 0;
+
case V4L2_CTRL_TYPE_BUTTON:
case V4L2_CTRL_TYPE_CTRL_CLASS:
- ctrl->val64 = 0;
+ *pval = 0;
return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Validate a new control */
+static int validate_new(const struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
+{
+ char *s = c->string;
+ size_t len;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ return validate_new_int(ctrl, &c->value);
+
case V4L2_CTRL_TYPE_INTEGER64:
return 0;
@@ -780,6 +1112,7 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
{
struct v4l2_ctrl_ref *ref, *next_ref;
struct v4l2_ctrl *ctrl, *next_ctrl;
+ struct v4l2_subscribed_event *sev, *next_sev;
if (hdl == NULL || hdl->buckets == NULL)
return;
@@ -793,6 +1126,8 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
/* Free all controls owned by the handler */
list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
list_del(&ctrl->node);
+ list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
+ list_del(&sev->node);
kfree(ctrl);
}
kfree(hdl->buckets);
@@ -962,13 +1297,17 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
/* Sanity checks */
if (id == 0 || name == NULL || id >= V4L2_CID_PRIVATE_BASE ||
- max < min ||
(type == V4L2_CTRL_TYPE_INTEGER && step == 0) ||
+ (type == V4L2_CTRL_TYPE_BITMASK && max == 0) ||
(type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
(type == V4L2_CTRL_TYPE_STRING && max == 0)) {
handler_set_err(hdl, -ERANGE);
return NULL;
}
+ if (type != V4L2_CTRL_TYPE_BITMASK && max < min) {
+ handler_set_err(hdl, -ERANGE);
+ return NULL;
+ }
if ((type == V4L2_CTRL_TYPE_INTEGER ||
type == V4L2_CTRL_TYPE_MENU ||
type == V4L2_CTRL_TYPE_BOOLEAN) &&
@@ -976,6 +1315,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
handler_set_err(hdl, -ERANGE);
return NULL;
}
+ if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
+ handler_set_err(hdl, -ERANGE);
+ return NULL;
+ }
if (type == V4L2_CTRL_TYPE_BUTTON)
flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
@@ -991,6 +1334,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
}
INIT_LIST_HEAD(&ctrl->node);
+ INIT_LIST_HEAD(&ctrl->ev_subs);
ctrl->handler = hdl;
ctrl->ops = ops;
ctrl->id = id;
@@ -1132,6 +1476,9 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
/* Skip handler-private controls. */
if (ctrl->is_private)
continue;
+ /* And control classes */
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ continue;
ret = handler_new_ref(hdl, ctrl);
if (ret)
break;
@@ -1147,7 +1494,7 @@ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
int i;
/* The first control is the master control and it must not be NULL */
- BUG_ON(controls[0] == NULL);
+ BUG_ON(ncontrols == 0 || controls[0] == NULL);
for (i = 0; i < ncontrols; i++) {
if (controls[i]) {
@@ -1158,18 +1505,47 @@ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
}
EXPORT_SYMBOL(v4l2_ctrl_cluster);
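+/* Cluster controls with an auto/manual master: the other controls in the
+   cluster are optionally volatile and are inactive while the master is in
+   auto mode. */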
+void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
+ u8 manual_val, bool set_volatile)
+{
+ struct v4l2_ctrl *master = controls[0];
+ u32 flag;
+ int i;
+
+ v4l2_ctrl_cluster(ncontrols, controls);
+ WARN_ON(ncontrols <= 1);
+ WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
+ master->is_auto = true;
+ master->manual_mode_value = manual_val;
+ master->flags |= V4L2_CTRL_FLAG_UPDATE;
+ flag = is_cur_manual(master) ? 0 : V4L2_CTRL_FLAG_INACTIVE;
+
+ for (i = 1; i < ncontrols; i++)
+ if (controls[i]) {
+ controls[i]->is_volatile = set_volatile;
+ controls[i]->flags |= flag;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
+
/* Activate/deactivate a control. */
void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
{
+ /* invert since the actual flag is called 'inactive' */
+ bool inactive = !active;
+ bool old;
+
if (ctrl == NULL)
return;
- if (!active)
+ if (inactive)
/* set V4L2_CTRL_FLAG_INACTIVE */
- set_bit(4, &ctrl->flags);
+ old = test_and_set_bit(4, &ctrl->flags);
else
/* clear V4L2_CTRL_FLAG_INACTIVE */
- clear_bit(4, &ctrl->flags);
+ old = test_and_clear_bit(4, &ctrl->flags);
+ if (old != inactive)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
}
EXPORT_SYMBOL(v4l2_ctrl_activate);
@@ -1181,15 +1557,21 @@ EXPORT_SYMBOL(v4l2_ctrl_activate);
these controls. */
void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
{
+ bool old;
+
if (ctrl == NULL)
return;
+ v4l2_ctrl_lock(ctrl);
if (grabbed)
/* set V4L2_CTRL_FLAG_GRABBED */
- set_bit(1, &ctrl->flags);
+ old = test_and_set_bit(1, &ctrl->flags);
else
/* clear V4L2_CTRL_FLAG_GRABBED */
- clear_bit(1, &ctrl->flags);
+ old = test_and_clear_bit(1, &ctrl->flags);
+ if (old != grabbed)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
+ v4l2_ctrl_unlock(ctrl);
}
EXPORT_SYMBOL(v4l2_ctrl_grab);
@@ -1217,6 +1599,9 @@ static void log_ctrl(const struct v4l2_ctrl *ctrl,
case V4L2_CTRL_TYPE_MENU:
printk(KERN_CONT "%s", ctrl->qmenu[ctrl->cur.val]);
break;
+ case V4L2_CTRL_TYPE_BITMASK:
+ printk(KERN_CONT "0x%08x", ctrl->cur.val);
+ break;
case V4L2_CTRL_TYPE_INTEGER64:
printk(KERN_CONT "%lld", ctrl->cur.val64);
break;
@@ -1277,26 +1662,21 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
int i;
/* Skip if this control was already handled by a cluster. */
- if (ctrl->done)
+ /* Skip button controls and read-only controls. */
+ if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
continue;
for (i = 0; i < master->ncontrols; i++) {
if (master->cluster[i]) {
cur_to_new(master->cluster[i]);
master->cluster[i]->is_new = 1;
+ master->cluster[i]->done = true;
}
}
-
- /* Skip button controls and read-only controls. */
- if (ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
- (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
- continue;
- ret = master->ops->s_ctrl(master);
+ ret = call_op(master, s_ctrl);
if (ret)
break;
- for (i = 0; i < master->ncontrols; i++)
- if (master->cluster[i])
- master->cluster[i]->done = true;
}
mutex_unlock(&hdl->lock);
return ret;
@@ -1447,18 +1827,19 @@ EXPORT_SYMBOL(v4l2_subdev_querymenu);
Find the controls in the control array and do some basic checks. */
static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
- struct ctrl_helper *helpers,
- bool try)
+ struct v4l2_ctrl_helper *helpers)
{
+ struct v4l2_ctrl_helper *h;
+ bool have_clusters = false;
u32 i;
- for (i = 0; i < cs->count; i++) {
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
struct v4l2_ext_control *c = &cs->controls[i];
+ struct v4l2_ctrl_ref *ref;
struct v4l2_ctrl *ctrl;
u32 id = c->id & V4L2_CTRL_ID_MASK;
- if (try)
- cs->error_idx = i;
+ cs->error_idx = i;
if (cs->ctrl_class && V4L2_CTRL_ID2CLASS(id) != cs->ctrl_class)
return -EINVAL;
@@ -1467,53 +1848,59 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
extended controls */
if (id >= V4L2_CID_PRIVATE_BASE)
return -EINVAL;
- ctrl = v4l2_ctrl_find(hdl, id);
- if (ctrl == NULL)
+ ref = find_ref_lock(hdl, id);
+ if (ref == NULL)
return -EINVAL;
+ ctrl = ref->ctrl;
if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
- helpers[i].ctrl = ctrl;
- helpers[i].handled = false;
+ if (ctrl->cluster[0]->ncontrols > 1)
+ have_clusters = true;
+ if (ctrl->cluster[0] != ctrl)
+ ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
+ /* Store the ref to the master control of the cluster */
+ h->mref = ref;
+ h->ctrl = ctrl;
+ /* Initially set next to 0, meaning that there is no other
+ control in this helper array belonging to the same
+ cluster */
+ h->next = 0;
}
- return 0;
-}
-typedef int (*cluster_func)(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl);
+ /* We are done if there were no controls that belong to a multi-
+ control cluster. */
+ if (!have_clusters)
+ return 0;
-/* Walk over all controls in v4l2_ext_controls belonging to the same cluster
- and call the provided function. */
-static int cluster_walk(unsigned from,
- struct v4l2_ext_controls *cs,
- struct ctrl_helper *helpers,
- cluster_func f)
-{
- struct v4l2_ctrl **cluster = helpers[from].ctrl->cluster;
- int ret = 0;
- int i;
+ /* The code below figures out in O(n) time which controls in the list
+ belong to the same cluster. */
- /* Find any controls from the same cluster and call the function */
- for (i = from; !ret && i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+ /* This has to be done with the handler lock taken. */
+ mutex_lock(&hdl->lock);
- if (!helpers[i].handled && ctrl->cluster == cluster)
- ret = f(&cs->controls[i], ctrl);
+ /* First zero the helper field in the master control references */
+ for (i = 0; i < cs->count; i++)
+ helpers[i].mref->helper = 0;
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
+ struct v4l2_ctrl_ref *mref = h->mref;
+
+ /* If the mref->helper is set, then it points to an earlier
+ helper that belongs to the same cluster. */
+ if (mref->helper) {
+ /* Set the next field of mref->helper to the current
+ index: this means that the earlier helper now
+ points to the next helper in the same cluster. */
+ mref->helper->next = i;
+ /* mref should be set only for the first helper in the
+ cluster; clear it in the others. */
+ h->mref = NULL;
+ }
+ /* Point the mref helper to the current helper struct. */
+ mref->helper = h;
}
- return ret;
-}
-
-static void cluster_done(unsigned from,
- struct v4l2_ext_controls *cs,
- struct ctrl_helper *helpers)
-{
- struct v4l2_ctrl **cluster = helpers[from].ctrl->cluster;
- int i;
-
- /* Find any controls from the same cluster and mark them as handled */
- for (i = from; i < cs->count; i++)
- if (helpers[i].ctrl->cluster == cluster)
- helpers[i].handled = true;
+ mutex_unlock(&hdl->lock);
+ return 0;
}
/* Handles the corner case where cs->count == 0. It checks whether the
@@ -1531,10 +1918,10 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 ctrl_class)
/* Get extended controls. Allocates the helpers array if needed. */
int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
{
- struct ctrl_helper helper[4];
- struct ctrl_helper *helpers = helper;
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
int ret;
- int i;
+ int i, j;
cs->error_idx = cs->count;
cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
@@ -1551,30 +1938,46 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
return -ENOMEM;
}
- ret = prepare_ext_ctrls(hdl, cs, helpers, false);
+ ret = prepare_ext_ctrls(hdl, cs, helpers);
+ cs->error_idx = cs->count;
for (i = 0; !ret && i < cs->count; i++)
if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
ret = -EACCES;
for (i = 0; !ret && i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ctrl;
- struct v4l2_ctrl *master = ctrl->cluster[0];
+ int (*ctrl_to_user)(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl) = cur_to_user;
+ struct v4l2_ctrl *master;
- if (helpers[i].handled)
+ if (helpers[i].mref == NULL)
continue;
+ master = helpers[i].mref->ctrl;
cs->error_idx = i;
v4l2_ctrl_lock(master);
- /* g_volatile_ctrl will update the current control values */
- if (ctrl->is_volatile && master->ops->g_volatile_ctrl)
- ret = master->ops->g_volatile_ctrl(master);
- /* If OK, then copy the current control values to the caller */
- if (!ret)
- ret = cluster_walk(i, cs, helpers, cur_to_user);
+
+ /* g_volatile_ctrl will update the new control values */
+ if (has_op(master, g_volatile_ctrl) && !is_cur_manual(master)) {
+ for (j = 0; j < master->ncontrols; j++)
+ cur_to_new(master->cluster[j]);
+ ret = call_op(master, g_volatile_ctrl);
+ ctrl_to_user = new_to_user;
+ }
+ /* If OK, then copy the current (for non-volatile controls)
+ or the new (for volatile controls) control values to the
+ caller */
+ if (!ret) {
+ u32 idx = i;
+
+ do {
+ ret = ctrl_to_user(cs->controls + idx,
+ helpers[idx].ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+ }
v4l2_ctrl_unlock(master);
- cluster_done(i, cs, helpers);
}
if (cs->count > ARRAY_SIZE(helper))
@@ -1594,15 +1997,21 @@ static int get_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
{
struct v4l2_ctrl *master = ctrl->cluster[0];
int ret = 0;
+ int i;
if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
return -EACCES;
v4l2_ctrl_lock(master);
/* g_volatile_ctrl will update the new control values */
- if (ctrl->is_volatile && master->ops->g_volatile_ctrl)
- ret = master->ops->g_volatile_ctrl(master);
- *val = ctrl->cur.val;
+ if (ctrl->is_volatile && !is_cur_manual(master)) {
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ ret = call_op(master, g_volatile_ctrl);
+ *val = ctrl->val;
+ } else {
+ *val = ctrl->cur.val;
+ }
v4l2_ctrl_unlock(master);
return ret;
}
@@ -1638,72 +2047,61 @@ EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
/* Core function that calls try/s_ctrl and ensures that the new value is
copied to the current value on a set.
Must be called with ctrl->handler->lock held. */
-static int try_or_set_control_cluster(struct v4l2_ctrl *master, bool set)
+static int try_or_set_cluster(struct v4l2_fh *fh,
+ struct v4l2_ctrl *master, bool set)
{
- bool try = !set;
- int ret = 0;
+ bool update_flag;
+ int ret;
int i;
/* Go through the cluster and either validate the new value or
(if no new value was set), copy the current value to the new
value, ensuring a consistent view for the control ops when
called. */
- for (i = 0; !ret && i < master->ncontrols; i++) {
+ for (i = 0; i < master->ncontrols; i++) {
struct v4l2_ctrl *ctrl = master->cluster[i];
if (ctrl == NULL)
continue;
- if (ctrl->is_new) {
- /* Double check this: it may have changed since the
- last check in try_or_set_ext_ctrls(). */
- if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
- return -EBUSY;
-
- /* Validate if required */
- if (!set)
- ret = validate_new(ctrl);
+ if (!ctrl->is_new) {
+ cur_to_new(ctrl);
continue;
}
- /* No new value was set, so copy the current and force
- a call to try_ctrl later, since the values for the cluster
- may now have changed and the end result might be invalid. */
- try = true;
- cur_to_new(ctrl);
+ /* Check again: it may have changed since the
+ previous check in validate_ctrls(). */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
+ return -EBUSY;
}
- /* For larger clusters you have to call try_ctrl again to
- verify that the controls are still valid after the
- 'cur_to_new' above. */
- if (!ret && master->ops->try_ctrl && try)
- ret = master->ops->try_ctrl(master);
+ ret = call_op(master, try_ctrl);
/* Don't set if there is no change */
- if (!ret && set && cluster_changed(master)) {
- ret = master->ops->s_ctrl(master);
- /* If OK, then make the new values permanent. */
- if (!ret)
- for (i = 0; i < master->ncontrols; i++)
- new_to_cur(master->cluster[i]);
- }
- return ret;
+ if (ret || !set || !cluster_changed(master))
+ return ret;
+ ret = call_op(master, s_ctrl);
+ if (ret)
+ return ret;
+
+ /* If OK, then make the new values permanent. */
+ update_flag = is_cur_manual(master) != is_new_manual(master);
+ for (i = 0; i < master->ncontrols; i++)
+ new_to_cur(fh, master->cluster[i], update_flag && i > 0);
+ return 0;
}
-/* Try or set controls. */
-static int try_or_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs,
- struct ctrl_helper *helpers,
- bool set)
+/* Validate controls. */
+static int validate_ctrls(struct v4l2_ext_controls *cs,
+ struct v4l2_ctrl_helper *helpers, bool set)
{
- unsigned i, j;
+ unsigned i;
int ret = 0;
cs->error_idx = cs->count;
for (i = 0; i < cs->count; i++) {
struct v4l2_ctrl *ctrl = helpers[i].ctrl;
- if (!set)
- cs->error_idx = i;
+ cs->error_idx = i;
if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
return -EACCES;
@@ -1715,50 +2113,22 @@ static int try_or_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
best-effort to avoid that. */
if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
return -EBUSY;
+ ret = validate_new(ctrl, &cs->controls[i]);
+ if (ret)
+ return ret;
}
-
- for (i = 0; !ret && i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ctrl;
- struct v4l2_ctrl *master = ctrl->cluster[0];
-
- cs->error_idx = i;
-
- if (helpers[i].handled)
- continue;
-
- v4l2_ctrl_lock(ctrl);
-
- /* Reset the 'is_new' flags of the cluster */
- for (j = 0; j < master->ncontrols; j++)
- if (master->cluster[j])
- master->cluster[j]->is_new = 0;
-
- /* Copy the new caller-supplied control values.
- user_to_new() sets 'is_new' to 1. */
- ret = cluster_walk(i, cs, helpers, user_to_new);
-
- if (!ret)
- ret = try_or_set_control_cluster(master, set);
-
- /* Copy the new values back to userspace. */
- if (!ret)
- ret = cluster_walk(i, cs, helpers, new_to_user);
-
- v4l2_ctrl_unlock(ctrl);
- cluster_done(i, cs, helpers);
- }
- return ret;
+ return 0;
}
/* Try or try-and-set controls */
-static int try_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
bool set)
{
- struct ctrl_helper helper[4];
- struct ctrl_helper *helpers = helper;
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
+ unsigned i, j;
int ret;
- int i;
cs->error_idx = cs->count;
cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
@@ -1774,25 +2144,49 @@ static int try_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
if (!helpers)
return -ENOMEM;
}
- ret = prepare_ext_ctrls(hdl, cs, helpers, !set);
- if (ret)
- goto free;
-
- /* First 'try' all controls and abort on error */
- ret = try_or_set_ext_ctrls(hdl, cs, helpers, false);
- /* If this is a 'set' operation and the initial 'try' failed,
- then set error_idx to count to tell the application that no
- controls changed value yet. */
- if (set)
+ ret = prepare_ext_ctrls(hdl, cs, helpers);
+ if (!ret)
+ ret = validate_ctrls(cs, helpers, set);
+ if (ret && set)
cs->error_idx = cs->count;
- if (!ret && set) {
- /* Reset 'handled' state */
- for (i = 0; i < cs->count; i++)
- helpers[i].handled = false;
- ret = try_or_set_ext_ctrls(hdl, cs, helpers, true);
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *master;
+ u32 idx = i;
+
+ if (helpers[i].mref == NULL)
+ continue;
+
+ cs->error_idx = i;
+ master = helpers[i].mref->ctrl;
+ v4l2_ctrl_lock(master);
+
+ /* Reset the 'is_new' flags of the cluster */
+ for (j = 0; j < master->ncontrols; j++)
+ if (master->cluster[j])
+ master->cluster[j]->is_new = 0;
+
+ /* Copy the new caller-supplied control values.
+ user_to_new() sets 'is_new' to 1. */
+ do {
+ ret = user_to_new(cs->controls + idx, helpers[idx].ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+
+ if (!ret)
+ ret = try_or_set_cluster(fh, master, set);
+
+ /* Copy the new values back to userspace. */
+ if (!ret) {
+ idx = i;
+ do {
+ ret = new_to_user(cs->controls + idx,
+ helpers[idx].ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+ }
+ v4l2_ctrl_unlock(master);
}
-free:
if (cs->count > ARRAY_SIZE(helper))
kfree(helpers);
return ret;
@@ -1800,37 +2194,39 @@ free:
int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(hdl, cs, false);
+ return try_set_ext_ctrls(NULL, hdl, cs, false);
}
EXPORT_SYMBOL(v4l2_try_ext_ctrls);
-int v4l2_s_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(hdl, cs, true);
+ return try_set_ext_ctrls(fh, hdl, cs, true);
}
EXPORT_SYMBOL(v4l2_s_ext_ctrls);
int v4l2_subdev_try_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(sd->ctrl_handler, cs, false);
+ return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, false);
}
EXPORT_SYMBOL(v4l2_subdev_try_ext_ctrls);
int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(sd->ctrl_handler, cs, true);
+ return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, true);
}
EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
/* Helper function for VIDIOC_S_CTRL compatibility */
-static int set_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
+static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, s32 *val)
{
struct v4l2_ctrl *master = ctrl->cluster[0];
int ret;
int i;
- if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
- return -EACCES;
+ ret = validate_new_int(ctrl, val);
+ if (ret)
+ return ret;
v4l2_ctrl_lock(ctrl);
@@ -1841,28 +2237,30 @@ static int set_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
ctrl->val = *val;
ctrl->is_new = 1;
- ret = try_or_set_control_cluster(master, false);
- if (!ret)
- ret = try_or_set_control_cluster(master, true);
+ ret = try_or_set_cluster(fh, master, true);
*val = ctrl->cur.val;
v4l2_ctrl_unlock(ctrl);
return ret;
}
-int v4l2_s_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
+int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_control *control)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
if (ctrl == NULL || !type_is_int(ctrl))
return -EINVAL;
- return set_ctrl(ctrl, &control->value);
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return -EACCES;
+
+ return set_ctrl(fh, ctrl, &control->value);
}
EXPORT_SYMBOL(v4l2_s_ctrl);
int v4l2_subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
{
- return v4l2_s_ctrl(sd->ctrl_handler, control);
+ return v4l2_s_ctrl(NULL, sd->ctrl_handler, control);
}
EXPORT_SYMBOL(v4l2_subdev_s_ctrl);
@@ -1870,6 +2268,34 @@ int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
{
/* It's a driver bug if this happens. */
WARN_ON(!type_is_int(ctrl));
- return set_ctrl(ctrl, &val);
+ return set_ctrl(NULL, ctrl, &val);
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
+
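+/* Add a new event subscriber to a control; optionally send an initial
+   event carrying the control's current state. */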
+void v4l2_ctrl_add_event(struct v4l2_ctrl *ctrl,
+ struct v4l2_subscribed_event *sev)
+{
+ v4l2_ctrl_lock(ctrl);
+ list_add_tail(&sev->node, &ctrl->ev_subs);
+ if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
+ (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) {
+ struct v4l2_event ev;
+ u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
+
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
+ changes |= V4L2_EVENT_CTRL_CH_VALUE;
+ fill_event(&ev, ctrl, changes);
+ v4l2_event_queue_fh(sev->fh, &ev);
+ }
+ v4l2_ctrl_unlock(ctrl);
+}
+EXPORT_SYMBOL(v4l2_ctrl_add_event);
+
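+/* Remove an event subscriber from a control. */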
+void v4l2_ctrl_del_event(struct v4l2_ctrl *ctrl,
+ struct v4l2_subscribed_event *sev)
+{
+ v4l2_ctrl_lock(ctrl);
+ list_del(&sev->node);
+ v4l2_ctrl_unlock(ctrl);
+}
+EXPORT_SYMBOL(v4l2_ctrl_del_event);
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 4aae501..c72856c 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -209,6 +209,7 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
vdev->v4l2_dev = v4l2_dev;
vdev->fops = &v4l2_subdev_fops;
vdev->release = video_device_release_empty;
+ vdev->ctrl_handler = sd->ctrl_handler;
err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
sd->owner);
if (err < 0)
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index 69fd343..53b190c 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -25,100 +25,39 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
#include <linux/sched.h>
#include <linux/slab.h>
-int v4l2_event_init(struct v4l2_fh *fh)
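+/* Index into the ring of pending events: idx positions past the oldest. */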
+static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
- fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
- if (fh->events == NULL)
- return -ENOMEM;
-
- init_waitqueue_head(&fh->events->wait);
-
- INIT_LIST_HEAD(&fh->events->free);
- INIT_LIST_HEAD(&fh->events->available);
- INIT_LIST_HEAD(&fh->events->subscribed);
-
- fh->events->sequence = -1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(v4l2_event_init);
-
-int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
-{
- struct v4l2_events *events = fh->events;
- unsigned long flags;
-
- if (!events) {
- WARN_ON(1);
- return -ENOMEM;
- }
-
- while (events->nallocated < n) {
- struct v4l2_kevent *kev;
-
- kev = kzalloc(sizeof(*kev), GFP_KERNEL);
- if (kev == NULL)
- return -ENOMEM;
-
- spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- list_add_tail(&kev->list, &events->free);
- events->nallocated++;
- spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(v4l2_event_alloc);
-
-#define list_kfree(list, type, member) \
- while (!list_empty(list)) { \
- type *hi; \
- hi = list_first_entry(list, type, member); \
- list_del(&hi->member); \
- kfree(hi); \
- }
-
-void v4l2_event_free(struct v4l2_fh *fh)
-{
- struct v4l2_events *events = fh->events;
-
- if (!events)
- return;
-
- list_kfree(&events->free, struct v4l2_kevent, list);
- list_kfree(&events->available, struct v4l2_kevent, list);
- list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);
-
- kfree(events);
- fh->events = NULL;
+ idx += sev->first;
+ return idx >= sev->elems ? idx - sev->elems : idx;
}
-EXPORT_SYMBOL_GPL(v4l2_event_free);
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
- struct v4l2_events *events = fh->events;
struct v4l2_kevent *kev;
unsigned long flags;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- if (list_empty(&events->available)) {
+ if (list_empty(&fh->available)) {
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
return -ENOENT;
}
- WARN_ON(events->navailable == 0);
+ WARN_ON(fh->navailable == 0);
- kev = list_first_entry(&events->available, struct v4l2_kevent, list);
- list_move(&kev->list, &events->free);
- events->navailable--;
+ kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
+ list_del(&kev->list);
+ fh->navailable--;
- kev->event.pending = events->navailable;
+ kev->event.pending = fh->navailable;
*event = kev->event;
+ kev->sev->first = sev_pos(kev->sev, 1);
+ kev->sev->in_use--;
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
@@ -128,7 +67,6 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
int nonblocking)
{
- struct v4l2_events *events = fh->events;
int ret;
if (nonblocking)
@@ -139,8 +77,8 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
mutex_unlock(fh->vdev->lock);
do {
- ret = wait_event_interruptible(events->wait,
- events->navailable != 0);
+ ret = wait_event_interruptible(fh->wait,
+ fh->navailable != 0);
if (ret < 0)
break;
@@ -154,23 +92,72 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
-/* Caller must hold fh->event->lock! */
+/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
- struct v4l2_fh *fh, u32 type)
+ struct v4l2_fh *fh, u32 type, u32 id)
{
- struct v4l2_events *events = fh->events;
struct v4l2_subscribed_event *sev;
assert_spin_locked(&fh->vdev->fh_lock);
- list_for_each_entry(sev, &events->subscribed, list) {
- if (sev->type == type)
+ list_for_each_entry(sev, &fh->subscribed, list)
+ if (sev->type == type && sev->id == id)
return sev;
- }
return NULL;
}
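+/* Queue an event on a single filehandle. The caller must hold
+   fh->vdev->fh_lock. */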
+static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
+ const struct timespec *ts)
+{
+ struct v4l2_subscribed_event *sev;
+ struct v4l2_kevent *kev;
+ bool copy_payload = true;
+
+ /* Are we subscribed? */
+ sev = v4l2_event_subscribed(fh, ev->type, ev->id);
+ if (sev == NULL)
+ return;
+
+ /* Increase event sequence number on fh. */
+ fh->sequence++;
+
+ /* Do we have any free events? */
+ if (sev->in_use == sev->elems) {
+ /* no, remove the oldest one */
+ kev = sev->events + sev_pos(sev, 0);
+ list_del(&kev->list);
+ sev->in_use--;
+ sev->first = sev_pos(sev, 1);
+ fh->navailable--;
+ if (sev->elems == 1) {
+ if (sev->replace) {
+ sev->replace(&kev->event, ev);
+ copy_payload = false;
+ }
+ } else if (sev->merge) {
+ struct v4l2_kevent *second_oldest =
+ sev->events + sev_pos(sev, 0);
+ sev->merge(&kev->event, &second_oldest->event);
+ }
+ }
+
+ /* Take one and fill it. */
+ kev = sev->events + sev_pos(sev, sev->in_use);
+ kev->event.type = ev->type;
+ if (copy_payload)
+ kev->event.u = ev->u;
+ kev->event.id = ev->id;
+ kev->event.timestamp = *ts;
+ kev->event.sequence = fh->sequence;
+ sev->in_use++;
+ list_add_tail(&kev->list, &fh->available);
+
+ fh->navailable++;
+
+ wake_up_all(&fh->wait);
+}
+
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
struct v4l2_fh *fh;
@@ -181,81 +168,95 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
spin_lock_irqsave(&vdev->fh_lock, flags);
- list_for_each_entry(fh, &vdev->fh_list, list) {
- struct v4l2_events *events = fh->events;
- struct v4l2_kevent *kev;
+ list_for_each_entry(fh, &vdev->fh_list, list)
+ __v4l2_event_queue_fh(fh, ev, &timestamp);
- /* Are we subscribed? */
- if (!v4l2_event_subscribed(fh, ev->type))
- continue;
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue);
- /* Increase event sequence number on fh. */
- events->sequence++;
+void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
+{
+ unsigned long flags;
+ struct timespec timestamp;
- /* Do we have any free events? */
- if (list_empty(&events->free))
- continue;
+ ktime_get_ts(&timestamp);
- /* Take one and fill it. */
- kev = list_first_entry(&events->free, struct v4l2_kevent, list);
- kev->event.type = ev->type;
- kev->event.u = ev->u;
- kev->event.timestamp = timestamp;
- kev->event.sequence = events->sequence;
- list_move_tail(&kev->list, &events->available);
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ __v4l2_event_queue_fh(fh, ev, &timestamp);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
- events->navailable++;
+int v4l2_event_pending(struct v4l2_fh *fh)
+{
+ return fh->navailable;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_pending);
- wake_up_all(&events->wait);
- }
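+/* Replace the oldest pending ctrl event with the new one, ORing in the
+   old change flags. */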
+static void ctrls_replace(struct v4l2_event *old, const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.ctrl.changes;
- spin_unlock_irqrestore(&vdev->fh_lock, flags);
+ old->u.ctrl = new->u.ctrl;
+ old->u.ctrl.changes |= old_changes;
}
-EXPORT_SYMBOL_GPL(v4l2_event_queue);
-int v4l2_event_pending(struct v4l2_fh *fh)
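+/* Merge the change flags of the old ctrl event into the new one. */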
+static void ctrls_merge(const struct v4l2_event *old, struct v4l2_event *new)
{
- return fh->events->navailable;
+ new->u.ctrl.changes |= old->u.ctrl.changes;
}
-EXPORT_SYMBOL_GPL(v4l2_event_pending);
int v4l2_event_subscribe(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
+ struct v4l2_event_subscription *sub, unsigned elems)
{
- struct v4l2_events *events = fh->events;
- struct v4l2_subscribed_event *sev;
+ struct v4l2_subscribed_event *sev, *found_ev;
+ struct v4l2_ctrl *ctrl = NULL;
unsigned long flags;
-
- if (fh->events == NULL) {
- WARN_ON(1);
- return -ENOMEM;
+ unsigned i;
+
+ if (elems < 1)
+ elems = 1;
+ if (sub->type == V4L2_EVENT_CTRL) {
+ ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
+ if (ctrl == NULL)
+ return -EINVAL;
}
- sev = kmalloc(sizeof(*sev), GFP_KERNEL);
+ sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
if (!sev)
return -ENOMEM;
-
- spin_lock_irqsave(&fh->vdev->fh_lock, flags);
-
- if (v4l2_event_subscribed(fh, sub->type) == NULL) {
- INIT_LIST_HEAD(&sev->list);
- sev->type = sub->type;
-
- list_add(&sev->list, &events->subscribed);
- sev = NULL;
+ for (i = 0; i < elems; i++)
+ sev->events[i].sev = sev;
+ sev->type = sub->type;
+ sev->id = sub->id;
+ sev->flags = sub->flags;
+ sev->fh = fh;
+ sev->elems = elems;
+ if (ctrl) {
+ sev->replace = ctrls_replace;
+ sev->merge = ctrls_merge;
}
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
+ if (!found_ev)
+ list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- kfree(sev);
+ /* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */
+ if (found_ev)
+ kfree(sev);
+ else if (ctrl)
+ v4l2_ctrl_add_event(ctrl, sev);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
-static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
+void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
- struct v4l2_events *events = fh->events;
+ struct v4l2_event_subscription sub;
struct v4l2_subscribed_event *sev;
unsigned long flags;
@@ -263,15 +264,18 @@ static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
sev = NULL;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- if (!list_empty(&events->subscribed)) {
- sev = list_first_entry(&events->subscribed,
- struct v4l2_subscribed_event, list);
- list_del(&sev->list);
+ if (!list_empty(&fh->subscribed)) {
+ sev = list_first_entry(&fh->subscribed,
+ struct v4l2_subscribed_event, list);
+ sub.type = sev->type;
+ sub.id = sev->id;
}
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- kfree(sev);
+ if (sev)
+ v4l2_event_unsubscribe(fh, &sub);
} while (sev);
}
+EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
@@ -286,11 +290,19 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- sev = v4l2_event_subscribed(fh, sub->type);
- if (sev != NULL)
+ sev = v4l2_event_subscribed(fh, sub->type, sub->id);
+ if (sev != NULL) {
list_del(&sev->list);
+ sev->fh = NULL;
+ }
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ if (sev && sev->type == V4L2_EVENT_CTRL) {
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(fh->ctrl_handler, sev->id);
+
+ if (ctrl)
+ v4l2_ctrl_del_event(ctrl, sev);
+ }
kfree(sev);
diff --git a/drivers/media/video/v4l2-fh.c b/drivers/media/video/v4l2-fh.c
index 717f71e..122822d 100644
--- a/drivers/media/video/v4l2-fh.c
+++ b/drivers/media/video/v4l2-fh.c
@@ -29,23 +29,18 @@
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
-int v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
+void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
{
fh->vdev = vdev;
+ /* Inherit from video_device. May be overridden by the driver. */
+ fh->ctrl_handler = vdev->ctrl_handler;
INIT_LIST_HEAD(&fh->list);
set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
fh->prio = V4L2_PRIORITY_UNSET;
-
- /*
- * fh->events only needs to be initialized if the driver
- * supports the VIDIOC_SUBSCRIBE_EVENT ioctl.
- */
- if (vdev->ioctl_ops && vdev->ioctl_ops->vidioc_subscribe_event)
- return v4l2_event_init(fh);
-
- fh->events = NULL;
-
- return 0;
+ init_waitqueue_head(&fh->wait);
+ INIT_LIST_HEAD(&fh->available);
+ INIT_LIST_HEAD(&fh->subscribed);
+ fh->sequence = -1;
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
@@ -91,10 +86,8 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
{
if (fh->vdev == NULL)
return;
-
+ v4l2_event_unsubscribe_all(fh);
fh->vdev = NULL;
-
- v4l2_event_free(fh);
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 69e8c6f..002ce13 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/version.h>
#include <linux/videodev2.h>
@@ -542,12 +543,12 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_fh *vfh = NULL;
struct v4l2_format f_copy;
int use_fh_prio = 0;
- long ret = -EINVAL;
+ long ret = -ENOTTY;
if (ops == NULL) {
printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n",
vfd->name);
- return -EINVAL;
+ return ret;
}
if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
@@ -605,6 +606,7 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_querycap)
break;
+ cap->version = LINUX_VERSION_CODE;
ret = ops->vidioc_querycap(file, fh, cap);
if (!ret)
dbgarg(cmd, "driver=%s, card=%s, bus=%s, "
@@ -1418,7 +1420,9 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_queryctrl *p = arg;
- if (vfd->ctrl_handler)
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_queryctrl(vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
ret = v4l2_queryctrl(vfd->ctrl_handler, p);
else if (ops->vidioc_queryctrl)
ret = ops->vidioc_queryctrl(file, fh, p);
@@ -1438,7 +1442,9 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_control *p = arg;
- if (vfd->ctrl_handler)
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_g_ctrl(vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
ret = v4l2_g_ctrl(vfd->ctrl_handler, p);
else if (ops->vidioc_g_ctrl)
ret = ops->vidioc_g_ctrl(file, fh, p);
@@ -1470,14 +1476,18 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
- if (!vfd->ctrl_handler &&
+ if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
!ops->vidioc_s_ctrl && !ops->vidioc_s_ext_ctrls)
break;
dbgarg(cmd, "id=0x%x, value=%d\n", p->id, p->value);
+ if (vfh && vfh->ctrl_handler) {
+ ret = v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
+ break;
+ }
if (vfd->ctrl_handler) {
- ret = v4l2_s_ctrl(vfd->ctrl_handler, p);
+ ret = v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
break;
}
if (ops->vidioc_s_ctrl) {
@@ -1501,7 +1511,9 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls *p = arg;
p->error_idx = p->count;
- if (vfd->ctrl_handler)
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
ret = v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
else if (ops->vidioc_g_ext_ctrls && check_ext_ctrls(p, 0))
ret = ops->vidioc_g_ext_ctrls(file, fh, p);
@@ -1515,11 +1527,14 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls *p = arg;
p->error_idx = p->count;
- if (!vfd->ctrl_handler && !ops->vidioc_s_ext_ctrls)
+ if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
+ !ops->vidioc_s_ext_ctrls)
break;
v4l_print_ext_ctrls(cmd, vfd, p, 1);
- if (vfd->ctrl_handler)
- ret = v4l2_s_ext_ctrls(vfd->ctrl_handler, p);
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
+ ret = v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
else if (check_ext_ctrls(p, 0))
ret = ops->vidioc_s_ext_ctrls(file, fh, p);
break;
@@ -1529,10 +1544,13 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls *p = arg;
p->error_idx = p->count;
- if (!vfd->ctrl_handler && !ops->vidioc_try_ext_ctrls)
+ if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
+ !ops->vidioc_try_ext_ctrls)
break;
v4l_print_ext_ctrls(cmd, vfd, p, 1);
- if (vfd->ctrl_handler)
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
ret = v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
else if (check_ext_ctrls(p, 0))
ret = ops->vidioc_try_ext_ctrls(file, fh, p);
@@ -1542,7 +1560,9 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_querymenu *p = arg;
- if (vfd->ctrl_handler)
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_querymenu(vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
ret = v4l2_querymenu(vfd->ctrl_handler, p);
else if (ops->vidioc_querymenu)
ret = ops->vidioc_querymenu(file, fh, p);
@@ -2276,7 +2296,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
break;
}
*user_ptr = (void __user *)buf->m.planes;
- *kernel_ptr = (void **)&buf->m.planes;
+ *kernel_ptr = (void *)&buf->m.planes;
*array_size = sizeof(struct v4l2_plane) * buf->length;
ret = 1;
}
@@ -2290,7 +2310,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
if (ctrls->count != 0) {
*user_ptr = (void __user *)ctrls->controls;
- *kernel_ptr = (void **)&ctrls->controls;
+ *kernel_ptr = (void *)&ctrls->controls;
*array_size = sizeof(struct v4l2_ext_control)
* ctrls->count;
ret = 1;
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
index 812729e..b7967c9 100644
--- a/drivers/media/video/v4l2-subdev.c
+++ b/drivers/media/video/v4l2-subdev.c
@@ -75,20 +75,7 @@ static int subdev_open(struct file *file)
return ret;
}
- ret = v4l2_fh_init(&subdev_fh->vfh, vdev);
- if (ret)
- goto err;
-
- if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) {
- ret = v4l2_event_init(&subdev_fh->vfh);
- if (ret)
- goto err;
-
- ret = v4l2_event_alloc(&subdev_fh->vfh, sd->nevents);
- if (ret)
- goto err;
- }
-
+ v4l2_fh_init(&subdev_fh->vfh, vdev);
v4l2_fh_add(&subdev_fh->vfh);
file->private_data = &subdev_fh->vfh;
#if defined(CONFIG_MEDIA_CONTROLLER)
@@ -155,25 +142,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
switch (cmd) {
case VIDIOC_QUERYCTRL:
- return v4l2_queryctrl(sd->ctrl_handler, arg);
+ return v4l2_queryctrl(vfh->ctrl_handler, arg);
case VIDIOC_QUERYMENU:
- return v4l2_querymenu(sd->ctrl_handler, arg);
+ return v4l2_querymenu(vfh->ctrl_handler, arg);
case VIDIOC_G_CTRL:
- return v4l2_g_ctrl(sd->ctrl_handler, arg);
+ return v4l2_g_ctrl(vfh->ctrl_handler, arg);
case VIDIOC_S_CTRL:
- return v4l2_s_ctrl(sd->ctrl_handler, arg);
+ return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
case VIDIOC_G_EXT_CTRLS:
- return v4l2_g_ext_ctrls(sd->ctrl_handler, arg);
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);
case VIDIOC_S_EXT_CTRLS:
- return v4l2_s_ext_ctrls(sd->ctrl_handler, arg);
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);
case VIDIOC_TRY_EXT_CTRLS:
- return v4l2_try_ext_ctrls(sd->ctrl_handler, arg);
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);
case VIDIOC_DQEVENT:
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
@@ -297,7 +284,7 @@ static unsigned int subdev_poll(struct file *file, poll_table *wait)
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
return POLLERR;
- poll_wait(file, &fh->events->wait, wait);
+ poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
return POLLPRI;
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index ddb8f4b..f300dea 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -108,8 +108,9 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
if (PageHighMem(pages[0]))
/* DMA to highmem pages might not work */
goto highmem;
- sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
- size -= PAGE_SIZE - offset;
+ sg_set_page(&sglist[0], pages[0],
+ min_t(size_t, PAGE_SIZE - offset, size), offset);
+ size -= min_t(size_t, PAGE_SIZE - offset, size);
for (i = 1; i < nr_pages; i++) {
if (NULL == pages[i])
goto nopage;
diff --git a/drivers/media/video/videobuf2-dma-sg.c b/drivers/media/video/videobuf2-dma-sg.c
index 10a20d9..065f468 100644
--- a/drivers/media/video/videobuf2-dma-sg.c
+++ b/drivers/media/video/videobuf2-dma-sg.c
@@ -48,12 +48,10 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
buf->sg_desc.size = size;
buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- buf->sg_desc.sglist = vmalloc(buf->sg_desc.num_pages *
+ buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
sizeof(*buf->sg_desc.sglist));
if (!buf->sg_desc.sglist)
goto fail_sglist_alloc;
- memset(buf->sg_desc.sglist, 0, buf->sg_desc.num_pages *
- sizeof(*buf->sg_desc.sglist));
sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
@@ -136,13 +134,11 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
buf->sg_desc.num_pages = last - first + 1;
- buf->sg_desc.sglist = vmalloc(
+ buf->sg_desc.sglist = vzalloc(
buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
if (!buf->sg_desc.sglist)
goto userptr_fail_sglist_alloc;
- memset(buf->sg_desc.sglist, 0,
- buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
diff --git a/drivers/media/video/videobuf2-memops.c b/drivers/media/video/videobuf2-memops.c
index b03c3ae..569eeb3 100644
--- a/drivers/media/video/videobuf2-memops.c
+++ b/drivers/media/video/videobuf2-memops.c
@@ -176,7 +176,7 @@ int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
vma->vm_ops->open(vma);
- printk(KERN_DEBUG "%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
+ pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
__func__, paddr, vma->vm_start, size);
return 0;
@@ -194,7 +194,7 @@ static void vb2_common_vm_open(struct vm_area_struct *vma)
{
struct vb2_vmarea_handler *h = vma->vm_private_data;
- printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+ pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
__func__, h, atomic_read(h->refcount), vma->vm_start,
vma->vm_end);
@@ -212,7 +212,7 @@ static void vb2_common_vm_close(struct vm_area_struct *vma)
{
struct vb2_vmarea_handler *h = vma->vm_private_data;
- printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+ pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
__func__, h, atomic_read(h->refcount), vma->vm_start,
vma->vm_end);
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index d63e9d9..52a0a37 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -36,7 +36,6 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/kmod.h>
#include <linux/i2c.h>
@@ -61,8 +60,7 @@
// #define VINO_DEBUG
// #define VINO_DEBUG_INT
-#define VINO_MODULE_VERSION "0.0.6"
-#define VINO_VERSION_CODE KERNEL_VERSION(0, 0, 6)
+#define VINO_MODULE_VERSION "0.0.7"
MODULE_DESCRIPTION("SGI VINO Video4Linux2 driver");
MODULE_VERSION(VINO_MODULE_VERSION);
@@ -2934,7 +2932,6 @@ static int vino_querycap(struct file *file, void *__fh,
strcpy(cap->driver, vino_driver_name);
strcpy(cap->card, vino_driver_description);
strcpy(cap->bus_info, vino_bus_name);
- cap->version = VINO_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 2238a61..a848bd2 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -22,7 +22,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/font.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <linux/kthread.h>
@@ -32,6 +31,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
#define VIVI_MODULE_NAME "vivi"
@@ -44,15 +44,12 @@
#define MAX_WIDTH 1920
#define MAX_HEIGHT 1200
-#define VIVI_MAJOR_VERSION 0
-#define VIVI_MINOR_VERSION 8
-#define VIVI_RELEASE 0
-#define VIVI_VERSION \
- KERNEL_VERSION(VIVI_MAJOR_VERSION, VIVI_MINOR_VERSION, VIVI_RELEASE)
+#define VIVI_VERSION "0.8.1"
MODULE_DESCRIPTION("Video Technology Magazine Virtual Video Capture Board");
MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(VIVI_VERSION);
static unsigned video_nr = -1;
module_param(video_nr, uint, 0644);
@@ -167,6 +164,11 @@ struct vivi_dev {
struct v4l2_ctrl *contrast;
struct v4l2_ctrl *saturation;
struct v4l2_ctrl *hue;
+ struct {
+ /* autogain/gain cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ };
struct v4l2_ctrl *volume;
struct v4l2_ctrl *button;
struct v4l2_ctrl *boolean;
@@ -174,6 +176,7 @@ struct vivi_dev {
struct v4l2_ctrl *int64;
struct v4l2_ctrl *menu;
struct v4l2_ctrl *string;
+ struct v4l2_ctrl *bitmask;
spinlock_t slock;
struct mutex mutex;
@@ -457,6 +460,7 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
unsigned ms;
char str[100];
int h, line = 1;
+ s32 gain;
if (!vbuf)
return;
@@ -479,6 +483,7 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
dev->width, dev->height, dev->input);
gen_text(dev, vbuf, line++ * 16, 16, str);
+ gain = v4l2_ctrl_g_ctrl(dev->gain);
mutex_lock(&dev->ctrl_handler.lock);
snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
dev->brightness->cur.val,
@@ -486,11 +491,13 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
dev->saturation->cur.val,
dev->hue->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " volume %3d ", dev->volume->cur.val);
+ snprintf(str, sizeof(str), " autogain %d, gain %3d, volume %3d ",
+ dev->autogain->cur.val, gain, dev->volume->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " int32 %d, int64 %lld ",
+ snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
dev->int32->cur.val,
- dev->int64->cur.val64);
+ dev->int64->cur.val64,
+ dev->bitmask->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
dev->boolean->cur.val,
@@ -524,11 +531,13 @@ static void vivi_thread_tick(struct vivi_dev *dev)
spin_lock_irqsave(&dev->slock, flags);
if (list_empty(&dma_q->active)) {
dprintk(dev, 1, "No active queue to serve\n");
- goto unlock;
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return;
}
buf = list_entry(dma_q->active.next, struct vivi_buffer, list);
list_del(&buf->list);
+ spin_unlock_irqrestore(&dev->slock, flags);
do_gettimeofday(&buf->vb.v4l2_buf.timestamp);
@@ -538,8 +547,6 @@ static void vivi_thread_tick(struct vivi_dev *dev)
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
-unlock:
- spin_unlock_irqrestore(&dev->slock, flags);
}
#define frames_to_ms(frames) \
@@ -812,7 +819,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strcpy(cap->driver, "vivi");
strcpy(cap->card, "vivi");
strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
- cap->version = VIVI_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | \
V4L2_CAP_READWRITE;
return 0;
@@ -975,14 +981,37 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
if (i >= NUM_INPUTS)
return -EINVAL;
+ if (i == dev->input)
+ return 0;
+
dev->input = i;
precalculate_bars(dev);
precalculate_line(dev);
return 0;
}
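+/* vivi only supports control events. */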
+static int vidioc_subscribe_event(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_CTRL:
+ return v4l2_event_subscribe(fh, sub, 0);
+ default:
+ return -EINVAL;
+ }
+}
+
/* --- controls ---------------------------------------------- */
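+/* Report a fake gain value derived from the jiffies counter. */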
+static int vivi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
+
+ if (ctrl == dev->autogain)
+ dev->gain->val = jiffies & 0xff;
+ return 0;
+}
+
static int vivi_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
@@ -1010,10 +1039,17 @@ static unsigned int
vivi_poll(struct file *file, struct poll_table_struct *wait)
{
struct vivi_dev *dev = video_drvdata(file);
+ struct v4l2_fh *fh = file->private_data;
struct vb2_queue *q = &dev->vb_vidq;
+ unsigned int res;
dprintk(dev, 1, "%s\n", __func__);
- return vb2_poll(q, file, wait);
+ res = vb2_poll(q, file, wait);
+ if (v4l2_event_pending(fh))
+ res |= POLLPRI;
+ else
+ poll_wait(file, &fh->wait, wait);
+ return res;
}
static int vivi_close(struct file *file)
@@ -1045,6 +1081,7 @@ static int vivi_mmap(struct file *file, struct vm_area_struct *vma)
}
static const struct v4l2_ctrl_ops vivi_ctrl_ops = {
+ .g_volatile_ctrl = vivi_g_volatile_ctrl,
.s_ctrl = vivi_s_ctrl,
};
@@ -1117,9 +1154,20 @@ static const struct v4l2_ctrl_config vivi_ctrl_string = {
.step = 1,
};
+static const struct v4l2_ctrl_config vivi_ctrl_bitmask = {
+ .ops = &vivi_ctrl_ops,
+ .id = VIVI_CID_CUSTOM_BASE + 6,
+ .name = "Bitmask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .def = 0x80002000,
+ .min = 0,
+ .max = 0x80402010,
+ .step = 0,
+};
+
static const struct v4l2_file_operations vivi_fops = {
.owner = THIS_MODULE,
- .open = v4l2_fh_open,
+ .open = v4l2_fh_open,
.release = vivi_close,
.read = vivi_read,
.poll = vivi_poll,
@@ -1143,6 +1191,8 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_s_input = vidioc_s_input,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
+ .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device vivi_template = {
@@ -1213,16 +1263,22 @@ static int __init vivi_create_instance(int inst)
V4L2_CID_SATURATION, 0, 255, 1, 127);
dev->hue = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
V4L2_CID_HUE, -128, 127, 1, 0);
+ dev->autogain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ dev->gain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 100);
dev->button = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_button, NULL);
dev->int32 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int32, NULL);
dev->int64 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int64, NULL);
dev->boolean = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_boolean, NULL);
dev->menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_menu, NULL);
dev->string = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_string, NULL);
+ dev->bitmask = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_bitmask, NULL);
if (hdl->error) {
ret = hdl->error;
goto unreg_dev;
}
+ v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
dev->v4l2_dev.ctrl_handler = hdl;
/* initialize locks */
@@ -1325,9 +1381,8 @@ static int __init vivi_init(void)
}
printk(KERN_INFO "Video Technology Magazine Virtual Video "
- "Capture Board ver %u.%u.%u successfully loaded.\n",
- (VIVI_VERSION >> 16) & 0xFF, (VIVI_VERSION >> 8) & 0xFF,
- VIVI_VERSION & 0xFF);
+ "Capture Board ver %s successfully loaded.\n",
+ VIVI_VERSION);
/* n_devs will reflect the actual number of allocated devices */
n_devs = i;
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index fa35639..453dbbd 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -57,7 +57,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
@@ -127,7 +126,7 @@ struct w9966 {
MODULE_AUTHOR("Jakob Kemi <jakob.kemi@post.utfors.se>");
MODULE_DESCRIPTION("Winbond w9966cf WebCam driver (0.32)");
MODULE_LICENSE("GPL");
-
+MODULE_VERSION("0.33.1");
#ifdef MODULE
static const char *pardev[] = {[0 ... W9966_MAXCAMS] = ""};
@@ -568,7 +567,6 @@ static int cam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->version = KERNEL_VERSION(0, 33, 0);
vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index f3f6400..d7166af 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -41,10 +41,6 @@ struct zoran_sync {
};
-#define MAJOR_VERSION 0 /* driver major version */
-#define MINOR_VERSION 10 /* driver minor version */
-#define RELEASE_VERSION 0 /* release version */
-
#define ZORAN_NAME "ZORAN" /* name of the device */
#define ZR_DEVNAME(zr) ((zr)->name)
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 79b04ac..c3602d6 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -123,9 +123,12 @@ int zr36067_debug = 1;
module_param_named(debug, zr36067_debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-5)");
+#define ZORAN_VERSION "0.10.1"
+
MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver");
MODULE_AUTHOR("Serguei Miridonov");
MODULE_LICENSE("GPL");
+MODULE_VERSION(ZORAN_VERSION);
#define ZR_DEVICE(subven, subdev, data) { \
.vendor = PCI_VENDOR_ID_ZORAN, .device = PCI_DEVICE_ID_ZORAN_36057, \
@@ -1459,8 +1462,8 @@ static int __init zoran_init(void)
{
int res;
- printk(KERN_INFO "Zoran MJPEG board driver version %d.%d.%d\n",
- MAJOR_VERSION, MINOR_VERSION, RELEASE_VERSION);
+ printk(KERN_INFO "Zoran MJPEG board driver version %s\n",
+ ZORAN_VERSION);
/* check the parameters we have been given, adjust if necessary */
if (v4l_nbufs < 2)
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 2771d81..d4d05d2 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -44,7 +44,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -1538,8 +1537,6 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability
strncpy(cap->driver, "zoran", sizeof(cap->driver)-1);
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(zr->pci_dev));
- cap->version = KERNEL_VERSION(MAJOR_VERSION, MINOR_VERSION,
- RELEASE_VERSION);
cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY;
return 0;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 7dfb01e..c492846 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/vmalloc.h>
@@ -42,8 +41,7 @@
/* Version Information */
-#define DRIVER_VERSION "v0.73"
-#define ZR364XX_VERSION_CODE KERNEL_VERSION(0, 7, 3)
+#define DRIVER_VERSION "0.7.4"
#define DRIVER_AUTHOR "Antoine Jacquet, http://royale.zerezo.com/"
#define DRIVER_DESC "Zoran 364xx"
@@ -744,7 +742,6 @@ static int zr364xx_vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, cam->udev->product, sizeof(cap->card));
strlcpy(cap->bus_info, dev_name(&cam->udev->dev),
sizeof(cap->bus_info));
- cap->version = ZR364XX_VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
@@ -1721,3 +1718,4 @@ module_exit(zr364xx_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index a1d4ee6..ce61a57 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -827,7 +827,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
* DID_SOFT_ERROR is set.
*/
if (ioc->bus_type == SPI) {
- if (pScsiReq->CDB[0] == READ_6 ||
+ if ((pScsiReq->CDB[0] == READ_6 && ((pScsiReq->CDB[1] & 0x02) == 0)) ||
pScsiReq->CDB[0] == READ_10 ||
pScsiReq->CDB[0] == READ_12 ||
pScsiReq->CDB[0] == READ_16 ||
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 74fbe56..c8ed7b6 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -59,7 +59,7 @@
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 6ca938a..21574bd 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -171,6 +171,37 @@ config MFD_TPS6586X
This driver can also be built as a module. If so, the module
will be called tps6586x.
+config MFD_TPS65910
+ bool "TPS65910 Power Management chip"
+ depends on I2C=y && GPIOLIB
+ select MFD_CORE
+ select GPIO_TPS65910
+ help
+	  If you say yes here you get support for the TPS65910 series of
+ Power Management chips.
+
+config MFD_TPS65912
+ bool
+ depends on GPIOLIB
+
+config MFD_TPS65912_I2C
+ bool "TPS95612 Power Management chip with I2C"
+ select MFD_CORE
+ select MFD_TPS65912
+ depends on I2C=y && GPIOLIB
+ help
+ If you say yes here you get support for the TPS65912 series of
+ PM chips with I2C interface.
+
+config MFD_TPS65912_SPI
+ bool "TPS65912 Power Management chip with SPI"
+ select MFD_CORE
+ select MFD_TPS65912
+ depends on SPI_MASTER && GPIOLIB
+ help
+ If you say yes here you get support for the TPS65912 series of
+ PM chips with SPI interface.
+
config MENELAUS
bool "Texas Instruments TWL92330/Menelaus PM chip"
depends on I2C=y && ARCH_OMAP2
@@ -218,7 +249,7 @@ config TWL4030_POWER
and load scripts controlling which resources are switched off/on
or reset when a sleep, wakeup or warm reset event occurs.
-config TWL4030_CODEC
+config MFD_TWL4030_AUDIO
bool
depends on TWL4030_CORE
select MFD_CORE
@@ -233,6 +264,12 @@ config TWL6030_PWM
Say yes here if you want support for TWL6030 PWM.
This is used to control charging LED brightness.
+config TWL6040_CORE
+ bool
+ depends on TWL4030_CORE && GENERIC_HARDIRQS
+ select MFD_CORE
+ default n
+
config MFD_STMPE
bool "Support STMicroelectronics STMPE"
depends on I2C=y && GENERIC_HARDIRQS
@@ -656,8 +693,9 @@ config MFD_JANZ_CMODIO
CAN and GPIO controllers.
config MFD_JZ4740_ADC
- tristate "Support for the JZ4740 SoC ADC core"
+ bool "Support for the JZ4740 SoC ADC core"
select MFD_CORE
+ select GENERIC_IRQ_CHIP
depends on MACH_JZ4740
help
Say yes here if you want support for the ADC unit in the JZ4740 SoC.
@@ -719,18 +757,19 @@ config MFD_PM8XXX_IRQ
This is required to use certain other PM 8xxx features, such as GPIO
and MPP.
-config MFD_TPS65910
- bool "TPS65910 Power Management chip"
- depends on I2C=y && GPIOLIB
- select MFD_CORE
- select GPIO_TPS65910
- help
- if you say yes here you get support for the TPS65910 series of
- Power Management chips.
-
config TPS65911_COMPARATOR
tristate
+config MFD_AAT2870_CORE
+ bool "Support for the AnalogicTech AAT2870"
+ select MFD_CORE
+ depends on I2C=y && GPIOLIB
+ help
+ If you say yes here you get support for the AAT2870.
+	  This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index d7d47d2..c580203 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
+wm831x-objs += wm831x-auxadc.o
obj-$(CONFIG_MFD_WM831X) += wm831x.o
obj-$(CONFIG_MFD_WM831X_I2C) += wm831x-i2c.o
obj-$(CONFIG_MFD_WM831X_SPI) += wm831x-spi.o
@@ -35,13 +36,19 @@ obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
+obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
+tps65912-objs := tps65912-core.o tps65912-irq.o
+obj-$(CONFIG_MFD_TPS65912) += tps65912.o
+obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
+obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
-obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
+obj-$(CONFIG_MFD_TWL4030_AUDIO) += twl4030-audio.o
obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
+obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
@@ -93,5 +100,5 @@ obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
-obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
+obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
new file mode 100644
index 0000000..345dc65
--- /dev/null
+++ b/drivers/mfd/aat2870-core.c
@@ -0,0 +1,535 @@
+/*
+ * linux/drivers/mfd/aat2870-core.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/aat2870.h>
+#include <linux/regulator/machine.h>
+
+static struct aat2870_register aat2870_regs[AAT2870_REG_NUM] = {
+ /* readable, writeable, value */
+ { 0, 1, 0x00 }, /* 0x00 AAT2870_BL_CH_EN */
+ { 0, 1, 0x16 }, /* 0x01 AAT2870_BLM */
+ { 0, 1, 0x16 }, /* 0x02 AAT2870_BLS */
+ { 0, 1, 0x56 }, /* 0x03 AAT2870_BL1 */
+ { 0, 1, 0x56 }, /* 0x04 AAT2870_BL2 */
+ { 0, 1, 0x56 }, /* 0x05 AAT2870_BL3 */
+ { 0, 1, 0x56 }, /* 0x06 AAT2870_BL4 */
+ { 0, 1, 0x56 }, /* 0x07 AAT2870_BL5 */
+ { 0, 1, 0x56 }, /* 0x08 AAT2870_BL6 */
+ { 0, 1, 0x56 }, /* 0x09 AAT2870_BL7 */
+ { 0, 1, 0x56 }, /* 0x0A AAT2870_BL8 */
+ { 0, 1, 0x00 }, /* 0x0B AAT2870_FLR */
+ { 0, 1, 0x03 }, /* 0x0C AAT2870_FM */
+ { 0, 1, 0x03 }, /* 0x0D AAT2870_FS */
+ { 0, 1, 0x10 }, /* 0x0E AAT2870_ALS_CFG0 */
+ { 0, 1, 0x06 }, /* 0x0F AAT2870_ALS_CFG1 */
+ { 0, 1, 0x00 }, /* 0x10 AAT2870_ALS_CFG2 */
+ { 1, 0, 0x00 }, /* 0x11 AAT2870_AMB */
+ { 0, 1, 0x00 }, /* 0x12 AAT2870_ALS0 */
+ { 0, 1, 0x00 }, /* 0x13 AAT2870_ALS1 */
+ { 0, 1, 0x00 }, /* 0x14 AAT2870_ALS2 */
+ { 0, 1, 0x00 }, /* 0x15 AAT2870_ALS3 */
+ { 0, 1, 0x00 }, /* 0x16 AAT2870_ALS4 */
+ { 0, 1, 0x00 }, /* 0x17 AAT2870_ALS5 */
+ { 0, 1, 0x00 }, /* 0x18 AAT2870_ALS6 */
+ { 0, 1, 0x00 }, /* 0x19 AAT2870_ALS7 */
+ { 0, 1, 0x00 }, /* 0x1A AAT2870_ALS8 */
+ { 0, 1, 0x00 }, /* 0x1B AAT2870_ALS9 */
+ { 0, 1, 0x00 }, /* 0x1C AAT2870_ALSA */
+ { 0, 1, 0x00 }, /* 0x1D AAT2870_ALSB */
+ { 0, 1, 0x00 }, /* 0x1E AAT2870_ALSC */
+ { 0, 1, 0x00 }, /* 0x1F AAT2870_ALSD */
+ { 0, 1, 0x00 }, /* 0x20 AAT2870_ALSE */
+ { 0, 1, 0x00 }, /* 0x21 AAT2870_ALSF */
+ { 0, 1, 0x00 }, /* 0x22 AAT2870_SUB_SET */
+ { 0, 1, 0x00 }, /* 0x23 AAT2870_SUB_CTRL */
+ { 0, 1, 0x00 }, /* 0x24 AAT2870_LDO_AB */
+ { 0, 1, 0x00 }, /* 0x25 AAT2870_LDO_CD */
+ { 0, 1, 0x00 }, /* 0x26 AAT2870_LDO_EN */
+};
+
+static struct mfd_cell aat2870_devs[] = {
+ {
+ .name = "aat2870-backlight",
+ .id = AAT2870_ID_BL,
+ .pdata_size = sizeof(struct aat2870_bl_platform_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOA,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOB,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOC,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOD,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+};
+
+static int __aat2870_read(struct aat2870_data *aat2870, u8 addr, u8 *val)
+{
+ int ret;
+
+ if (addr >= AAT2870_REG_NUM) {
+ dev_err(aat2870->dev, "Invalid address, 0x%02x\n", addr);
+ return -EINVAL;
+ }
+
+ if (!aat2870->reg_cache[addr].readable) {
+ *val = aat2870->reg_cache[addr].value;
+ goto out;
+ }
+
+ ret = i2c_master_send(aat2870->client, &addr, 1);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+
+ ret = i2c_master_recv(aat2870->client, val, 1);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+
+out:
+ dev_dbg(aat2870->dev, "read: addr=0x%02x, val=0x%02x\n", addr, *val);
+ return 0;
+}
+
+static int __aat2870_write(struct aat2870_data *aat2870, u8 addr, u8 val)
+{
+ u8 msg[2];
+ int ret;
+
+ if (addr >= AAT2870_REG_NUM) {
+ dev_err(aat2870->dev, "Invalid address, 0x%02x\n", addr);
+ return -EINVAL;
+ }
+
+ if (!aat2870->reg_cache[addr].writeable) {
+ dev_err(aat2870->dev, "Address 0x%02x is not writeable\n",
+ addr);
+ return -EINVAL;
+ }
+
+ msg[0] = addr;
+ msg[1] = val;
+ ret = i2c_master_send(aat2870->client, msg, 2);
+ if (ret < 0)
+ return ret;
+ if (ret != 2)
+ return -EIO;
+
+ aat2870->reg_cache[addr].value = val;
+
+ dev_dbg(aat2870->dev, "write: addr=0x%02x, val=0x%02x\n", addr, val);
+ return 0;
+}
+
+static int aat2870_read(struct aat2870_data *aat2870, u8 addr, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&aat2870->io_lock);
+ ret = __aat2870_read(aat2870, addr, val);
+ mutex_unlock(&aat2870->io_lock);
+
+ return ret;
+}
+
+static int aat2870_write(struct aat2870_data *aat2870, u8 addr, u8 val)
+{
+ int ret;
+
+ mutex_lock(&aat2870->io_lock);
+ ret = __aat2870_write(aat2870, addr, val);
+ mutex_unlock(&aat2870->io_lock);
+
+ return ret;
+}
+
+static int aat2870_update(struct aat2870_data *aat2870, u8 addr, u8 mask,
+ u8 val)
+{
+ int change;
+ u8 old_val, new_val;
+ int ret;
+
+ mutex_lock(&aat2870->io_lock);
+
+ ret = __aat2870_read(aat2870, addr, &old_val);
+ if (ret)
+ goto out_unlock;
+
+ new_val = (old_val & ~mask) | (val & mask);
+ change = old_val != new_val;
+ if (change)
+ ret = __aat2870_write(aat2870, addr, new_val);
+
+out_unlock:
+ mutex_unlock(&aat2870->io_lock);
+
+ return ret;
+}
+
+static inline void aat2870_enable(struct aat2870_data *aat2870)
+{
+ if (aat2870->en_pin >= 0)
+ gpio_set_value(aat2870->en_pin, 1);
+
+ aat2870->is_enable = 1;
+}
+
+static inline void aat2870_disable(struct aat2870_data *aat2870)
+{
+ if (aat2870->en_pin >= 0)
+ gpio_set_value(aat2870->en_pin, 0);
+
+ aat2870->is_enable = 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t aat2870_dump_reg(struct aat2870_data *aat2870, char *buf)
+{
+ u8 addr, val;
+ ssize_t count = 0;
+ int ret;
+
+ count += sprintf(buf, "aat2870 registers\n");
+ for (addr = 0; addr < AAT2870_REG_NUM; addr++) {
+ count += sprintf(buf + count, "0x%02x: ", addr);
+ if (count >= PAGE_SIZE - 1)
+ break;
+
+ ret = aat2870->read(aat2870, addr, &val);
+ if (ret == 0)
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "0x%02x", val);
+ else
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "<read fail: %d>", ret);
+
+ if (count >= PAGE_SIZE - 1)
+ break;
+
+ count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+ if (count >= PAGE_SIZE - 1)
+ break;
+ }
+
+ /* Truncate count; min() would cause a warning */
+ if (count >= PAGE_SIZE)
+ count = PAGE_SIZE - 1;
+
+ return count;
+}
+
+static int aat2870_reg_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t aat2870_reg_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct aat2870_data *aat2870 = file->private_data;
+ char *buf;
+ ssize_t ret;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = aat2870_dump_reg(aat2870, buf);
+ if (ret >= 0)
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t aat2870_reg_write_file(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct aat2870_data *aat2870 = file->private_data;
+ char buf[32];
+ int buf_size;
+ char *start = buf;
+ unsigned long addr, val;
+ int ret;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size)) {
+ dev_err(aat2870->dev, "Failed to copy from user\n");
+ return -EFAULT;
+ }
+ buf[buf_size] = 0;
+
+ while (*start == ' ')
+ start++;
+
+ addr = simple_strtoul(start, &start, 16);
+ if (addr >= AAT2870_REG_NUM) {
+ dev_err(aat2870->dev, "Invalid address, 0x%lx\n", addr);
+ return -EINVAL;
+ }
+
+ while (*start == ' ')
+ start++;
+
+ if (strict_strtoul(start, 16, &val))
+ return -EINVAL;
+
+ ret = aat2870->write(aat2870, (u8)addr, (u8)val);
+ if (ret)
+ return ret;
+
+ return buf_size;
+}
+
+static const struct file_operations aat2870_reg_fops = {
+ .open = aat2870_reg_open_file,
+ .read = aat2870_reg_read_file,
+ .write = aat2870_reg_write_file,
+};
+
+static void aat2870_init_debugfs(struct aat2870_data *aat2870)
+{
+ aat2870->dentry_root = debugfs_create_dir("aat2870", NULL);
+ if (!aat2870->dentry_root) {
+ dev_warn(aat2870->dev,
+ "Failed to create debugfs root directory\n");
+ return;
+ }
+
+ aat2870->dentry_reg = debugfs_create_file("regs", 0644,
+ aat2870->dentry_root,
+ aat2870, &aat2870_reg_fops);
+ if (!aat2870->dentry_reg)
+ dev_warn(aat2870->dev,
+ "Failed to create debugfs register file\n");
+}
+
+static void aat2870_uninit_debugfs(struct aat2870_data *aat2870)
+{
+ debugfs_remove_recursive(aat2870->dentry_root);
+}
+#else
+static inline void aat2870_init_debugfs(struct aat2870_data *aat2870)
+{
+}
+
+static inline void aat2870_uninit_debugfs(struct aat2870_data *aat2870)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int aat2870_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct aat2870_platform_data *pdata = client->dev.platform_data;
+ struct aat2870_data *aat2870;
+ int i, j;
+ int ret = 0;
+
+ aat2870 = kzalloc(sizeof(struct aat2870_data), GFP_KERNEL);
+ if (!aat2870) {
+ dev_err(&client->dev,
+ "Failed to allocate memory for aat2870\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ aat2870->dev = &client->dev;
+ dev_set_drvdata(aat2870->dev, aat2870);
+
+ aat2870->client = client;
+ i2c_set_clientdata(client, aat2870);
+
+ aat2870->reg_cache = aat2870_regs;
+
+ if (pdata->en_pin < 0)
+ aat2870->en_pin = -1;
+ else
+ aat2870->en_pin = pdata->en_pin;
+
+ aat2870->init = pdata->init;
+ aat2870->uninit = pdata->uninit;
+ aat2870->read = aat2870_read;
+ aat2870->write = aat2870_write;
+ aat2870->update = aat2870_update;
+
+ mutex_init(&aat2870->io_lock);
+
+ if (aat2870->init)
+ aat2870->init(aat2870);
+
+ if (aat2870->en_pin >= 0) {
+ ret = gpio_request(aat2870->en_pin, "aat2870-en");
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to request GPIO %d\n", aat2870->en_pin);
+ goto out_kfree;
+ }
+ gpio_direction_output(aat2870->en_pin, 1);
+ }
+
+ aat2870_enable(aat2870);
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+ for (j = 0; j < ARRAY_SIZE(aat2870_devs); j++) {
+ if ((pdata->subdevs[i].id == aat2870_devs[j].id) &&
+ !strcmp(pdata->subdevs[i].name,
+ aat2870_devs[j].name)) {
+ aat2870_devs[j].platform_data =
+ pdata->subdevs[i].platform_data;
+ break;
+ }
+ }
+ }
+
+ ret = mfd_add_devices(aat2870->dev, 0, aat2870_devs,
+ ARRAY_SIZE(aat2870_devs), NULL, 0);
+ if (ret != 0) {
+ dev_err(aat2870->dev, "Failed to add subdev: %d\n", ret);
+ goto out_disable;
+ }
+
+ aat2870_init_debugfs(aat2870);
+
+ return 0;
+
+out_disable:
+ aat2870_disable(aat2870);
+ if (aat2870->en_pin >= 0)
+ gpio_free(aat2870->en_pin);
+out_kfree:
+ kfree(aat2870);
+out:
+ return ret;
+}
+
+static int aat2870_i2c_remove(struct i2c_client *client)
+{
+ struct aat2870_data *aat2870 = i2c_get_clientdata(client);
+
+ aat2870_uninit_debugfs(aat2870);
+
+ mfd_remove_devices(aat2870->dev);
+ aat2870_disable(aat2870);
+ if (aat2870->en_pin >= 0)
+ gpio_free(aat2870->en_pin);
+ if (aat2870->uninit)
+ aat2870->uninit(aat2870);
+ kfree(aat2870);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int aat2870_i2c_suspend(struct i2c_client *client, pm_message_t state)
+{
+ struct aat2870_data *aat2870 = i2c_get_clientdata(client);
+
+ aat2870_disable(aat2870);
+
+ return 0;
+}
+
+static int aat2870_i2c_resume(struct i2c_client *client)
+{
+ struct aat2870_data *aat2870 = i2c_get_clientdata(client);
+ struct aat2870_register *reg = NULL;
+ int i;
+
+ aat2870_enable(aat2870);
+
+ /* restore registers */
+ for (i = 0; i < AAT2870_REG_NUM; i++) {
+ reg = &aat2870->reg_cache[i];
+ if (reg->writeable)
+ aat2870->write(aat2870, i, reg->value);
+ }
+
+ return 0;
+}
+#else
+#define aat2870_i2c_suspend NULL
+#define aat2870_i2c_resume NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_device_id aat2870_i2c_id_table[] = {
+ { "aat2870", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, aat2870_i2c_id_table);
+
+static struct i2c_driver aat2870_i2c_driver = {
+ .driver = {
+ .name = "aat2870",
+ .owner = THIS_MODULE,
+ },
+ .probe = aat2870_i2c_probe,
+ .remove = aat2870_i2c_remove,
+ .suspend = aat2870_i2c_suspend,
+ .resume = aat2870_i2c_resume,
+ .id_table = aat2870_i2c_id_table,
+};
+
+static int __init aat2870_init(void)
+{
+ return i2c_add_driver(&aat2870_i2c_driver);
+}
+subsys_initcall(aat2870_init);
+
+static void __exit aat2870_exit(void)
+{
+ i2c_del_driver(&aat2870_i2c_driver);
+}
+module_exit(aat2870_exit);
+
+MODULE_DESCRIPTION("Core support for the AnalogicTech AAT2870");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 3d7dce6..56ba194 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -879,20 +879,13 @@ static ssize_t ab3550_bank_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_bank;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_bank);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_bank);
if (err)
- return -EINVAL;
+ return err;
if (user_bank >= AB3550_NUM_BANKS) {
dev_err(&ab->i2c_client[0]->dev,
@@ -902,7 +895,7 @@ static ssize_t ab3550_bank_write(struct file *file,
ab->debug_bank = user_bank;
- return buf_size;
+ return count;
}
static int ab3550_address_print(struct seq_file *s, void *p)
@@ -923,27 +916,21 @@ static ssize_t ab3550_address_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_address;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_address);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_address);
if (err)
- return -EINVAL;
+ return err;
+
if (user_address > 0xff) {
dev_err(&ab->i2c_client[0]->dev,
"debugfs error input > 0xff\n");
return -EINVAL;
}
ab->debug_address = user_address;
- return buf_size;
+ return count;
}
static int ab3550_val_print(struct seq_file *s, void *p)
@@ -971,21 +958,15 @@ static ssize_t ab3550_val_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_val;
int err;
u8 regvalue;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_val);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_val);
if (err)
- return -EINVAL;
+ return err;
+
if (user_val > 0xff) {
dev_err(&ab->i2c_client[0]->dev,
"debugfs error input > 0xff\n");
@@ -1002,7 +983,7 @@ static ssize_t ab3550_val_write(struct file *file,
if (err)
return -EINVAL;
- return buf_size;
+ return count;
}
static const struct file_operations ab3550_bank_fops = {
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index fc0c1af..387705e 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -363,7 +363,7 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
}
}
-static struct resource ab8500_gpio_resources[] = {
+static struct resource __devinitdata ab8500_gpio_resources[] = {
{
.name = "GPIO_INT6",
.start = AB8500_INT_GPIO6R,
@@ -372,7 +372,7 @@ static struct resource ab8500_gpio_resources[] = {
}
};
-static struct resource ab8500_gpadc_resources[] = {
+static struct resource __devinitdata ab8500_gpadc_resources[] = {
{
.name = "HW_CONV_END",
.start = AB8500_INT_GP_HW_ADC_CONV_END,
@@ -387,7 +387,7 @@ static struct resource ab8500_gpadc_resources[] = {
},
};
-static struct resource ab8500_rtc_resources[] = {
+static struct resource __devinitdata ab8500_rtc_resources[] = {
{
.name = "60S",
.start = AB8500_INT_RTC_60S,
@@ -402,7 +402,7 @@ static struct resource ab8500_rtc_resources[] = {
},
};
-static struct resource ab8500_poweronkey_db_resources[] = {
+static struct resource __devinitdata ab8500_poweronkey_db_resources[] = {
{
.name = "ONKEY_DBF",
.start = AB8500_INT_PON_KEY1DB_F,
@@ -417,20 +417,47 @@ static struct resource ab8500_poweronkey_db_resources[] = {
},
};
-static struct resource ab8500_bm_resources[] = {
+static struct resource __devinitdata ab8500_av_acc_detect_resources[] = {
{
- .name = "MAIN_EXT_CH_NOT_OK",
- .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
- .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
- .flags = IORESOURCE_IRQ,
+ .name = "ACC_DETECT_1DB_F",
+ .start = AB8500_INT_ACC_DETECT_1DB_F,
+ .end = AB8500_INT_ACC_DETECT_1DB_F,
+ .flags = IORESOURCE_IRQ,
},
{
- .name = "BATT_OVV",
- .start = AB8500_INT_BATT_OVV,
- .end = AB8500_INT_BATT_OVV,
- .flags = IORESOURCE_IRQ,
+ .name = "ACC_DETECT_1DB_R",
+ .start = AB8500_INT_ACC_DETECT_1DB_R,
+ .end = AB8500_INT_ACC_DETECT_1DB_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ACC_DETECT_21DB_F",
+ .start = AB8500_INT_ACC_DETECT_21DB_F,
+ .end = AB8500_INT_ACC_DETECT_21DB_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ACC_DETECT_21DB_R",
+ .start = AB8500_INT_ACC_DETECT_21DB_R,
+ .end = AB8500_INT_ACC_DETECT_21DB_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ACC_DETECT_22DB_F",
+ .start = AB8500_INT_ACC_DETECT_22DB_F,
+ .end = AB8500_INT_ACC_DETECT_22DB_F,
+ .flags = IORESOURCE_IRQ,
},
{
+ .name = "ACC_DETECT_22DB_R",
+ .start = AB8500_INT_ACC_DETECT_22DB_R,
+ .end = AB8500_INT_ACC_DETECT_22DB_R,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource __devinitdata ab8500_charger_resources[] = {
+ {
.name = "MAIN_CH_UNPLUG_DET",
.start = AB8500_INT_MAIN_CH_UNPLUG_DET,
.end = AB8500_INT_MAIN_CH_UNPLUG_DET,
@@ -443,27 +470,27 @@ static struct resource ab8500_bm_resources[] = {
.flags = IORESOURCE_IRQ,
},
{
- .name = "VBUS_DET_F",
- .start = AB8500_INT_VBUS_DET_F,
- .end = AB8500_INT_VBUS_DET_F,
- .flags = IORESOURCE_IRQ,
- },
- {
.name = "VBUS_DET_R",
.start = AB8500_INT_VBUS_DET_R,
.end = AB8500_INT_VBUS_DET_R,
.flags = IORESOURCE_IRQ,
},
{
- .name = "BAT_CTRL_INDB",
- .start = AB8500_INT_BAT_CTRL_INDB,
- .end = AB8500_INT_BAT_CTRL_INDB,
+ .name = "VBUS_DET_F",
+ .start = AB8500_INT_VBUS_DET_F,
+ .end = AB8500_INT_VBUS_DET_F,
.flags = IORESOURCE_IRQ,
},
{
- .name = "CH_WD_EXP",
- .start = AB8500_INT_CH_WD_EXP,
- .end = AB8500_INT_CH_WD_EXP,
+ .name = "USB_LINK_STATUS",
+ .start = AB8500_INT_USB_LINK_STATUS,
+ .end = AB8500_INT_USB_LINK_STATUS,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGE_DET_DONE",
+ .start = AB8500_INT_USB_CHG_DET_DONE,
+ .end = AB8500_INT_USB_CHG_DET_DONE,
.flags = IORESOURCE_IRQ,
},
{
@@ -473,21 +500,60 @@ static struct resource ab8500_bm_resources[] = {
.flags = IORESOURCE_IRQ,
},
{
- .name = "NCONV_ACCU",
- .start = AB8500_INT_CCN_CONV_ACC,
- .end = AB8500_INT_CCN_CONV_ACC,
+ .name = "USB_CH_TH_PROT_R",
+ .start = AB8500_INT_USB_CH_TH_PROT_R,
+ .end = AB8500_INT_USB_CH_TH_PROT_R,
.flags = IORESOURCE_IRQ,
},
{
- .name = "LOW_BAT_F",
- .start = AB8500_INT_LOW_BAT_F,
- .end = AB8500_INT_LOW_BAT_F,
+ .name = "USB_CH_TH_PROT_F",
+ .start = AB8500_INT_USB_CH_TH_PROT_F,
+ .end = AB8500_INT_USB_CH_TH_PROT_F,
.flags = IORESOURCE_IRQ,
},
{
- .name = "LOW_BAT_R",
- .start = AB8500_INT_LOW_BAT_R,
- .end = AB8500_INT_LOW_BAT_R,
+ .name = "MAIN_EXT_CH_NOT_OK",
+ .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+ .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "MAIN_CH_TH_PROT_R",
+ .start = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .end = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "MAIN_CH_TH_PROT_F",
+ .start = AB8500_INT_MAIN_CH_TH_PROT_F,
+ .end = AB8500_INT_MAIN_CH_TH_PROT_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGER_NOT_OKR",
+ .start = AB8500_INT_USB_CHARGER_NOT_OK,
+ .end = AB8500_INT_USB_CHARGER_NOT_OK,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGER_NOT_OKF",
+ .start = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .end = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "CH_WD_EXP",
+ .start = AB8500_INT_CH_WD_EXP,
+ .end = AB8500_INT_CH_WD_EXP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource __devinitdata ab8500_btemp_resources[] = {
+ {
+ .name = "BAT_CTRL_INDB",
+ .start = AB8500_INT_BAT_CTRL_INDB,
+ .end = AB8500_INT_BAT_CTRL_INDB,
.flags = IORESOURCE_IRQ,
},
{
@@ -503,38 +569,55 @@ static struct resource ab8500_bm_resources[] = {
.flags = IORESOURCE_IRQ,
},
{
- .name = "USB_CHARGER_NOT_OKR",
- .start = AB8500_INT_USB_CHARGER_NOT_OK,
- .end = AB8500_INT_USB_CHARGER_NOT_OK,
+ .name = "BTEMP_LOW_MEDIUM",
+ .start = AB8500_INT_BTEMP_LOW_MEDIUM,
+ .end = AB8500_INT_BTEMP_LOW_MEDIUM,
.flags = IORESOURCE_IRQ,
},
{
- .name = "USB_CHARGE_DET_DONE",
- .start = AB8500_INT_USB_CHG_DET_DONE,
- .end = AB8500_INT_USB_CHG_DET_DONE,
+ .name = "BTEMP_MEDIUM_HIGH",
+ .start = AB8500_INT_BTEMP_MEDIUM_HIGH,
+ .end = AB8500_INT_BTEMP_MEDIUM_HIGH,
.flags = IORESOURCE_IRQ,
},
+};
+
+static struct resource __devinitdata ab8500_fg_resources[] = {
{
- .name = "USB_CH_TH_PROT_R",
- .start = AB8500_INT_USB_CH_TH_PROT_R,
- .end = AB8500_INT_USB_CH_TH_PROT_R,
+ .name = "NCONV_ACCU",
+ .start = AB8500_INT_CCN_CONV_ACC,
+ .end = AB8500_INT_CCN_CONV_ACC,
.flags = IORESOURCE_IRQ,
},
{
- .name = "MAIN_CH_TH_PROT_R",
- .start = AB8500_INT_MAIN_CH_TH_PROT_R,
- .end = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .name = "BATT_OVV",
+ .start = AB8500_INT_BATT_OVV,
+ .end = AB8500_INT_BATT_OVV,
.flags = IORESOURCE_IRQ,
},
{
- .name = "USB_CHARGER_NOT_OKF",
- .start = AB8500_INT_USB_CHARGER_NOT_OKF,
- .end = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .name = "LOW_BAT_F",
+ .start = AB8500_INT_LOW_BAT_F,
+ .end = AB8500_INT_LOW_BAT_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "LOW_BAT_R",
+ .start = AB8500_INT_LOW_BAT_R,
+ .end = AB8500_INT_LOW_BAT_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "CC_INT_CALIB",
+ .start = AB8500_INT_CC_INT_CALIB,
+ .end = AB8500_INT_CC_INT_CALIB,
.flags = IORESOURCE_IRQ,
},
};
-static struct resource ab8500_debug_resources[] = {
+static struct resource __devinitdata ab8500_chargalg_resources[] = {};
+
+static struct resource __devinitdata ab8500_debug_resources[] = {
{
.name = "IRQ_FIRST",
.start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
@@ -549,7 +632,7 @@ static struct resource ab8500_debug_resources[] = {
},
};
-static struct resource ab8500_usb_resources[] = {
+static struct resource __devinitdata ab8500_usb_resources[] = {
{
.name = "ID_WAKEUP_R",
.start = AB8500_INT_ID_WAKEUP_R,
@@ -580,9 +663,21 @@ static struct resource ab8500_usb_resources[] = {
.end = AB8500_INT_USB_LINK_STATUS,
.flags = IORESOURCE_IRQ,
},
+ {
+ .name = "USB_ADP_PROBE_PLUG",
+ .start = AB8500_INT_ADP_PROBE_PLUG,
+ .end = AB8500_INT_ADP_PROBE_PLUG,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_ADP_PROBE_UNPLUG",
+ .start = AB8500_INT_ADP_PROBE_UNPLUG,
+ .end = AB8500_INT_ADP_PROBE_UNPLUG,
+ .flags = IORESOURCE_IRQ,
+ },
};
-static struct resource ab8500_temp_resources[] = {
+static struct resource __devinitdata ab8500_temp_resources[] = {
{
.name = "AB8500_TEMP_WARM",
.start = AB8500_INT_TEMP_WARM,
@@ -591,7 +686,7 @@ static struct resource ab8500_temp_resources[] = {
},
};
-static struct mfd_cell ab8500_devs[] = {
+static struct mfd_cell __devinitdata ab8500_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
@@ -621,11 +716,33 @@ static struct mfd_cell ab8500_devs[] = {
.resources = ab8500_rtc_resources,
},
{
- .name = "ab8500-bm",
- .num_resources = ARRAY_SIZE(ab8500_bm_resources),
- .resources = ab8500_bm_resources,
+ .name = "ab8500-charger",
+ .num_resources = ARRAY_SIZE(ab8500_charger_resources),
+ .resources = ab8500_charger_resources,
+ },
+ {
+ .name = "ab8500-btemp",
+ .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
+ .resources = ab8500_btemp_resources,
+ },
+ {
+ .name = "ab8500-fg",
+ .num_resources = ARRAY_SIZE(ab8500_fg_resources),
+ .resources = ab8500_fg_resources,
+ },
+ {
+ .name = "ab8500-chargalg",
+ .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
+ .resources = ab8500_chargalg_resources,
+ },
+ {
+ .name = "ab8500-acc-det",
+ .num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
+ .resources = ab8500_av_acc_detect_resources,
+ },
+ {
+ .name = "ab8500-codec",
},
- { .name = "ab8500-codec", },
{
.name = "ab8500-usb",
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 64748e4..64bdeeb 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -419,20 +419,13 @@ static ssize_t ab8500_bank_write(struct file *file,
size_t count, loff_t *ppos)
{
struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_bank;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_bank);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_bank);
if (err)
- return -EINVAL;
+ return err;
if (user_bank >= AB8500_NUM_BANKS) {
dev_err(dev, "debugfs error input > number of banks\n");
@@ -441,7 +434,7 @@ static ssize_t ab8500_bank_write(struct file *file,
debug_bank = user_bank;
- return buf_size;
+ return count;
}
static int ab8500_address_print(struct seq_file *s, void *p)
@@ -459,26 +452,20 @@ static ssize_t ab8500_address_write(struct file *file,
size_t count, loff_t *ppos)
{
struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_address;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_address);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_address);
if (err)
- return -EINVAL;
+ return err;
+
if (user_address > 0xff) {
dev_err(dev, "debugfs error input > 0xff\n");
return -EINVAL;
}
debug_address = user_address;
- return buf_size;
+ return count;
}
static int ab8500_val_print(struct seq_file *s, void *p)
@@ -509,20 +496,14 @@ static ssize_t ab8500_val_write(struct file *file,
size_t count, loff_t *ppos)
{
struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_val;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_val);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_val);
if (err)
- return -EINVAL;
+ return err;
+
if (user_val > 0xff) {
dev_err(dev, "debugfs error input > 0xff\n");
return -EINVAL;
@@ -534,7 +515,7 @@ static ssize_t ab8500_val_write(struct file *file,
return -EINVAL;
}
- return buf_size;
+ return count;
}
static const struct file_operations ab8500_bank_fops = {
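
Editor's sketch (not part of the patch) of the pattern the ab3550/ab8500 debugfs conversions above follow: kstrtoul_from_user() parses one unsigned integer straight from user space, the helper's error code is propagated as-is, and "count" is returned on success so the write is fully consumed. Names are illustrative.

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t example_debugfs_write(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	unsigned long val;
	int err;

	err = kstrtoul_from_user(user_buf, count, 0, &val);
	if (err)
		return err;

	if (val > 0xff)
		return -EINVAL;

	/* ... apply val to the driver/hardware state ... */
	return count;
}
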
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index a0bd0cf..21131c7 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -56,7 +56,7 @@ struct jz4740_adc {
void __iomem *base;
int irq;
- int irq_base;
+ struct irq_chip_generic *gc;
struct clk *clk;
atomic_t clk_ref;
@@ -64,63 +64,17 @@ struct jz4740_adc {
spinlock_t lock;
};
-static inline void jz4740_adc_irq_set_masked(struct jz4740_adc *adc, int irq,
- bool masked)
-{
- unsigned long flags;
- uint8_t val;
-
- irq -= adc->irq_base;
-
- spin_lock_irqsave(&adc->lock, flags);
-
- val = readb(adc->base + JZ_REG_ADC_CTRL);
- if (masked)
- val |= BIT(irq);
- else
- val &= ~BIT(irq);
- writeb(val, adc->base + JZ_REG_ADC_CTRL);
-
- spin_unlock_irqrestore(&adc->lock, flags);
-}
-
-static void jz4740_adc_irq_mask(struct irq_data *data)
-{
- struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
- jz4740_adc_irq_set_masked(adc, data->irq, true);
-}
-
-static void jz4740_adc_irq_unmask(struct irq_data *data)
-{
- struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
- jz4740_adc_irq_set_masked(adc, data->irq, false);
-}
-
-static void jz4740_adc_irq_ack(struct irq_data *data)
-{
- struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
- unsigned int irq = data->irq - adc->irq_base;
- writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS);
-}
-
-static struct irq_chip jz4740_adc_irq_chip = {
- .name = "jz4740-adc",
- .irq_mask = jz4740_adc_irq_mask,
- .irq_unmask = jz4740_adc_irq_unmask,
- .irq_ack = jz4740_adc_irq_ack,
-};
-
static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- struct jz4740_adc *adc = irq_desc_get_handler_data(desc);
+ struct irq_chip_generic *gc = irq_desc_get_handler_data(desc);
uint8_t status;
unsigned int i;
- status = readb(adc->base + JZ_REG_ADC_STATUS);
+ status = readb(gc->reg_base + JZ_REG_ADC_STATUS);
for (i = 0; i < 5; ++i) {
if (status & BIT(i))
- generic_handle_irq(adc->irq_base + i);
+ generic_handle_irq(gc->irq_base + i);
}
}
@@ -249,10 +203,12 @@ const struct mfd_cell jz4740_adc_cells[] = {
static int __devinit jz4740_adc_probe(struct platform_device *pdev)
{
- int ret;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
struct jz4740_adc *adc;
struct resource *mem_base;
- int irq;
+ int ret;
+ int irq_base;
adc = kmalloc(sizeof(*adc), GFP_KERNEL);
if (!adc) {
@@ -267,9 +223,9 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
goto err_free;
}
- adc->irq_base = platform_get_irq(pdev, 1);
- if (adc->irq_base < 0) {
- ret = adc->irq_base;
+ irq_base = platform_get_irq(pdev, 1);
+ if (irq_base < 0) {
+ ret = irq_base;
dev_err(&pdev->dev, "Failed to get irq base: %d\n", ret);
goto err_free;
}
@@ -309,20 +265,28 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, adc);
- for (irq = adc->irq_base; irq < adc->irq_base + 5; ++irq) {
- irq_set_chip_data(irq, adc);
- irq_set_chip_and_handler(irq, &jz4740_adc_irq_chip,
- handle_level_irq);
- }
+ gc = irq_alloc_generic_chip("INTC", 1, irq_base, adc->base,
+ handle_level_irq);
+
+ ct = gc->chip_types;
+ ct->regs.mask = JZ_REG_ADC_CTRL;
+ ct->regs.ack = JZ_REG_ADC_STATUS;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ ct->chip.irq_ack = irq_gc_ack;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
+
+ adc->gc = gc;
- irq_set_handler_data(adc->irq, adc);
+ irq_set_handler_data(adc->irq, gc);
irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux);
writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells,
- ARRAY_SIZE(jz4740_adc_cells), mem_base, adc->irq_base);
+ ARRAY_SIZE(jz4740_adc_cells), mem_base, irq_base);
if (ret < 0)
goto err_clk_put;
@@ -347,6 +311,8 @@ static int __devexit jz4740_adc_remove(struct platform_device *pdev)
mfd_remove_devices(&pdev->dev);
+ irq_remove_generic_chip(adc->gc, IRQ_MSK(5), IRQ_NOPROBE | IRQ_LEVEL, 0);
+ kfree(adc->gc);
irq_set_handler_data(adc->irq, NULL);
irq_set_chained_handler(adc->irq, NULL);
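
Editor's annotated sketch (not part of the patch) of the generic irq chip setup the jz4740-adc conversion above uses, for a hypothetical block with a mask register and a write-1-to-ack status register; the register offsets and IRQ count are placeholders.

#include <linux/irq.h>

static struct irq_chip_generic *example_setup_irq_chip(void __iomem *base,
							unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	/* one chip type, level-triggered flow handling */
	gc = irq_alloc_generic_chip("example", 1, irq_base, base,
				    handle_level_irq);
	if (!gc)
		return NULL;

	ct = gc->chip_types;
	ct->regs.mask = 0x08;			   /* placeholder mask register */
	ct->regs.ack = 0x0c;			   /* placeholder status register */
	ct->chip.irq_mask = irq_gc_mask_set_bit;   /* set bit to mask */
	ct->chip.irq_unmask = irq_gc_mask_clr_bit; /* clear bit to unmask */
	ct->chip.irq_ack = irq_gc_ack;		   /* write status bit to ack */

	/* register 5 interrupts, no probing, level semantics */
	irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
	return gc;
}
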
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index ea3f52c..ea1169b 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -37,6 +37,9 @@
#define GPIOBASE 0x44
#define GPIO_IO_SIZE 64
+#define WDTBASE 0x84
+#define WDT_IO_SIZE 64
+
static struct resource smbus_sch_resource = {
.flags = IORESOURCE_IO,
};
@@ -59,6 +62,18 @@ static struct mfd_cell lpc_sch_cells[] = {
},
};
+static struct resource wdt_sch_resource = {
+ .flags = IORESOURCE_IO,
+};
+
+static struct mfd_cell tunnelcreek_cells[] = {
+ {
+ .name = "tunnelcreek_wdt",
+ .num_resources = 1,
+ .resources = &wdt_sch_resource,
+ },
+};
+
static struct pci_device_id lpc_sch_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
@@ -72,6 +87,7 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
unsigned int base_addr_cfg;
unsigned short base_addr;
int i;
+ int ret;
pci_read_config_dword(dev, SMBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
@@ -104,8 +120,39 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
lpc_sch_cells[i].id = id->device;
- return mfd_add_devices(&dev->dev, 0,
+ ret = mfd_add_devices(&dev->dev, 0,
lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, 0);
+ if (ret)
+ goto out_dev;
+
+ if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
+ pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
+ if (!(base_addr_cfg & (1 << 31))) {
+ dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
+ ret = -ENODEV;
+ goto out_dev;
+ }
+ base_addr = (unsigned short)base_addr_cfg;
+ if (base_addr == 0) {
+ dev_err(&dev->dev, "I/O space for WDT uninitialized\n");
+ ret = -ENODEV;
+ goto out_dev;
+ }
+
+ wdt_sch_resource.start = base_addr;
+ wdt_sch_resource.end = base_addr + WDT_IO_SIZE - 1;
+
+ for (i = 0; i < ARRAY_SIZE(tunnelcreek_cells); i++)
+ tunnelcreek_cells[i].id = id->device;
+
+ ret = mfd_add_devices(&dev->dev, 0, tunnelcreek_cells,
+ ARRAY_SIZE(tunnelcreek_cells), NULL, 0);
+ }
+
+ return ret;
+out_dev:
+ mfd_remove_devices(&dev->dev);
+ return ret;
}
static void __devexit lpc_sch_remove(struct pci_dev *dev)
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index 638bf7e..09274cf 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -58,8 +58,6 @@ static struct i2c_client *get_i2c(struct max8997_dev *max8997,
default:
return ERR_PTR(-EINVAL);
}
-
- return ERR_PTR(-EINVAL);
}
struct max8997_irq_data {
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 9ec7570..de4096a 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -39,6 +39,8 @@ static struct mfd_cell max8998_devs[] = {
.name = "max8998-pmic",
}, {
.name = "max8998-rtc",
+ }, {
+ .name = "max8998-battery",
},
};
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 1717144..29601e7 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -998,9 +998,9 @@ static void usbhs_disable(struct device *dev)
if (is_omap_usbhs_rev2(omap)) {
if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_enable(omap->usbtll_p1_fck);
+ clk_disable(omap->usbtll_p1_fck);
if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_enable(omap->usbtll_p2_fck);
+ clk_disable(omap->usbtll_p2_fck);
clk_disable(omap->utmi_p2_fck);
clk_disable(omap->utmi_p1_fck);
}
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 7ab7746..2963689 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -228,7 +228,7 @@ int stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
EXPORT_SYMBOL_GPL(stmpe_block_write);
/**
- * stmpe_set_altfunc: set the alternate function for STMPE pins
+ * stmpe_set_altfunc() - set the alternate function for STMPE pins
* @stmpe: Device to configure
* @pins: Bitmask of pins to affect
* @block: block to enable alternate functions for
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 0dbdc4e..e4ee3895 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -42,6 +42,7 @@ struct stmpe_variant_block {
* @id_mask: bits valid in CHIPID register for comparison with id_val
* @num_gpios: number of GPIOS
* @af_bits: number of bits used to specify the alternate function
+ * @regs: variant specific registers.
* @blocks: list of blocks present on this device
* @num_blocks: number of blocks present on this device
* @num_irqs: number of internal IRQs available on this device
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 69272e4..696879e 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -287,12 +287,8 @@ static __devinitdata struct i2c_board_info timberdale_saa7706_i2c_board_info = {
static __devinitdata struct timb_radio_platform_data
timberdale_radio_platform_data = {
.i2c_adapter = 0,
- .tuner = {
- .info = &timberdale_tef6868_i2c_board_info
- },
- .dsp = {
- .info = &timberdale_saa7706_i2c_board_info
- }
+ .tuner = &timberdale_tef6868_i2c_board_info,
+ .dsp = &timberdale_saa7706_i2c_board_info
};
static const __devinitconst struct resource timberdale_video_resources[] = {
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 2229e66..6f5b8cf 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -147,12 +147,11 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
if (init_data == NULL)
return -ENOMEM;
- init_data->irq = pmic_plat_data->irq;
- init_data->irq_base = pmic_plat_data->irq;
-
tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
- if (tps65910 == NULL)
+ if (tps65910 == NULL) {
+ kfree(init_data);
return -ENOMEM;
+ }
i2c_set_clientdata(i2c, tps65910);
tps65910->dev = &i2c->dev;
@@ -168,17 +167,22 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
goto err;
+ init_data->irq = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq;
+
tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
if (ret < 0)
goto err;
+ kfree(init_data);
return ret;
err:
mfd_remove_devices(tps65910->dev);
kfree(tps65910);
+ kfree(init_data);
return ret;
}
@@ -187,6 +191,7 @@ static int tps65910_i2c_remove(struct i2c_client *i2c)
struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
mfd_remove_devices(tps65910->dev);
+ tps65910_irq_exit(tps65910);
kfree(tps65910);
return 0;
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index 283ac67..e7ff783 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -157,6 +157,8 @@ static __devexit int tps65911_comparator_remove(struct platform_device *pdev)
struct tps65910 *tps65910;
tps65910 = dev_get_drvdata(pdev->dev.parent);
+ device_remove_file(&pdev->dev, &dev_attr_comp2_threshold);
+ device_remove_file(&pdev->dev, &dev_attr_comp1_threshold);
return 0;
}
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
new file mode 100644
index 0000000..955bc00
--- /dev/null
+++ b/drivers/mfd/tps65912-core.c
@@ -0,0 +1,177 @@
+/*
+ * tps65912-core.c -- TI TPS65912x
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65912.h>
+
+static struct mfd_cell tps65912s[] = {
+ {
+ .name = "tps65912-pmic",
+ },
+};
+
+int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&tps65912->io_mutex);
+
+ err = tps65912->read(tps65912, reg, 1, &data);
+ if (err) {
+ dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
+ goto out;
+ }
+
+ data |= mask;
+ err = tps65912->write(tps65912, reg, 1, &data);
+ if (err)
+ dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps65912->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65912_set_bits);
+
+int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&tps65912->io_mutex);
+ err = tps65912->read(tps65912, reg, 1, &data);
+ if (err) {
+ dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
+ goto out;
+ }
+
+ data &= ~mask;
+ err = tps65912->write(tps65912, reg, 1, &data);
+ if (err)
+ dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps65912->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65912_clear_bits);
+
+static inline int tps65912_read(struct tps65912 *tps65912, u8 reg)
+{
+ u8 val;
+ int err;
+
+ err = tps65912->read(tps65912, reg, 1, &val);
+ if (err < 0)
+ return err;
+
+ return val;
+}
+
+static inline int tps65912_write(struct tps65912 *tps65912, u8 reg, u8 val)
+{
+ return tps65912->write(tps65912, reg, 1, &val);
+}
+
+int tps65912_reg_read(struct tps65912 *tps65912, u8 reg)
+{
+ int data;
+
+ mutex_lock(&tps65912->io_mutex);
+
+ data = tps65912_read(tps65912, reg);
+ if (data < 0)
+ dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps65912->io_mutex);
+ return data;
+}
+EXPORT_SYMBOL_GPL(tps65912_reg_read);
+
+int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val)
+{
+ int err;
+
+ mutex_lock(&tps65912->io_mutex);
+
+ err = tps65912_write(tps65912, reg, val);
+ if (err < 0)
+ dev_err(tps65912->dev, "Write for reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps65912->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65912_reg_write);
+
+int tps65912_device_init(struct tps65912 *tps65912)
+{
+ struct tps65912_board *pmic_plat_data = tps65912->dev->platform_data;
+ struct tps65912_platform_data *init_data;
+ int ret, dcdc_avs, value;
+
+ init_data = kzalloc(sizeof(struct tps65912_platform_data), GFP_KERNEL);
+ if (init_data == NULL)
+ return -ENOMEM;
+
+ init_data->irq = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq;
+
+ mutex_init(&tps65912->io_mutex);
+ dev_set_drvdata(tps65912->dev, tps65912);
+
+ dcdc_avs = (pmic_plat_data->is_dcdc1_avs << 0 |
+ pmic_plat_data->is_dcdc2_avs << 1 |
+ pmic_plat_data->is_dcdc3_avs << 2 |
+ pmic_plat_data->is_dcdc4_avs << 3);
+ if (dcdc_avs) {
+ tps65912->read(tps65912, TPS65912_I2C_SPI_CFG, 1, &value);
+ dcdc_avs |= value;
+ tps65912->write(tps65912, TPS65912_I2C_SPI_CFG, 1, &dcdc_avs);
+ }
+
+ ret = mfd_add_devices(tps65912->dev, -1,
+ tps65912s, ARRAY_SIZE(tps65912s),
+ NULL, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = tps65912_irq_init(tps65912, init_data->irq, init_data);
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ kfree(init_data);
+ mfd_remove_devices(tps65912->dev);
+ kfree(tps65912);
+ return ret;
+}
+
+void tps65912_device_exit(struct tps65912 *tps65912)
+{
+ mfd_remove_devices(tps65912->dev);
+ kfree(tps65912);
+}
+
+MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65912x chip family multi-function driver");
+MODULE_LICENSE("GPL");
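
Editor's illustrative sketch (not part of the patch): a child cell such as tps65912-pmic can pick the core up from its parent (set with dev_set_drvdata() in tps65912_device_init() above) and use the exported register helpers. The register and bit chosen here are placeholders.

#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/mfd/tps65912.h>

static int example_cell_probe(struct platform_device *pdev)
{
	struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
	int val;

	/* tps65912_reg_read() returns the value or a negative error */
	val = tps65912_reg_read(tps65912, TPS65912_I2C_SPI_CFG);
	if (val < 0)
		return val;

	/* set a single (placeholder) bit in the same register */
	return tps65912_set_bits(tps65912, TPS65912_I2C_SPI_CFG, BIT(0));
}
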
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
new file mode 100644
index 0000000..c041f2c
--- /dev/null
+++ b/drivers/mfd/tps65912-i2c.c
@@ -0,0 +1,139 @@
+/*
+ * tps65912-i2c.c -- I2C access for TI TPS65912x PMIC
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65912.h>
+
+static int tps65912_i2c_read(struct tps65912 *tps65912, u8 reg,
+ int bytes, void *dest)
+{
+ struct i2c_client *i2c = tps65912->control_data;
+ struct i2c_msg xfer[2];
+ int ret;
+
+ /* Write register */
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = bytes;
+ xfer[1].buf = dest;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+ return ret;
+}
+
+static int tps65912_i2c_write(struct tps65912 *tps65912, u8 reg,
+ int bytes, void *src)
+{
+ struct i2c_client *i2c = tps65912->control_data;
+ /* we add 1 byte for device register */
+ u8 msg[TPS6591X_MAX_REGISTER + 1];
+ int ret;
+
+ if (bytes > TPS6591X_MAX_REGISTER)
+ return -EINVAL;
+
+ msg[0] = reg;
+ memcpy(&msg[1], src, bytes);
+
+ ret = i2c_master_send(i2c, msg, bytes + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != bytes + 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int tps65912_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tps65912 *tps65912;
+
+ tps65912 = kzalloc(sizeof(struct tps65912), GFP_KERNEL);
+ if (tps65912 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, tps65912);
+ tps65912->dev = &i2c->dev;
+ tps65912->control_data = i2c;
+ tps65912->read = tps65912_i2c_read;
+ tps65912->write = tps65912_i2c_write;
+
+ return tps65912_device_init(tps65912);
+}
+
+static int tps65912_i2c_remove(struct i2c_client *i2c)
+{
+ struct tps65912 *tps65912 = i2c_get_clientdata(i2c);
+
+ tps65912_device_exit(tps65912);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps65912_i2c_id[] = {
+ {"tps65912", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps65912_i2c_id);
+
+static struct i2c_driver tps65912_i2c_driver = {
+ .driver = {
+ .name = "tps65912",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_i2c_probe,
+ .remove = tps65912_i2c_remove,
+ .id_table = tps65912_i2c_id,
+};
+
+static int __init tps65912_i2c_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&tps65912_i2c_driver);
+ if (ret != 0)
+ pr_err("Failed to register TPS65912 I2C driver: %d\n", ret);
+
+ return ret;
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps65912_i2c_init);
+
+static void __exit tps65912_i2c_exit(void)
+{
+ i2c_del_driver(&tps65912_i2c_driver);
+}
+module_exit(tps65912_i2c_exit);
+
+MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
+MODULE_LICENSE("GPL");
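
Editor's illustrative board-code sketch (not part of the patch): registering the new TPS65912 I2C driver's device from platform code. The bus number, slave address and IRQ are placeholders; the tps65912_board fields follow the accesses made in tps65912_device_init() above.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/mfd/tps65912.h>

static struct tps65912_board example_tps65912_pdata = {
	.irq		= 0,	/* board-specific PMIC IRQ line */
	.is_dcdc1_avs	= 1,	/* DCDC1 is wired for AVS */
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("tps65912", 0x2d),	/* address is a placeholder */
		.platform_data = &example_tps65912_pdata,
	},
};

static void __init example_board_i2c_init(void)
{
	i2c_register_board_info(1, example_i2c_devs,
				ARRAY_SIZE(example_i2c_devs));
}
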
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
new file mode 100644
index 0000000..d360a83
--- /dev/null
+++ b/drivers/mfd/tps65912-irq.c
@@ -0,0 +1,224 @@
+/*
+ * tps65912-irq.c -- TI TPS6591x
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65912.h>
+
+static inline int irq_to_tps65912_irq(struct tps65912 *tps65912,
+ int irq)
+{
+ return irq - tps65912->irq_base;
+}
+
+/*
+ * This is a threaded IRQ handler, so it can access I2C/SPI. Since the
+ * IRQ handler explicitly clears the IRQ it handles, the IRQ line
+ * will be reasserted and the physical IRQ will be handled again if
+ * another interrupt is asserted while we run - in the normal course
+ * of events this is a rare occurrence so we save I2C/SPI reads. We're
+ * also assuming that it's rare to get lots of interrupts firing
+ * simultaneously so try to minimise I/O.
+ */
+static irqreturn_t tps65912_irq(int irq, void *irq_data)
+{
+ struct tps65912 *tps65912 = irq_data;
+ u32 irq_sts;
+ u32 irq_mask;
+ u8 reg;
+ int i;
+
+
+ tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
+ irq_sts = reg;
+ tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
+ irq_sts |= reg << 8;
+ tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
+ irq_sts |= reg << 16;
+ tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
+ irq_sts |= reg << 24;
+
+ tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
+ irq_mask = reg;
+ tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
+ irq_mask |= reg << 8;
+ tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
+ irq_mask |= reg << 16;
+ tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
+ irq_mask |= reg << 24;
+
+ irq_sts &= ~irq_mask;
+ if (!irq_sts)
+ return IRQ_NONE;
+
+ for (i = 0; i < tps65912->irq_num; i++) {
+ if (!(irq_sts & (1 << i)))
+ continue;
+
+ handle_nested_irq(tps65912->irq_base + i);
+ }
+
+ /* Write the STS register back to clear IRQs we handled */
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
+ reg = irq_sts & 0xFF;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);
+
+ return IRQ_HANDLED;
+}
+
+static void tps65912_irq_lock(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&tps65912->irq_lock);
+}
+
+static void tps65912_irq_sync_unlock(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+ u32 reg_mask;
+ u8 reg;
+
+ tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
+ reg_mask = reg;
+ tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
+ reg_mask |= reg << 8;
+ tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
+ reg_mask |= reg << 16;
+ tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
+ reg_mask |= reg << 24;
+
+ if (tps65912->irq_mask != reg_mask) {
+ reg = tps65912->irq_mask & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK, 1, &reg);
+ reg = tps65912->irq_mask >> 8 & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK2, 1, &reg);
+ reg = tps65912->irq_mask >> 16 & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK3, 1, &reg);
+ reg = tps65912->irq_mask >> 24 & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK4, 1, &reg);
+ }
+
+ mutex_unlock(&tps65912->irq_lock);
+}
+
+static void tps65912_irq_enable(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+
+ tps65912->irq_mask &= ~(1 << irq_to_tps65912_irq(tps65912, data->irq));
+}
+
+static void tps65912_irq_disable(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+
+ tps65912->irq_mask |= (1 << irq_to_tps65912_irq(tps65912, data->irq));
+}
+
+static struct irq_chip tps65912_irq_chip = {
+ .name = "tps65912",
+ .irq_bus_lock = tps65912_irq_lock,
+ .irq_bus_sync_unlock = tps65912_irq_sync_unlock,
+ .irq_disable = tps65912_irq_disable,
+ .irq_enable = tps65912_irq_enable,
+};
+
+int tps65912_irq_init(struct tps65912 *tps65912, int irq,
+ struct tps65912_platform_data *pdata)
+{
+ int ret, cur_irq;
+ int flags = IRQF_ONESHOT;
+ u8 reg;
+
+ if (!irq) {
+ dev_warn(tps65912->dev, "No interrupt support, no core IRQ\n");
+ return 0;
+ }
+
+ if (!pdata || !pdata->irq_base) {
+ dev_warn(tps65912->dev, "No interrupt support, no IRQ base\n");
+ return 0;
+ }
+
+ /* Clear unattended interrupts */
+ tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
+ tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
+ tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
+ tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);
+
+ /* Mask top level interrupts */
+ tps65912->irq_mask = 0xFFFFFFFF;
+
+ mutex_init(&tps65912->irq_lock);
+ tps65912->chip_irq = irq;
+ tps65912->irq_base = pdata->irq_base;
+
+ tps65912->irq_num = TPS65912_NUM_IRQ;
+
+ /* Register with genirq */
+ for (cur_irq = tps65912->irq_base;
+ cur_irq < tps65912->irq_num + tps65912->irq_base;
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, tps65912);
+ irq_set_chip_and_handler(cur_irq, &tps65912_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, tps65912_irq, flags,
+ "tps65912", tps65912);
+
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+ if (ret != 0)
+ dev_err(tps65912->dev, "Failed to request IRQ: %d\n", ret);
+
+ return ret;
+}
+
+int tps65912_irq_exit(struct tps65912 *tps65912)
+{
+ free_irq(tps65912->chip_irq, tps65912);
+ return 0;
+}
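
The handler above is purely a demultiplexer: it reads the four status registers, calls
handle_nested_irq() for every unmasked bit and then writes the handled bits back to ack them.
A child driver claims one of those virtual IRQs in the usual genirq way; the sketch below is
hedged — the offset constant is a placeholder, the real per-interrupt offsets come from
<linux/mfd/tps65912.h>:

    #include <linux/interrupt.h>
    #include <linux/mfd/tps65912.h>

    /* Placeholder offset for illustration; use a real TPS65912 interrupt
     * offset from the header in actual code. */
    #define EXAMPLE_TPS65912_IRQ_OFFSET 0

    static irqreturn_t example_child_handler(int irq, void *data)
    {
            /* Runs in thread context, so sleeping bus I/O is allowed here. */
            return IRQ_HANDLED;
    }

    static int example_child_request_irq(struct tps65912 *tps65912)
    {
            return request_threaded_irq(tps65912->irq_base +
                                        EXAMPLE_TPS65912_IRQ_OFFSET,
                                        NULL, example_child_handler, 0,
                                        "tps65912-child", tps65912);
    }
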
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
new file mode 100644
index 0000000..6d71e0d
--- /dev/null
+++ b/drivers/mfd/tps65912-spi.c
@@ -0,0 +1,142 @@
+/*
+ * tps65912-spi.c -- SPI access for TI TPS65912x PMIC
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65912.h>
+
+static int tps65912_spi_write(struct tps65912 *tps65912, u8 addr,
+ int bytes, void *src)
+{
+ struct spi_device *spi = tps65912->control_data;
+ u8 *data = (u8 *) src;
+ int ret;
+ /* bit 23 is the read/write bit */
+ unsigned long spi_data = 1 << 23 | addr << 15 | *data;
+ struct spi_transfer xfer = { };
+ struct spi_message msg;
+ u32 tx_buf, rx_buf;
+
+ tx_buf = spi_data;
+ rx_buf = 0;
+
+ xfer.tx_buf = &tx_buf;
+ xfer.rx_buf = NULL;
+ xfer.len = sizeof(u32);
+ xfer.bits_per_word = 24;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ ret = spi_sync(spi, &msg);
+ return ret;
+}
+
+static int tps65912_spi_read(struct tps65912 *tps65912, u8 addr,
+ int bytes, void *dest)
+{
+ struct spi_device *spi = tps65912->control_data;
+ /* bit 23 is the read/write bit */
+ unsigned long spi_data = 0 << 23 | addr << 15;
+ struct spi_transfer xfer = { };
+ struct spi_message msg;
+ int ret;
+ u8 *data = (u8 *) dest;
+ u32 tx_buf, rx_buf;
+
+ tx_buf = spi_data;
+ rx_buf = 0;
+
+ xfer.tx_buf = &tx_buf;
+ xfer.rx_buf = &rx_buf;
+ xfer.len = sizeof(u32);
+ xfer.bits_per_word = 24;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ if (spi == NULL)
+ return 0;
+
+ ret = spi_sync(spi, &msg);
+ if (ret == 0)
+ *data = (u8) (rx_buf & 0xFF);
+ return ret;
+}
+
+static int __devinit tps65912_spi_probe(struct spi_device *spi)
+{
+ struct tps65912 *tps65912;
+
+ tps65912 = kzalloc(sizeof(struct tps65912), GFP_KERNEL);
+ if (tps65912 == NULL)
+ return -ENOMEM;
+
+ tps65912->dev = &spi->dev;
+ tps65912->control_data = spi;
+ tps65912->read = tps65912_spi_read;
+ tps65912->write = tps65912_spi_write;
+
+ spi_set_drvdata(spi, tps65912);
+
+ return tps65912_device_init(tps65912);
+}
+
+static int __devexit tps65912_spi_remove(struct spi_device *spi)
+{
+ struct tps65912 *tps65912 = spi_get_drvdata(spi);
+
+ tps65912_device_exit(tps65912);
+
+ return 0;
+}
+
+static struct spi_driver tps65912_spi_driver = {
+ .driver = {
+ .name = "tps65912",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_spi_probe,
+ .remove = __devexit_p(tps65912_spi_remove),
+};
+
+static int __init tps65912_spi_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&tps65912_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register TPS65912 SPI driver: %d\n", ret);
+
+ return ret;
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps65912_spi_init);
+
+static void __exit tps65912_spi_exit(void)
+{
+ spi_unregister_driver(&tps65912_spi_driver);
+}
+module_exit(tps65912_spi_exit);
+
+MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("SPI support for TPS65912 chip family mfd");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index b8f2a4e..01ecfee 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -110,7 +110,7 @@
#endif
#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) ||\
- defined(CONFIG_SND_SOC_TWL6040) || defined(CONFIG_SND_SOC_TWL6040_MODULE)
+ defined(CONFIG_TWL6040_CORE) || defined(CONFIG_TWL6040_CORE_MODULE)
#define twl_has_codec() true
#else
#define twl_has_codec() false
@@ -815,20 +815,19 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}
- if (twl_has_codec() && pdata->codec && twl_class_is_4030()) {
+ if (twl_has_codec() && pdata->audio && twl_class_is_4030()) {
sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
child = add_child(sub_chip_id, "twl4030-audio",
- pdata->codec, sizeof(*pdata->codec),
+ pdata->audio, sizeof(*pdata->audio),
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- /* Phoenix codec driver is probed directly atm */
- if (twl_has_codec() && pdata->codec && twl_class_is_6030()) {
+ if (twl_has_codec() && pdata->audio && twl_class_is_6030()) {
sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl6040-codec",
- pdata->codec, sizeof(*pdata->codec),
+ child = add_child(sub_chip_id, "twl6040",
+ pdata->audio, sizeof(*pdata->audio),
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -1284,6 +1283,8 @@ static const struct i2c_device_id twl_ids[] = {
{ "tps65950", 0 }, /* catalog version of twl5030 */
{ "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */
{ "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */
+ { "tps65921", TPS_SUBSET }, /* fewer LDOs; no codec, no LED
+ and vibrator. Charger in USB module*/
{ "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */
{ "twl6025", TWL6030_CLASS | TWL6025_SUBCLASS }, /* "Phoenix lite" */
{ /* end of list */ },
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
new file mode 100644
index 0000000..ae51ab5
--- /dev/null
+++ b/drivers/mfd/twl4030-audio.c
@@ -0,0 +1,277 @@
+/*
+ * MFD driver for twl4030 audio submodule, which contains an audio codec, and
+ * the vibra control.
+ *
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * Copyright: (C) 2009 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/i2c/twl.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/twl4030-audio.h>
+
+#define TWL4030_AUDIO_CELLS 2
+
+static struct platform_device *twl4030_audio_dev;
+
+struct twl4030_audio_resource {
+ int request_count;
+ u8 reg;
+ u8 mask;
+};
+
+struct twl4030_audio {
+ unsigned int audio_mclk;
+ struct mutex mutex;
+ struct twl4030_audio_resource resource[TWL4030_AUDIO_RES_MAX];
+ struct mfd_cell cells[TWL4030_AUDIO_CELLS];
+};
+
+/*
+ * Modify the resource, the function returns the content of the register
+ * after the modification.
+ */
+static int twl4030_audio_set_resource(enum twl4030_audio_res id, int enable)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
+ u8 val;
+
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
+ audio->resource[id].reg);
+
+ if (enable)
+ val |= audio->resource[id].mask;
+ else
+ val &= ~audio->resource[id].mask;
+
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ val, audio->resource[id].reg);
+
+ return val;
+}
+
+static inline int twl4030_audio_get_resource(enum twl4030_audio_res id)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
+ u8 val;
+
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
+ audio->resource[id].reg);
+
+ return val;
+}
+
+/*
+ * Enable the resource.
+ * The function returns with error or the content of the register
+ */
+int twl4030_audio_enable_resource(enum twl4030_audio_res id)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
+ int val;
+
+ if (id >= TWL4030_AUDIO_RES_MAX) {
+ dev_err(&twl4030_audio_dev->dev,
+ "Invalid resource ID (%u)\n", id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&audio->mutex);
+ if (!audio->resource[id].request_count)
+ /* Resource was disabled, enable it */
+ val = twl4030_audio_set_resource(id, 1);
+ else
+ val = twl4030_audio_get_resource(id);
+
+ audio->resource[id].request_count++;
+ mutex_unlock(&audio->mutex);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(twl4030_audio_enable_resource);
+
+/*
+ * Disable the resource.
+ * The function returns with error or the content of the register
+ */
+int twl4030_audio_disable_resource(unsigned id)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
+ int val;
+
+ if (id >= TWL4030_AUDIO_RES_MAX) {
+ dev_err(&twl4030_audio_dev->dev,
+ "Invalid resource ID (%u)\n", id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&audio->mutex);
+ if (!audio->resource[id].request_count) {
+ dev_err(&twl4030_audio_dev->dev,
+ "Resource has been disabled already (%u)\n", id);
+ mutex_unlock(&audio->mutex);
+ return -EPERM;
+ }
+ audio->resource[id].request_count--;
+
+ if (!audio->resource[id].request_count)
+ /* Resource can be disabled now */
+ val = twl4030_audio_set_resource(id, 0);
+ else
+ val = twl4030_audio_get_resource(id);
+
+ mutex_unlock(&audio->mutex);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(twl4030_audio_disable_resource);
+
+unsigned int twl4030_audio_get_mclk(void)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
+
+ return audio->audio_mclk;
+}
+EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
+
+static int __devinit twl4030_audio_probe(struct platform_device *pdev)
+{
+ struct twl4030_audio *audio;
+ struct twl4030_audio_data *pdata = pdev->dev.platform_data;
+ struct mfd_cell *cell = NULL;
+ int ret, childs = 0;
+ u8 val;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Platform data is missing\n");
+ return -EINVAL;
+ }
+
+ /* Configure APLL_INFREQ and disable APLL if enabled */
+ val = 0;
+ switch (pdata->audio_mclk) {
+ case 19200000:
+ val |= TWL4030_APLL_INFREQ_19200KHZ;
+ break;
+ case 26000000:
+ val |= TWL4030_APLL_INFREQ_26000KHZ;
+ break;
+ case 38400000:
+ val |= TWL4030_APLL_INFREQ_38400KHZ;
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid audio_mclk\n");
+ return -EINVAL;
+ }
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ val, TWL4030_REG_APLL_CTL);
+
+ audio = kzalloc(sizeof(struct twl4030_audio), GFP_KERNEL);
+ if (!audio)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, audio);
+
+ twl4030_audio_dev = pdev;
+ mutex_init(&audio->mutex);
+ audio->audio_mclk = pdata->audio_mclk;
+
+ /* Codec power */
+ audio->resource[TWL4030_AUDIO_RES_POWER].reg = TWL4030_REG_CODEC_MODE;
+ audio->resource[TWL4030_AUDIO_RES_POWER].mask = TWL4030_CODECPDZ;
+
+ /* PLL */
+ audio->resource[TWL4030_AUDIO_RES_APLL].reg = TWL4030_REG_APLL_CTL;
+ audio->resource[TWL4030_AUDIO_RES_APLL].mask = TWL4030_APLL_EN;
+
+ if (pdata->codec) {
+ cell = &audio->cells[childs];
+ cell->name = "twl4030-codec";
+ cell->platform_data = pdata->codec;
+ cell->pdata_size = sizeof(*pdata->codec);
+ childs++;
+ }
+ if (pdata->vibra) {
+ cell = &audio->cells[childs];
+ cell->name = "twl4030-vibra";
+ cell->platform_data = pdata->vibra;
+ cell->pdata_size = sizeof(*pdata->vibra);
+ childs++;
+ }
+
+ if (childs)
+ ret = mfd_add_devices(&pdev->dev, pdev->id, audio->cells,
+ childs, NULL, 0);
+ else {
+ dev_err(&pdev->dev, "No platform data found for childs\n");
+ ret = -ENODEV;
+ }
+
+ if (!ret)
+ return 0;
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(audio);
+ twl4030_audio_dev = NULL;
+ return ret;
+}
+
+static int __devexit twl4030_audio_remove(struct platform_device *pdev)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(pdev);
+
+ mfd_remove_devices(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(audio);
+ twl4030_audio_dev = NULL;
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:twl4030-audio");
+
+static struct platform_driver twl4030_audio_driver = {
+ .probe = twl4030_audio_probe,
+ .remove = __devexit_p(twl4030_audio_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "twl4030-audio",
+ },
+};
+
+static int __devinit twl4030_audio_init(void)
+{
+ return platform_driver_register(&twl4030_audio_driver);
+}
+module_init(twl4030_audio_init);
+
+static void __devexit twl4030_audio_exit(void)
+{
+ platform_driver_unregister(&twl4030_audio_driver);
+}
+module_exit(twl4030_audio_exit);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
deleted file mode 100644
index 2bf4136..0000000
--- a/drivers/mfd/twl4030-codec.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * MFD driver for twl4030 codec submodule
- *
- * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * Copyright: (C) 2009 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include <linux/i2c/twl.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/twl4030-codec.h>
-
-#define TWL4030_CODEC_CELLS 2
-
-static struct platform_device *twl4030_codec_dev;
-
-struct twl4030_codec_resource {
- int request_count;
- u8 reg;
- u8 mask;
-};
-
-struct twl4030_codec {
- unsigned int audio_mclk;
- struct mutex mutex;
- struct twl4030_codec_resource resource[TWL4030_CODEC_RES_MAX];
- struct mfd_cell cells[TWL4030_CODEC_CELLS];
-};
-
-/*
- * Modify the resource, the function returns the content of the register
- * after the modification.
- */
-static int twl4030_codec_set_resource(enum twl4030_codec_res id, int enable)
-{
- struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
- u8 val;
-
- twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
- codec->resource[id].reg);
-
- if (enable)
- val |= codec->resource[id].mask;
- else
- val &= ~codec->resource[id].mask;
-
- twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
- val, codec->resource[id].reg);
-
- return val;
-}
-
-static inline int twl4030_codec_get_resource(enum twl4030_codec_res id)
-{
- struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
- u8 val;
-
- twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
- codec->resource[id].reg);
-
- return val;
-}
-
-/*
- * Enable the resource.
- * The function returns with error or the content of the register
- */
-int twl4030_codec_enable_resource(enum twl4030_codec_res id)
-{
- struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
- int val;
-
- if (id >= TWL4030_CODEC_RES_MAX) {
- dev_err(&twl4030_codec_dev->dev,
- "Invalid resource ID (%u)\n", id);
- return -EINVAL;
- }
-
- mutex_lock(&codec->mutex);
- if (!codec->resource[id].request_count)
- /* Resource was disabled, enable it */
- val = twl4030_codec_set_resource(id, 1);
- else
- val = twl4030_codec_get_resource(id);
-
- codec->resource[id].request_count++;
- mutex_unlock(&codec->mutex);
-
- return val;
-}
-EXPORT_SYMBOL_GPL(twl4030_codec_enable_resource);
-
-/*
- * Disable the resource.
- * The function returns with error or the content of the register
- */
-int twl4030_codec_disable_resource(unsigned id)
-{
- struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
- int val;
-
- if (id >= TWL4030_CODEC_RES_MAX) {
- dev_err(&twl4030_codec_dev->dev,
- "Invalid resource ID (%u)\n", id);
- return -EINVAL;
- }
-
- mutex_lock(&codec->mutex);
- if (!codec->resource[id].request_count) {
- dev_err(&twl4030_codec_dev->dev,
- "Resource has been disabled already (%u)\n", id);
- mutex_unlock(&codec->mutex);
- return -EPERM;
- }
- codec->resource[id].request_count--;
-
- if (!codec->resource[id].request_count)
- /* Resource can be disabled now */
- val = twl4030_codec_set_resource(id, 0);
- else
- val = twl4030_codec_get_resource(id);
-
- mutex_unlock(&codec->mutex);
-
- return val;
-}
-EXPORT_SYMBOL_GPL(twl4030_codec_disable_resource);
-
-unsigned int twl4030_codec_get_mclk(void)
-{
- struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
-
- return codec->audio_mclk;
-}
-EXPORT_SYMBOL_GPL(twl4030_codec_get_mclk);
-
-static int __devinit twl4030_codec_probe(struct platform_device *pdev)
-{
- struct twl4030_codec *codec;
- struct twl4030_codec_data *pdata = pdev->dev.platform_data;
- struct mfd_cell *cell = NULL;
- int ret, childs = 0;
- u8 val;
-
- if (!pdata) {
- dev_err(&pdev->dev, "Platform data is missing\n");
- return -EINVAL;
- }
-
- /* Configure APLL_INFREQ and disable APLL if enabled */
- val = 0;
- switch (pdata->audio_mclk) {
- case 19200000:
- val |= TWL4030_APLL_INFREQ_19200KHZ;
- break;
- case 26000000:
- val |= TWL4030_APLL_INFREQ_26000KHZ;
- break;
- case 38400000:
- val |= TWL4030_APLL_INFREQ_38400KHZ;
- break;
- default:
- dev_err(&pdev->dev, "Invalid audio_mclk\n");
- return -EINVAL;
- }
- twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
- val, TWL4030_REG_APLL_CTL);
-
- codec = kzalloc(sizeof(struct twl4030_codec), GFP_KERNEL);
- if (!codec)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, codec);
-
- twl4030_codec_dev = pdev;
- mutex_init(&codec->mutex);
- codec->audio_mclk = pdata->audio_mclk;
-
- /* Codec power */
- codec->resource[TWL4030_CODEC_RES_POWER].reg = TWL4030_REG_CODEC_MODE;
- codec->resource[TWL4030_CODEC_RES_POWER].mask = TWL4030_CODECPDZ;
-
- /* PLL */
- codec->resource[TWL4030_CODEC_RES_APLL].reg = TWL4030_REG_APLL_CTL;
- codec->resource[TWL4030_CODEC_RES_APLL].mask = TWL4030_APLL_EN;
-
- if (pdata->audio) {
- cell = &codec->cells[childs];
- cell->name = "twl4030-codec";
- cell->platform_data = pdata->audio;
- cell->pdata_size = sizeof(*pdata->audio);
- childs++;
- }
- if (pdata->vibra) {
- cell = &codec->cells[childs];
- cell->name = "twl4030-vibra";
- cell->platform_data = pdata->vibra;
- cell->pdata_size = sizeof(*pdata->vibra);
- childs++;
- }
-
- if (childs)
- ret = mfd_add_devices(&pdev->dev, pdev->id, codec->cells,
- childs, NULL, 0);
- else {
- dev_err(&pdev->dev, "No platform data found for childs\n");
- ret = -ENODEV;
- }
-
- if (!ret)
- return 0;
-
- platform_set_drvdata(pdev, NULL);
- kfree(codec);
- twl4030_codec_dev = NULL;
- return ret;
-}
-
-static int __devexit twl4030_codec_remove(struct platform_device *pdev)
-{
- struct twl4030_codec *codec = platform_get_drvdata(pdev);
-
- mfd_remove_devices(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
- kfree(codec);
- twl4030_codec_dev = NULL;
-
- return 0;
-}
-
-MODULE_ALIAS("platform:twl4030-audio");
-
-static struct platform_driver twl4030_codec_driver = {
- .probe = twl4030_codec_probe,
- .remove = __devexit_p(twl4030_codec_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = "twl4030-audio",
- },
-};
-
-static int __devinit twl4030_codec_init(void)
-{
- return platform_driver_register(&twl4030_codec_driver);
-}
-module_init(twl4030_codec_init);
-
-static void __devexit twl4030_codec_exit(void)
-{
- platform_driver_unregister(&twl4030_codec_driver);
-}
-module_exit(twl4030_codec_exit);
-
-MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 3941ddc..b5d598c 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -530,13 +530,13 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
if (ret) {
dev_err(twl4030_madc->dev,
"unable to write sel register 0x%X\n", method->sel + 1);
- return ret;
+ goto out;
}
ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
if (ret) {
dev_err(twl4030_madc->dev,
"unable to write sel register 0x%X\n", method->sel + 1);
- return ret;
+ goto out;
}
/* Select averaging for all channels if do_avg is set */
if (req->do_avg) {
@@ -546,7 +546,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
dev_err(twl4030_madc->dev,
"unable to write avg register 0x%X\n",
method->avg + 1);
- return ret;
+ goto out;
}
ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
ch_lsb, method->avg);
@@ -554,7 +554,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
dev_err(twl4030_madc->dev,
"unable to write sel reg 0x%X\n",
method->sel + 1);
- return ret;
+ goto out;
}
}
if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
diff --git a/drivers/mfd/twl6030-pwm.c b/drivers/mfd/twl6030-pwm.c
index 5d25bdc..e8fee14 100644
--- a/drivers/mfd/twl6030-pwm.c
+++ b/drivers/mfd/twl6030-pwm.c
@@ -161,3 +161,5 @@ void pwm_free(struct pwm_device *pwm)
kfree(pwm);
}
EXPORT_SYMBOL(pwm_free);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
new file mode 100644
index 0000000..24d436c
--- /dev/null
+++ b/drivers/mfd/twl6040-core.c
@@ -0,0 +1,620 @@
+/*
+ * MFD driver for TWL6040 audio device
+ *
+ * Authors: Misael Lopez Cruz <misael.lopez@ti.com>
+ * Jorge Eduardo Candelaria <jorge.candelaria@ti.com>
+ * Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * Copyright: (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/i2c/twl.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/twl6040.h>
+
+static struct platform_device *twl6040_dev;
+
+int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
+{
+ int ret;
+ u8 val = 0;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ if (ret < 0) {
+ mutex_unlock(&twl6040->io_mutex);
+ return ret;
+ }
+ mutex_unlock(&twl6040->io_mutex);
+
+ return val;
+}
+EXPORT_SYMBOL(twl6040_reg_read);
+
+int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val)
+{
+ int ret;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+ mutex_unlock(&twl6040->io_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_reg_write);
+
+int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
+{
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ if (ret)
+ goto out;
+
+ val |= mask;
+ ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+out:
+ mutex_unlock(&twl6040->io_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_set_bits);
+
+int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
+{
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ if (ret)
+ goto out;
+
+ val &= ~mask;
+ ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+out:
+ mutex_unlock(&twl6040->io_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_clear_bits);
+
+/* twl6040 codec manual power-up sequence */
+static int twl6040_power_up(struct twl6040 *twl6040)
+{
+ u8 ldoctl, ncpctl, lppllctl;
+ int ret;
+
+ /* enable high-side LDO, reference system and internal oscillator */
+ ldoctl = TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ return ret;
+ usleep_range(10000, 10500);
+
+ /* enable negative charge pump */
+ ncpctl = TWL6040_NCPENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
+ if (ret)
+ goto ncp_err;
+ usleep_range(1000, 1500);
+
+ /* enable low-side LDO */
+ ldoctl |= TWL6040_LSLDOENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto lsldo_err;
+ usleep_range(1000, 1500);
+
+ /* enable low-power PLL */
+ lppllctl = TWL6040_LPLLENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ if (ret)
+ goto lppll_err;
+ usleep_range(5000, 5500);
+
+ /* disable internal oscillator */
+ ldoctl &= ~TWL6040_OSCENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto osc_err;
+
+ return 0;
+
+osc_err:
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+lppll_err:
+ ldoctl &= ~TWL6040_LSLDOENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+lsldo_err:
+ ncpctl &= ~TWL6040_NCPENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
+ncp_err:
+ ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA);
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+
+ return ret;
+}
+
+/* twl6040 manual power-down sequence */
+static void twl6040_power_down(struct twl6040 *twl6040)
+{
+ u8 ncpctl, ldoctl, lppllctl;
+
+ ncpctl = twl6040_reg_read(twl6040, TWL6040_REG_NCPCTL);
+ ldoctl = twl6040_reg_read(twl6040, TWL6040_REG_LDOCTL);
+ lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
+
+ /* enable internal oscillator */
+ ldoctl |= TWL6040_OSCENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ usleep_range(1000, 1500);
+
+ /* disable low-power PLL */
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+
+ /* disable low-side LDO */
+ ldoctl &= ~TWL6040_LSLDOENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+
+ /* disable negative charge pump */
+ ncpctl &= ~TWL6040_NCPENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
+
+ /* disable high-side LDO, reference system and internal oscillator */
+ ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA);
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+}
+
+static irqreturn_t twl6040_naudint_handler(int irq, void *data)
+{
+ struct twl6040 *twl6040 = data;
+ u8 intid, status;
+
+ intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+
+ if (intid & TWL6040_READYINT)
+ complete(&twl6040->ready);
+
+ if (intid & TWL6040_THINT) {
+ status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
+ if (status & TWL6040_TSHUTDET) {
+ dev_warn(&twl6040_dev->dev,
+ "Thermal shutdown, powering-off");
+ twl6040_power(twl6040, 0);
+ } else {
+ dev_warn(&twl6040_dev->dev,
+ "Leaving thermal shutdown, powering-on");
+ twl6040_power(twl6040, 1);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int twl6040_power_up_completion(struct twl6040 *twl6040,
+ int naudint)
+{
+ int time_left;
+ u8 intid;
+
+ time_left = wait_for_completion_timeout(&twl6040->ready,
+ msecs_to_jiffies(144));
+ if (!time_left) {
+ intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+ if (!(intid & TWL6040_READYINT)) {
+ dev_err(&twl6040_dev->dev,
+ "timeout waiting for READYINT\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+int twl6040_power(struct twl6040 *twl6040, int on)
+{
+ int audpwron = twl6040->audpwron;
+ int naudint = twl6040->irq;
+ int ret = 0;
+
+ mutex_lock(&twl6040->mutex);
+
+ if (on) {
+ /* already powered-up */
+ if (twl6040->power_count++)
+ goto out;
+
+ if (gpio_is_valid(audpwron)) {
+ /* use AUDPWRON line */
+ gpio_set_value(audpwron, 1);
+ /* wait for power-up completion */
+ ret = twl6040_power_up_completion(twl6040, naudint);
+ if (ret) {
+ dev_err(&twl6040_dev->dev,
+ "automatic power-down failed\n");
+ twl6040->power_count = 0;
+ goto out;
+ }
+ } else {
+ /* use manual power-up sequence */
+ ret = twl6040_power_up(twl6040);
+ if (ret) {
+ dev_err(&twl6040_dev->dev,
+ "manual power-up failed\n");
+ twl6040->power_count = 0;
+ goto out;
+ }
+ }
+ /* Default PLL configuration after power up */
+ twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
+ twl6040->sysclk = 19200000;
+ } else {
+ /* already powered-down */
+ if (!twl6040->power_count) {
+ dev_err(&twl6040_dev->dev,
+ "device is already powered-off\n");
+ ret = -EPERM;
+ goto out;
+ }
+
+ if (--twl6040->power_count)
+ goto out;
+
+ if (gpio_is_valid(audpwron)) {
+ /* use AUDPWRON line */
+ gpio_set_value(audpwron, 0);
+
+ /* power-down sequence latency */
+ usleep_range(500, 700);
+ } else {
+ /* use manual power-down sequence */
+ twl6040_power_down(twl6040);
+ }
+ twl6040->sysclk = 0;
+ }
+
+out:
+ mutex_unlock(&twl6040->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_power);
+
+int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
+ unsigned int freq_in, unsigned int freq_out)
+{
+ u8 hppllctl, lppllctl;
+ int ret = 0;
+
+ mutex_lock(&twl6040->mutex);
+
+ hppllctl = twl6040_reg_read(twl6040, TWL6040_REG_HPPLLCTL);
+ lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
+
+ switch (pll_id) {
+ case TWL6040_SYSCLK_SEL_LPPLL:
+ /* low-power PLL divider */
+ switch (freq_out) {
+ case 17640000:
+ lppllctl |= TWL6040_LPLLFIN;
+ break;
+ case 19200000:
+ lppllctl &= ~TWL6040_LPLLFIN;
+ break;
+ default:
+ dev_err(&twl6040_dev->dev,
+ "freq_out %d not supported\n", freq_out);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+
+ switch (freq_in) {
+ case 32768:
+ lppllctl |= TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+ lppllctl);
+ mdelay(5);
+ lppllctl &= ~TWL6040_HPLLSEL;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+ lppllctl);
+ hppllctl &= ~TWL6040_HPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL,
+ hppllctl);
+ break;
+ default:
+ dev_err(&twl6040_dev->dev,
+ "freq_in %d not supported\n", freq_in);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+ break;
+ case TWL6040_SYSCLK_SEL_HPPLL:
+ /* high-performance PLL can provide only 19.2 MHz */
+ if (freq_out != 19200000) {
+ dev_err(&twl6040_dev->dev,
+ "freq_out %d not supported\n", freq_out);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+
+ hppllctl &= ~TWL6040_MCLK_MSK;
+
+ switch (freq_in) {
+ case 12000000:
+ /* PLL enabled, active mode */
+ hppllctl |= TWL6040_MCLK_12000KHZ |
+ TWL6040_HPLLENA;
+ break;
+ case 19200000:
+ /*
+ * PLL disabled
+ * (enable PLL if MCLK jitter quality
+ * doesn't meet specification)
+ */
+ hppllctl |= TWL6040_MCLK_19200KHZ;
+ break;
+ case 26000000:
+ /* PLL enabled, active mode */
+ hppllctl |= TWL6040_MCLK_26000KHZ |
+ TWL6040_HPLLENA;
+ break;
+ case 38400000:
+ /* PLL enabled, active mode */
+ hppllctl |= TWL6040_MCLK_38400KHZ |
+ TWL6040_HPLLENA;
+ break;
+ default:
+ dev_err(&twl6040_dev->dev,
+ "freq_in %d not supported\n", freq_in);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+
+ /* enable clock slicer to ensure input waveform is square */
+ hppllctl |= TWL6040_HPLLSQRENA;
+
+ twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, hppllctl);
+ usleep_range(500, 700);
+ lppllctl |= TWL6040_HPLLSEL;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ break;
+ default:
+ dev_err(&twl6040_dev->dev, "unknown pll id %d\n", pll_id);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+
+ twl6040->sysclk = freq_out;
+ twl6040->pll = pll_id;
+
+pll_out:
+ mutex_unlock(&twl6040->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_set_pll);
+
+int twl6040_get_pll(struct twl6040 *twl6040)
+{
+ if (twl6040->power_count)
+ return twl6040->pll;
+ else
+ return -ENODEV;
+}
+EXPORT_SYMBOL(twl6040_get_pll);
+
+unsigned int twl6040_get_sysclk(struct twl6040 *twl6040)
+{
+ return twl6040->sysclk;
+}
+EXPORT_SYMBOL(twl6040_get_sysclk);
+
+static struct resource twl6040_vibra_rsrc[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource twl6040_codec_rsrc[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int __devinit twl6040_probe(struct platform_device *pdev)
+{
+ struct twl4030_audio_data *pdata = pdev->dev.platform_data;
+ struct twl6040 *twl6040;
+ struct mfd_cell *cell = NULL;
+ int ret, children = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Platform data is missing\n");
+ return -EINVAL;
+ }
+
+ /* In order to operate correctly we need valid interrupt config */
+ if (!pdata->naudint_irq || !pdata->irq_base) {
+ dev_err(&pdev->dev, "Invalid IRQ configuration\n");
+ return -EINVAL;
+ }
+
+ twl6040 = kzalloc(sizeof(struct twl6040), GFP_KERNEL);
+ if (!twl6040)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, twl6040);
+
+ twl6040_dev = pdev;
+ twl6040->dev = &pdev->dev;
+ twl6040->audpwron = pdata->audpwron_gpio;
+ twl6040->irq = pdata->naudint_irq;
+ twl6040->irq_base = pdata->irq_base;
+
+ mutex_init(&twl6040->mutex);
+ mutex_init(&twl6040->io_mutex);
+ init_completion(&twl6040->ready);
+
+ twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
+
+ if (gpio_is_valid(twl6040->audpwron)) {
+ ret = gpio_request(twl6040->audpwron, "audpwron");
+ if (ret)
+ goto gpio1_err;
+
+ ret = gpio_direction_output(twl6040->audpwron, 0);
+ if (ret)
+ goto gpio2_err;
+ }
+
+ /* ERRATA: Automatic power-up is not possible in ES1.0 */
+ if (twl6040->rev == TWL6040_REV_ES1_0)
+ twl6040->audpwron = -EINVAL;
+
+ /* codec interrupt */
+ ret = twl6040_irq_init(twl6040);
+ if (ret)
+ goto gpio2_err;
+
+ ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
+ NULL, twl6040_naudint_handler, 0,
+ "twl6040_irq_ready", twl6040);
+ if (ret) {
+ dev_err(twl6040->dev, "READY IRQ request failed: %d\n",
+ ret);
+ goto irq_err;
+ }
+
+ /* dual-access registers controlled by I2C only */
+ twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
+
+ if (pdata->codec) {
+ int irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-codec";
+ twl6040_codec_rsrc[0].start = irq;
+ twl6040_codec_rsrc[0].end = irq;
+ cell->resources = twl6040_codec_rsrc;
+ cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+ cell->platform_data = pdata->codec;
+ cell->pdata_size = sizeof(*pdata->codec);
+ children++;
+ }
+
+ if (pdata->vibra) {
+ int irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-vibra";
+ twl6040_vibra_rsrc[0].start = irq;
+ twl6040_vibra_rsrc[0].end = irq;
+ cell->resources = twl6040_vibra_rsrc;
+ cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
+
+ cell->platform_data = pdata->vibra;
+ cell->pdata_size = sizeof(*pdata->vibra);
+ children++;
+ }
+
+ if (children) {
+ ret = mfd_add_devices(&pdev->dev, pdev->id, twl6040->cells,
+ children, NULL, 0);
+ if (ret)
+ goto mfd_err;
+ } else {
+ dev_err(&pdev->dev, "No platform data found for children\n");
+ ret = -ENODEV;
+ goto mfd_err;
+ }
+
+ return 0;
+
+mfd_err:
+ free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
+irq_err:
+ twl6040_irq_exit(twl6040);
+gpio2_err:
+ if (gpio_is_valid(twl6040->audpwron))
+ gpio_free(twl6040->audpwron);
+gpio1_err:
+ platform_set_drvdata(pdev, NULL);
+ kfree(twl6040);
+ twl6040_dev = NULL;
+ return ret;
+}
+
+static int __devexit twl6040_remove(struct platform_device *pdev)
+{
+ struct twl6040 *twl6040 = platform_get_drvdata(pdev);
+
+ if (twl6040->power_count)
+ twl6040_power(twl6040, 0);
+
+ if (gpio_is_valid(twl6040->audpwron))
+ gpio_free(twl6040->audpwron);
+
+ free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
+ twl6040_irq_exit(twl6040);
+
+ mfd_remove_devices(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(twl6040);
+ twl6040_dev = NULL;
+
+ return 0;
+}
+
+static struct platform_driver twl6040_driver = {
+ .probe = twl6040_probe,
+ .remove = __devexit_p(twl6040_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "twl6040",
+ },
+};
+
+static int __devinit twl6040_init(void)
+{
+ return platform_driver_register(&twl6040_driver);
+}
+module_init(twl6040_init);
+
+static void __devexit twl6040_exit(void)
+{
+ platform_driver_unregister(&twl6040_driver);
+}
+
+module_exit(twl6040_exit);
+
+MODULE_DESCRIPTION("TWL6040 MFD");
+MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:twl6040");
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
new file mode 100644
index 0000000..b3f8dda
--- /dev/null
+++ b/drivers/mfd/twl6040-irq.c
@@ -0,0 +1,191 @@
+/*
+ * Interrupt controller support for TWL6040
+ *
+ * Author: Misael Lopez Cruz <misael.lopez@ti.com>
+ *
+ * Copyright: (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/twl6040.h>
+
+struct twl6040_irq_data {
+ int mask;
+ int status;
+};
+
+static struct twl6040_irq_data twl6040_irqs[] = {
+ {
+ .mask = TWL6040_THMSK,
+ .status = TWL6040_THINT,
+ },
+ {
+ .mask = TWL6040_PLUGMSK,
+ .status = TWL6040_PLUGINT | TWL6040_UNPLUGINT,
+ },
+ {
+ .mask = TWL6040_HOOKMSK,
+ .status = TWL6040_HOOKINT,
+ },
+ {
+ .mask = TWL6040_HFMSK,
+ .status = TWL6040_HFINT,
+ },
+ {
+ .mask = TWL6040_VIBMSK,
+ .status = TWL6040_VIBINT,
+ },
+ {
+ .mask = TWL6040_READYMSK,
+ .status = TWL6040_READYINT,
+ },
+};
+
+static inline
+struct twl6040_irq_data *irq_to_twl6040_irq(struct twl6040 *twl6040,
+ int irq)
+{
+ return &twl6040_irqs[irq - twl6040->irq_base];
+}
+
+static void twl6040_irq_lock(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&twl6040->irq_mutex);
+}
+
+static void twl6040_irq_sync_unlock(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+
+ /* write back to hardware any change in irq mask */
+ if (twl6040->irq_masks_cur != twl6040->irq_masks_cache) {
+ twl6040->irq_masks_cache = twl6040->irq_masks_cur;
+ twl6040_reg_write(twl6040, TWL6040_REG_INTMR,
+ twl6040->irq_masks_cur);
+ }
+
+ mutex_unlock(&twl6040->irq_mutex);
+}
+
+static void twl6040_irq_enable(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+ struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
+ data->irq);
+
+ twl6040->irq_masks_cur &= ~irq_data->mask;
+}
+
+static void twl6040_irq_disable(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+ struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
+ data->irq);
+
+ twl6040->irq_masks_cur |= irq_data->mask;
+}
+
+static struct irq_chip twl6040_irq_chip = {
+ .name = "twl6040",
+ .irq_bus_lock = twl6040_irq_lock,
+ .irq_bus_sync_unlock = twl6040_irq_sync_unlock,
+ .irq_enable = twl6040_irq_enable,
+ .irq_disable = twl6040_irq_disable,
+};
+
+static irqreturn_t twl6040_irq_thread(int irq, void *data)
+{
+ struct twl6040 *twl6040 = data;
+ u8 intid;
+ int i;
+
+ intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+
+ /* apply masking and report (backwards to handle READYINT first) */
+ for (i = ARRAY_SIZE(twl6040_irqs) - 1; i >= 0; i--) {
+ if (twl6040->irq_masks_cur & twl6040_irqs[i].mask)
+ intid &= ~twl6040_irqs[i].status;
+ if (intid & twl6040_irqs[i].status)
+ handle_nested_irq(twl6040->irq_base + i);
+ }
+
+ /* ack unmasked irqs */
+ twl6040_reg_write(twl6040, TWL6040_REG_INTID, intid);
+
+ return IRQ_HANDLED;
+}
+
+int twl6040_irq_init(struct twl6040 *twl6040)
+{
+ int cur_irq, ret;
+ u8 val;
+
+ mutex_init(&twl6040->irq_mutex);
+
+ /* mask the individual interrupt sources */
+ twl6040->irq_masks_cur = TWL6040_ALLINT_MSK;
+ twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
+ twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
+
+ /* Register them with genirq */
+ for (cur_irq = twl6040->irq_base;
+ cur_irq < twl6040->irq_base + ARRAY_SIZE(twl6040_irqs);
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, twl6040);
+ irq_set_chip_and_handler(cur_irq, &twl6040_irq_chip,
+ handle_level_irq);
+ irq_set_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(twl6040->irq, NULL, twl6040_irq_thread,
+ IRQF_ONESHOT, "twl6040", twl6040);
+ if (ret) {
+ dev_err(twl6040->dev, "failed to request IRQ %d: %d\n",
+ twl6040->irq, ret);
+ return ret;
+ }
+
+ /* reset interrupts */
+ val = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+
+ /* interrupts cleared on write */
+ twl6040_clear_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_INTCLRMODE);
+
+ return 0;
+}
+EXPORT_SYMBOL(twl6040_irq_init);
+
+void twl6040_irq_exit(struct twl6040 *twl6040)
+{
+ free_irq(twl6040->irq, twl6040);
+}
+EXPORT_SYMBOL(twl6040_irq_exit);
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
new file mode 100644
index 0000000..8721095
--- /dev/null
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -0,0 +1,299 @@
+/*
+ * wm831x-auxadc.c -- AUXADC for Wolfson WM831x PMICs
+ *
+ * Copyright 2009-2011 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/core.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/pdata.h>
+#include <linux/mfd/wm831x/irq.h>
+#include <linux/mfd/wm831x/auxadc.h>
+#include <linux/mfd/wm831x/otp.h>
+#include <linux/mfd/wm831x/regulator.h>
+
+struct wm831x_auxadc_req {
+ struct list_head list;
+ enum wm831x_auxadc input;
+ int val;
+ struct completion done;
+};
+
+static int wm831x_auxadc_read_irq(struct wm831x *wm831x,
+ enum wm831x_auxadc input)
+{
+ struct wm831x_auxadc_req *req;
+ int ret;
+ bool ena = false;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ init_completion(&req->done);
+ req->input = input;
+ req->val = -ETIMEDOUT;
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ /* Enqueue the request */
+ list_add(&req->list, &wm831x->auxadc_pending);
+
+ ena = !wm831x->auxadc_active;
+
+ if (ena) {
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_ENA, WM831X_AUX_ENA);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ /* Enable the conversion if not already running */
+ if (!(wm831x->auxadc_active & (1 << input))) {
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE,
+ 1 << input, 1 << input);
+ if (ret != 0) {
+ dev_err(wm831x->dev,
+ "Failed to set AUXADC source: %d\n", ret);
+ goto out;
+ }
+
+ wm831x->auxadc_active |= 1 << input;
+ }
+
+ /* We convert at the fastest rate possible */
+ if (ena) {
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_CVT_ENA |
+ WM831X_AUX_RATE_MASK,
+ WM831X_AUX_CVT_ENA |
+ WM831X_AUX_RATE_MASK);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to start AUXADC: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ /* Wait for an interrupt */
+ wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ list_del(&req->list);
+ ret = req->val;
+
+out:
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ kfree(req);
+
+ return ret;
+}
+
+static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data)
+{
+ struct wm831x *wm831x = irq_data;
+ struct wm831x_auxadc_req *req;
+ int ret, input, val;
+
+ ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "Failed to read AUXADC data: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ input = ((ret & WM831X_AUX_DATA_SRC_MASK)
+ >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
+
+ if (input == 14)
+ input = WM831X_AUX_CAL;
+
+ val = ret & WM831X_AUX_DATA_MASK;
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ /* Disable this conversion, we're about to complete all users */
+ wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE,
+ 1 << input, 0);
+ wm831x->auxadc_active &= ~(1 << input);
+
+ /* Turn off the entire converter if idle */
+ if (!wm831x->auxadc_active)
+ wm831x_reg_write(wm831x, WM831X_AUXADC_CONTROL, 0);
+
+ /* Wake up any threads waiting for this request */
+ list_for_each_entry(req, &wm831x->auxadc_pending, list) {
+ if (req->input == input) {
+ req->val = val;
+ complete(&req->done);
+ }
+ }
+
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int wm831x_auxadc_read_polled(struct wm831x *wm831x,
+ enum wm831x_auxadc input)
+{
+ int ret, src, timeout;
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_ENA, WM831X_AUX_ENA);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret);
+ goto out;
+ }
+
+ /* We force a single source at present */
+ src = input;
+ ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE,
+ 1 << src);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret);
+ goto out;
+ }
+
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret);
+ goto disable;
+ }
+
+ /* If we're not using interrupts then poll the
+ * interrupt status register */
+ timeout = 5;
+ while (timeout) {
+ msleep(1);
+
+ ret = wm831x_reg_read(wm831x,
+ WM831X_INTERRUPT_STATUS_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "ISR 1 read failed: %d\n", ret);
+ goto disable;
+ }
+
+ /* Did it complete? */
+ if (ret & WM831X_AUXADC_DATA_EINT) {
+ wm831x_reg_write(wm831x,
+ WM831X_INTERRUPT_STATUS_1,
+ WM831X_AUXADC_DATA_EINT);
+ break;
+ } else {
+ dev_err(wm831x->dev,
+ "AUXADC conversion timeout\n");
+ ret = -EBUSY;
+ goto disable;
+ }
+ }
+
+ ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "Failed to read AUXADC data: %d\n", ret);
+ goto disable;
+ }
+
+ src = ((ret & WM831X_AUX_DATA_SRC_MASK)
+ >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
+
+ if (src == 14)
+ src = WM831X_AUX_CAL;
+
+ if (src != input) {
+ dev_err(wm831x->dev, "Data from source %d not %d\n",
+ src, input);
+ ret = -EINVAL;
+ } else {
+ ret &= WM831X_AUX_DATA_MASK;
+ }
+
+disable:
+ wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0);
+out:
+ mutex_unlock(&wm831x->auxadc_lock);
+ return ret;
+}
+
+/**
+ * wm831x_auxadc_read: Read a value from the WM831x AUXADC
+ *
+ * @wm831x: Device to read from.
+ * @input: AUXADC input to read.
+ */
+int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
+{
+ return wm831x->auxadc_read(wm831x, input);
+}
+EXPORT_SYMBOL_GPL(wm831x_auxadc_read);
+
+/**
+ * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC
+ *
+ * @wm831x: Device to read from.
+ * @input: AUXADC input to read.
+ */
+int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input)
+{
+ int ret;
+
+ ret = wm831x_auxadc_read(wm831x, input);
+ if (ret < 0)
+ return ret;
+
+ ret *= 1465;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv);
+
+void wm831x_auxadc_init(struct wm831x *wm831x)
+{
+ int ret;
+
+ mutex_init(&wm831x->auxadc_lock);
+ INIT_LIST_HEAD(&wm831x->auxadc_pending);
+
+ if (wm831x->irq && wm831x->irq_base) {
+ wm831x->auxadc_read = wm831x_auxadc_read_irq;
+
+ ret = request_threaded_irq(wm831x->irq_base +
+ WM831X_IRQ_AUXADC_DATA,
+ NULL, wm831x_auxadc_irq, 0,
+ "auxadc", wm831x);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n",
+ ret);
+ wm831x->auxadc_read = NULL;
+ }
+ }
+
+ if (!wm831x->auxadc_read)
+ wm831x->auxadc_read = wm831x_auxadc_read_polled;
+}
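
wm831x_auxadc_read() returns the raw conversion code and wm831x_auxadc_read_uv() scales it by
1465 to microvolts, using whichever backend (IRQ-driven or polled) wm831x_auxadc_init()
selected. A short usage sketch — WM831X_AUX_AUX1 is assumed here as a representative
enumerator from <linux/mfd/wm831x/auxadc.h>; substitute the input your board actually wires up:

    #include <linux/device.h>
    #include <linux/mfd/wm831x/core.h>
    #include <linux/mfd/wm831x/auxadc.h>

    static int example_read_aux1_uv(struct wm831x *wm831x)
    {
            int uv = wm831x_auxadc_read_uv(wm831x, WM831X_AUX_AUX1);

            if (uv < 0)
                    dev_err(wm831x->dev, "AUXADC read failed: %d\n", uv);

            return uv;	/* microvolts (raw code * 1465), or -errno */
    }
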
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 265f75f..282e76a 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -295,7 +295,7 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
goto out;
r &= ~mask;
- r |= val;
+ r |= val & mask;
ret = wm831x_write(wm831x, reg, 2, &r);
@@ -306,146 +306,6 @@ out:
}
EXPORT_SYMBOL_GPL(wm831x_set_bits);
-/**
- * wm831x_auxadc_read: Read a value from the WM831x AUXADC
- *
- * @wm831x: Device to read from.
- * @input: AUXADC input to read.
- */
-int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
-{
- int ret, src, irq_masked, timeout;
-
- /* Are we using the interrupt? */
- irq_masked = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1_MASK);
- irq_masked &= WM831X_AUXADC_DATA_EINT;
-
- mutex_lock(&wm831x->auxadc_lock);
-
- ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
- WM831X_AUX_ENA, WM831X_AUX_ENA);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret);
- goto out;
- }
-
- /* We force a single source at present */
- src = input;
- ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE,
- 1 << src);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret);
- goto out;
- }
-
- /* Clear any notification from a very late arriving interrupt */
- try_wait_for_completion(&wm831x->auxadc_done);
-
- ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
- WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret);
- goto disable;
- }
-
- if (irq_masked) {
- /* If we're not using interrupts then poll the
- * interrupt status register */
- timeout = 5;
- while (timeout) {
- msleep(1);
-
- ret = wm831x_reg_read(wm831x,
- WM831X_INTERRUPT_STATUS_1);
- if (ret < 0) {
- dev_err(wm831x->dev,
- "ISR 1 read failed: %d\n", ret);
- goto disable;
- }
-
- /* Did it complete? */
- if (ret & WM831X_AUXADC_DATA_EINT) {
- wm831x_reg_write(wm831x,
- WM831X_INTERRUPT_STATUS_1,
- WM831X_AUXADC_DATA_EINT);
- break;
- } else {
- dev_err(wm831x->dev,
- "AUXADC conversion timeout\n");
- ret = -EBUSY;
- goto disable;
- }
- }
- } else {
- /* If we are using interrupts then wait for the
- * interrupt to complete. Use an extremely long
- * timeout to handle situations with heavy load where
- * the notification of the interrupt may be delayed by
- * threaded IRQ handling. */
- if (!wait_for_completion_timeout(&wm831x->auxadc_done,
- msecs_to_jiffies(500))) {
- dev_err(wm831x->dev, "Timed out waiting for AUXADC\n");
- ret = -EBUSY;
- goto disable;
- }
- }
-
- ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret);
- } else {
- src = ((ret & WM831X_AUX_DATA_SRC_MASK)
- >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
-
- if (src == 14)
- src = WM831X_AUX_CAL;
-
- if (src != input) {
- dev_err(wm831x->dev, "Data from source %d not %d\n",
- src, input);
- ret = -EINVAL;
- } else {
- ret &= WM831X_AUX_DATA_MASK;
- }
- }
-
-disable:
- wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0);
-out:
- mutex_unlock(&wm831x->auxadc_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(wm831x_auxadc_read);
-
-static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data)
-{
- struct wm831x *wm831x = irq_data;
-
- complete(&wm831x->auxadc_done);
-
- return IRQ_HANDLED;
-}
-
-/**
- * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC
- *
- * @wm831x: Device to read from.
- * @input: AUXADC input to read.
- */
-int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input)
-{
- int ret;
-
- ret = wm831x_auxadc_read(wm831x, input);
- if (ret < 0)
- return ret;
-
- ret *= 1465;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv);
-
static struct resource wm831x_dcdc1_resources[] = {
{
.start = WM831X_DC1_CONTROL_1,
@@ -872,6 +732,9 @@ static struct mfd_cell wm8310_devs[] = {
.resources = wm831x_dcdc4_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-epe",
.id = 1,
},
@@ -976,11 +839,6 @@ static struct mfd_cell wm8310_devs[] = {
.resources = wm831x_power_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1028,6 +886,9 @@ static struct mfd_cell wm8311_devs[] = {
.resources = wm831x_dcdc4_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-epe",
.id = 1,
},
@@ -1108,11 +969,6 @@ static struct mfd_cell wm8311_devs[] = {
.resources = wm831x_power_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1125,11 +981,6 @@ static struct mfd_cell wm8311_devs[] = {
.resources = wm831x_status2_resources,
},
{
- .name = "wm831x-touch",
- .num_resources = ARRAY_SIZE(wm831x_touch_resources),
- .resources = wm831x_touch_resources,
- },
- {
.name = "wm831x-watchdog",
.num_resources = ARRAY_SIZE(wm831x_wdt_resources),
.resources = wm831x_wdt_resources,
@@ -1165,6 +1016,9 @@ static struct mfd_cell wm8312_devs[] = {
.resources = wm831x_dcdc4_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-epe",
.id = 1,
},
@@ -1269,11 +1123,6 @@ static struct mfd_cell wm8312_devs[] = {
.resources = wm831x_power_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1286,11 +1135,6 @@ static struct mfd_cell wm8312_devs[] = {
.resources = wm831x_status2_resources,
},
{
- .name = "wm831x-touch",
- .num_resources = ARRAY_SIZE(wm831x_touch_resources),
- .resources = wm831x_touch_resources,
- },
- {
.name = "wm831x-watchdog",
.num_resources = ARRAY_SIZE(wm831x_wdt_resources),
.resources = wm831x_wdt_resources,
@@ -1326,6 +1170,9 @@ static struct mfd_cell wm8320_devs[] = {
.resources = wm8320_dcdc4_buck_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-gpio",
.num_resources = ARRAY_SIZE(wm831x_gpio_resources),
.resources = wm831x_gpio_resources,
@@ -1405,11 +1252,6 @@ static struct mfd_cell wm8320_devs[] = {
.resources = wm831x_on_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1428,6 +1270,22 @@ static struct mfd_cell wm8320_devs[] = {
},
};
+static struct mfd_cell touch_devs[] = {
+ {
+ .name = "wm831x-touch",
+ .num_resources = ARRAY_SIZE(wm831x_touch_resources),
+ .resources = wm831x_touch_resources,
+ },
+};
+
+static struct mfd_cell rtc_devs[] = {
+ {
+ .name = "wm831x-rtc",
+ .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
+ .resources = wm831x_rtc_resources,
+ },
+};
+
static struct mfd_cell backlight_devs[] = {
{
.name = "wm831x-backlight",
@@ -1440,14 +1298,12 @@ static struct mfd_cell backlight_devs[] = {
int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
{
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int rev;
+ int rev, wm831x_num;
enum wm831x_parent parent;
int ret, i;
mutex_init(&wm831x->io_lock);
mutex_init(&wm831x->key_lock);
- mutex_init(&wm831x->auxadc_lock);
- init_completion(&wm831x->auxadc_done);
dev_set_drvdata(wm831x->dev, wm831x);
ret = wm831x_reg_read(wm831x, WM831X_PARENT_ID);
@@ -1592,45 +1448,51 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
}
}
+ /* Multiply by 10 as we have many subdevices of the same type */
+ if (pdata && pdata->wm831x_num)
+ wm831x_num = pdata->wm831x_num * 10;
+ else
+ wm831x_num = -1;
+
ret = wm831x_irq_init(wm831x, irq);
if (ret != 0)
goto err;
- if (wm831x->irq_base) {
- ret = request_threaded_irq(wm831x->irq_base +
- WM831X_IRQ_AUXADC_DATA,
- NULL, wm831x_auxadc_irq, 0,
- "auxadc", wm831x);
- if (ret < 0)
- dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n",
- ret);
- }
+ wm831x_auxadc_init(wm831x);
/* The core device is up, instantiate the subdevices. */
switch (parent) {
case WM8310:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8310_devs, ARRAY_SIZE(wm8310_devs),
NULL, wm831x->irq_base);
break;
case WM8311:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8311_devs, ARRAY_SIZE(wm8311_devs),
NULL, wm831x->irq_base);
+ if (!pdata || !pdata->disable_touch)
+ mfd_add_devices(wm831x->dev, wm831x_num,
+ touch_devs, ARRAY_SIZE(touch_devs),
+ NULL, wm831x->irq_base);
break;
case WM8312:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8312_devs, ARRAY_SIZE(wm8312_devs),
NULL, wm831x->irq_base);
+ if (!pdata || !pdata->disable_touch)
+ mfd_add_devices(wm831x->dev, wm831x_num,
+ touch_devs, ARRAY_SIZE(touch_devs),
+ NULL, wm831x->irq_base);
break;
case WM8320:
case WM8321:
case WM8325:
case WM8326:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8320_devs, ARRAY_SIZE(wm8320_devs),
NULL, wm831x->irq_base);
break;
@@ -1645,9 +1507,30 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
goto err_irq;
}
+ /* The RTC can only be used if the 32.768kHz crystal is
+ * enabled; this can't be controlled by software at runtime.
+ */
+ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to read clock status: %d\n", ret);
+ goto err_irq;
+ }
+
+ if (ret & WM831X_XTAL_ENA) {
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
+ rtc_devs, ARRAY_SIZE(rtc_devs),
+ NULL, wm831x->irq_base);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
+ goto err_irq;
+ }
+ } else {
+ dev_info(wm831x->dev, "32.768kHz clock disabled, no RTC\n");
+ }
+
if (pdata && pdata->backlight) {
/* Treat errors as non-critical */
- ret = mfd_add_devices(wm831x->dev, -1, backlight_devs,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
ARRAY_SIZE(backlight_devs), NULL,
wm831x->irq_base);
if (ret < 0)
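An illustrative board-file fragment, an assumption rather than code from the patch, showing how the new platform data fields feed the logic above: wm831x_num = 2 makes the core pass 20 as the mfd_add_devices() id base so two PMICs on one board get distinct subdevice ids, and disable_touch suppresses the separately registered touch_devs cell.

#include <linux/mfd/wm831x/pdata.h>

/* Hypothetical second-PMIC platform data; the field names follow the
 * pdata members referenced in wm831x_device_init() above. */
static struct wm831x_pdata board_wm831x2_pdata = {
	.wm831x_num	= 2,	/* subdevice id base becomes 2 * 10 = 20 */
	.disable_touch	= true,	/* skip registering the touch_devs cell */
	.irq_base	= 0,	/* 0: core falls back to irq_alloc_descs() */
};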
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 42b928e..ada1835 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -348,6 +348,15 @@ static void wm831x_irq_sync_unlock(struct irq_data *data)
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
int i;
+ for (i = 0; i < ARRAY_SIZE(wm831x->gpio_update); i++) {
+ if (wm831x->gpio_update[i]) {
+ wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + i,
+ WM831X_GPN_INT_MODE | WM831X_GPN_POL,
+ wm831x->gpio_update[i]);
+ wm831x->gpio_update[i] = 0;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
/* If there's been a change in the mask write it back
* to the hardware. */
@@ -387,7 +396,7 @@ static void wm831x_irq_disable(struct irq_data *data)
static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
- int val, irq;
+ int irq;
irq = data->irq - wm831x->irq_base;
@@ -399,22 +408,30 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
return -EINVAL;
}
+ /* Rebase the IRQ into the GPIO range so we've got a sensible array
+ * index.
+ */
+ irq -= WM831X_IRQ_GPIO_1;
+
+ /* We set the high bit to flag that we need an update; don't
+ * do the update here as we can be called with the bus lock
+ * held.
+ */
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
- val = WM831X_GPN_INT_MODE;
+ wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
break;
case IRQ_TYPE_EDGE_RISING:
- val = WM831X_GPN_POL;
+ wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
break;
case IRQ_TYPE_EDGE_FALLING:
- val = 0;
+ wm831x->gpio_update[irq] = 0x10000;
break;
default:
return -EINVAL;
}
- return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + irq,
- WM831X_GPN_INT_MODE | WM831X_GPN_POL, val);
+ return 0;
}
static struct irq_chip wm831x_irq_chip = {
@@ -432,7 +449,7 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
{
struct wm831x *wm831x = data;
unsigned int i;
- int primary;
+ int primary, status_addr;
int status_regs[WM831X_NUM_IRQ_REGS] = { 0 };
int read[WM831X_NUM_IRQ_REGS] = { 0 };
int *status;
@@ -467,8 +484,9 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
/* Hopefully there should only be one register to read
* each time otherwise we ought to do a block read. */
if (!read[offset]) {
- *status = wm831x_reg_read(wm831x,
- irq_data_to_status_reg(&wm831x_irqs[i]));
+ status_addr = irq_data_to_status_reg(&wm831x_irqs[i]);
+
+ *status = wm831x_reg_read(wm831x, status_addr);
if (*status < 0) {
dev_err(wm831x->dev,
"Failed to read IRQ status: %d\n",
@@ -477,26 +495,21 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
}
read[offset] = 1;
+
+ /* Ignore any bits that we think are masked */
+ *status &= ~wm831x->irq_masks_cur[offset];
+
+ /* Acknowledge now so we don't miss
+ * notifications while we handle.
+ */
+ wm831x_reg_write(wm831x, status_addr, *status);
}
- /* Report it if it isn't masked, or forget the status. */
- if ((*status & ~wm831x->irq_masks_cur[offset])
- & wm831x_irqs[i].mask)
+ if (*status & wm831x_irqs[i].mask)
handle_nested_irq(wm831x->irq_base + i);
- else
- *status &= ~wm831x_irqs[i].mask;
}
out:
- /* Touchscreen interrupts are handled specially in the driver */
- status_regs[0] &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
-
- for (i = 0; i < ARRAY_SIZE(status_regs); i++) {
- if (status_regs[i])
- wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1 + i,
- status_regs[i]);
- }
-
return IRQ_HANDLED;
}
@@ -515,13 +528,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
0xffff);
}
- if (!pdata || !pdata->irq_base) {
- dev_err(wm831x->dev,
- "No interrupt base specified, no interrupts\n");
+ /* Try to dynamically allocate IRQs if no base is specified */
+ if (!pdata || !pdata->irq_base)
+ wm831x->irq_base = -1;
+ else
+ wm831x->irq_base = pdata->irq_base;
+
+ wm831x->irq_base = irq_alloc_descs(wm831x->irq_base, 0,
+ WM831X_NUM_IRQS, 0);
+ if (wm831x->irq_base < 0) {
+ dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
+ wm831x->irq_base);
+ wm831x->irq_base = 0;
return 0;
}
- if (pdata->irq_cmos)
+ if (pdata && pdata->irq_cmos)
i = 0;
else
i = WM831X_IRQ_OD;
@@ -541,7 +563,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
}
wm831x->irq = irq;
- wm831x->irq_base = pdata->irq_base;
/* Register them with genirq */
for (cur_irq = wm831x->irq_base;
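The wm831x-irq.c hunks above and the wm8350 hunk below share the same fallback pattern; here it is condensed into one sketch. The helper name, parameters and the bare pr_warn() are placeholders rather than code from the patch.

#include <linux/irq.h>
#include <linux/printk.h>

/* Prefer a platform-supplied IRQ base; otherwise ask genirq for any
 * free range of descriptors.  A negative return means "run without
 * IRQs", which both drivers treat as non-fatal. */
static int example_alloc_irq_range(int pdata_irq_base, unsigned int num_irqs)
{
	int base = (pdata_irq_base > 0) ? pdata_irq_base : -1;

	base = irq_alloc_descs(base, 0, num_irqs, 0);
	if (base < 0)
		pr_warn("IRQ descriptor allocation failed: %d\n", base);

	return base;
}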
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index ed4b22a..8a1fafd 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -473,17 +473,13 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
{
int ret, cur_irq, i;
int flags = IRQF_ONESHOT;
+ int irq_base = -1;
if (!irq) {
dev_warn(wm8350->dev, "No interrupt support, no core IRQ\n");
return 0;
}
- if (!pdata || !pdata->irq_base) {
- dev_warn(wm8350->dev, "No interrupt support, no IRQ base\n");
- return 0;
- }
-
/* Mask top level interrupts */
wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF);
@@ -502,7 +498,17 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
wm8350->chip_irq = irq;
wm8350->irq_base = pdata->irq_base;
- if (pdata->irq_high) {
+ if (pdata && pdata->irq_base > 0)
+ irq_base = pdata->irq_base;
+
+ wm8350->irq_base = irq_alloc_descs(irq_base, 0, ARRAY_SIZE(wm8350_irqs), 0);
+ if (wm8350->irq_base < 0) {
+ dev_warn(wm8350->dev, "Allocating irqs failed with %d\n",
+ wm8350->irq_base);
+ return 0;
+ }
+
+ if (pdata && pdata->irq_high) {
flags |= IRQF_TRIGGER_HIGH;
wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index e198d40..96479c9 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -316,7 +316,7 @@ static int wm8994_suspend(struct device *dev)
static int wm8994_resume(struct device *dev)
{
struct wm8994 *wm8994 = dev_get_drvdata(dev);
- int ret;
+ int ret, i;
/* We may have lied to the PM core about suspending */
if (!wm8994->suspended)
@@ -329,10 +329,16 @@ static int wm8994_resume(struct device *dev)
return ret;
}
- ret = wm8994_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK,
- WM8994_NUM_IRQ_REGS * 2, &wm8994->irq_masks_cur);
- if (ret < 0)
- dev_err(dev, "Failed to restore interrupt masks: %d\n", ret);
+ /* Write one register at a time as we use the CPU-side cache, which
+ * stores values in native endian.
+ */
+ for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
+ ret = wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK
+ + i, wm8994->irq_masks_cur[i]);
+ if (ret < 0)
+ dev_err(dev, "Failed to restore interrupt masks: %d\n",
+ ret);
+ }
ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
&wm8994->ldo_regs);
@@ -403,7 +409,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
default:
BUG();
- return -EINVAL;
+ goto err;
}
wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
@@ -425,7 +431,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
default:
BUG();
- return -EINVAL;
+ goto err;
}
ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
@@ -476,13 +482,18 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err_enable;
}
- switch (ret) {
- case 0:
- case 1:
- if (wm8994->type == WM8994)
+ switch (wm8994->type) {
+ case WM8994:
+ switch (ret) {
+ case 0:
+ case 1:
dev_warn(wm8994->dev,
"revision %c not fully supported\n",
'A' + ret);
+ break;
+ default:
+ break;
+ }
break;
default:
break;
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 71c6e8f..d682f7b 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -231,12 +231,6 @@ static irqreturn_t wm8994_irq_thread(int irq, void *data)
status[i] &= ~wm8994->irq_masks_cur[i];
}
- /* Report */
- for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
- if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
- handle_nested_irq(wm8994->irq_base + i);
- }
-
/* Ack any unmasked IRQs */
for (i = 0; i < ARRAY_SIZE(status); i++) {
if (status[i])
@@ -244,6 +238,12 @@ static irqreturn_t wm8994_irq_thread(int irq, void *data)
status[i]);
}
+ /* Report */
+ for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
+ if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
+ handle_nested_irq(wm8994->irq_base + i);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0a4d86c..2d6423c 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -146,6 +146,7 @@ config PHANTOM
config INTEL_MID_PTI
tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
+ depends on PCI
default n
help
The PTI (Parallel Trace Interface) driver directs
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index 54e3d05..3590315 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -164,5 +164,5 @@ subsys_initcall(ab8500_pwm_init);
module_exit(ab8500_pwm_exit);
MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>");
MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver");
-MODULE_ALIAS("AB8500 PWM driver");
+MODULE_ALIAS("platform:ab8500-pwm");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index efec413..68cd05b 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);
static int __devinit cb710_pci_configure(struct pci_dev *pdev)
{
unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
- struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn);
+ struct pci_dev *pdev0;
u32 val;
cb710_pci_update_config_reg(pdev, 0x48,
@@ -43,6 +43,7 @@ static int __devinit cb710_pci_configure(struct pci_dev *pdev)
if (val & 0x80000000)
return 0;
+ pdev0 = pci_get_slot(pdev->bus, devfn);
if (!pdev0)
return -ENODEV;
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
index 5325a7e..27dc0d2 100644
--- a/drivers/misc/fsa9480.c
+++ b/drivers/misc/fsa9480.c
@@ -455,7 +455,7 @@ static int __devinit fsa9480_probe(struct i2c_client *client,
fail2:
if (client->irq)
- free_irq(client->irq, NULL);
+ free_irq(client->irq, usbsw);
fail1:
i2c_set_clientdata(client, NULL);
kfree(usbsw);
@@ -466,7 +466,7 @@ static int __devexit fsa9480_remove(struct i2c_client *client)
{
struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
if (client->irq)
- free_irq(client->irq, NULL);
+ free_irq(client->irq, usbsw);
i2c_set_clientdata(client, NULL);
sysfs_remove_group(&client->dev.kobj, &fsa9480_group);
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index b05db55..21b28fc 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -26,7 +26,7 @@
#include <linux/sched.h>
#include <linux/mutex.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#define PHANTOM_VERSION "n0.9.8"
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index 8653bd0..0b56e3f 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -33,6 +33,8 @@
#include <linux/mutex.h>
#include <linux/miscdevice.h>
#include <linux/pti.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
#define DRIVERNAME "pti"
#define PCINAME "pciPTI"
@@ -163,6 +165,11 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc,
static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
const char *thread_name)
{
+ /*
+ * Since we access the comm member in current's task_struct, we only
+ * need to be as large as what 'comm' in that structure is.
+ */
+ char comm[TASK_COMM_LEN];
struct pti_masterchannel mccontrol = {.master = CONTROL_ID,
.channel = 0};
const char *thread_name_p;
@@ -170,13 +177,6 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
u8 control_frame[CONTROL_FRAME_LEN];
if (!thread_name) {
- /*
- * Since we access the comm member in current's task_struct,
- * we only need to be as large as what 'comm' in that
- * structure is.
- */
- char comm[TASK_COMM_LEN];
-
if (!in_interrupt())
get_task_comm(comm, current);
else
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 54c91ff..ba168a7 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -338,6 +338,12 @@ void st_int_recv(void *disc_data,
/* Unknown packet? */
default:
type = *ptr;
+ if (st_gdata->list[type] == NULL) {
+ pr_err("chip/interface misbehavior dropping"
+ " frame starting with 0x%02x", type);
+ goto done;
+
+ }
st_gdata->rx_skb = alloc_skb(
st_gdata->list[type]->max_frame_size,
GFP_ATOMIC);
@@ -354,6 +360,7 @@ void st_int_recv(void *disc_data,
ptr++;
count--;
}
+done:
spin_unlock_irqrestore(&st_gdata->lock, flags);
pr_debug("done %s", __func__);
return;
@@ -717,9 +724,10 @@ static void st_tty_close(struct tty_struct *tty)
*/
spin_lock_irqsave(&st_gdata->lock, flags);
for (i = ST_BT; i < ST_MAX_CHANNELS; i++) {
- if (st_gdata->list[i] != NULL)
+ if (st_gdata->is_registered[i] == true)
pr_err("%d not un-registered", i);
st_gdata->list[i] = NULL;
+ st_gdata->is_registered[i] = false;
}
st_gdata->protos_registered = 0;
spin_unlock_irqrestore(&st_gdata->lock, flags);
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 38fd2f0..3a35805 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -68,6 +68,7 @@ void validate_firmware_response(struct kim_data_s *kim_gdata)
if (unlikely(skb->data[5] != 0)) {
pr_err("no proper response during fw download");
pr_err("data6 %x", skb->data[5]);
+ kfree_skb(skb);
return; /* keep waiting for the proper response */
}
/* becos of all the script being downloaded */
@@ -210,6 +211,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
pr_err(" waiting for ver info- timed out ");
return -ETIMEDOUT;
}
+ INIT_COMPLETION(kim_gdata->kim_rcvd);
version =
MAKEWORD(kim_gdata->resp_buffer[13],
@@ -298,6 +300,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
switch (((struct bts_action *)ptr)->type) {
case ACTION_SEND_COMMAND: /* action send */
+ pr_debug("S");
action_ptr = &(((struct bts_action *)ptr)->data[0]);
if (unlikely
(((struct hci_command *)action_ptr)->opcode ==
@@ -335,6 +338,10 @@ static long download_firmware(struct kim_data_s *kim_gdata)
release_firmware(kim_gdata->fw_entry);
return -ETIMEDOUT;
}
+ /* reinit completion before sending for the
+ * relevant wait
+ */
+ INIT_COMPLETION(kim_gdata->kim_rcvd);
/*
* Free space found in uart buffer, call st_int_write
@@ -361,6 +368,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
}
break;
case ACTION_WAIT_EVENT: /* wait */
+ pr_debug("W");
if (!wait_for_completion_timeout
(&kim_gdata->kim_rcvd,
msecs_to_jiffies(CMD_RESP_TIME))) {
@@ -434,11 +442,17 @@ long st_kim_start(void *kim_data)
{
long err = 0;
long retry = POR_RETRY_COUNT;
+ struct ti_st_plat_data *pdata;
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
pr_info(" %s", __func__);
+ pdata = kim_gdata->kim_pdev->dev.platform_data;
do {
+ /* platform specific enabling code here */
+ if (pdata->chip_enable)
+ pdata->chip_enable(kim_gdata);
+
/* Configure BT nShutdown to HIGH state */
gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
mdelay(5); /* FIXME: a proper toggle */
@@ -460,6 +474,12 @@ long st_kim_start(void *kim_data)
pr_info("ldisc_install = 0");
sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
NULL, "install");
+ /* the following wait is never going to be completed,
+ * since the ldisc was never installed, hence serving
+ * as a mdelay of LDISC_TIME msecs */
+ err = wait_for_completion_timeout
+ (&kim_gdata->ldisc_installed,
+ msecs_to_jiffies(LDISC_TIME));
err = -ETIMEDOUT;
continue;
} else {
@@ -472,6 +492,13 @@ long st_kim_start(void *kim_data)
pr_info("ldisc_install = 0");
sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
NULL, "install");
+ /* this wait might be completed, though in the
+ * tty_close() since the ldisc is already
+ * installed */
+ err = wait_for_completion_timeout
+ (&kim_gdata->ldisc_installed,
+ msecs_to_jiffies(LDISC_TIME));
+ err = -EINVAL;
continue;
} else { /* on success don't retry */
break;
@@ -489,6 +516,8 @@ long st_kim_stop(void *kim_data)
{
long err = 0;
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
+ struct ti_st_plat_data *pdata =
+ kim_gdata->kim_pdev->dev.platform_data;
INIT_COMPLETION(kim_gdata->ldisc_installed);
@@ -515,6 +544,10 @@ long st_kim_stop(void *kim_data)
gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
mdelay(1);
gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+
+ /* platform specific disable */
+ if (pdata->chip_disable)
+ pdata->chip_disable(kim_gdata);
return err;
}
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 3f24951..1ff460a 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) "(stll) :" fmt
#include <linux/skbuff.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/ti_wilink_st.h>
/**********************************************************************/
@@ -37,6 +38,9 @@ static void send_ll_cmd(struct st_data_s *st_data,
static void ll_device_want_to_sleep(struct st_data_s *st_data)
{
+ struct kim_data_s *kim_data;
+ struct ti_st_plat_data *pdata;
+
pr_debug("%s", __func__);
/* sanity check */
if (st_data->ll_state != ST_LL_AWAKE)
@@ -46,10 +50,19 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)
send_ll_cmd(st_data, LL_SLEEP_ACK);
/* update state */
st_data->ll_state = ST_LL_ASLEEP;
+
+ /* communicate to platform about chip asleep */
+ kim_data = st_data->kim_data;
+ pdata = kim_data->kim_pdev->dev.platform_data;
+ if (pdata->chip_asleep)
+ pdata->chip_asleep(NULL);
}
static void ll_device_want_to_wakeup(struct st_data_s *st_data)
{
+ struct kim_data_s *kim_data;
+ struct ti_st_plat_data *pdata;
+
/* diff actions in diff states */
switch (st_data->ll_state) {
case ST_LL_ASLEEP:
@@ -70,6 +83,12 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
}
/* update state */
st_data->ll_state = ST_LL_AWAKE;
+
+ /* communicate to platform about chip wakeup */
+ kim_data = st_data->kim_data;
+ pdata = kim_data->kim_pdev->dev.platform_data;
+ if (pdata->chip_awake)
+ pdata->chip_awake(NULL);
}
/**********************************************************************/
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6df5a55..053d36c 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,7 +45,7 @@
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.2-k");
+MODULE_VERSION("1.2.1.3-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
@@ -215,7 +215,6 @@ struct vmballoon {
};
static struct vmballoon balloon;
-static struct workqueue_struct *vmballoon_wq;
/*
* Send "start" command to the host, communicating supported version
@@ -674,7 +673,12 @@ static void vmballoon_work(struct work_struct *work)
vmballoon_deflate(b);
}
- queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ));
+ /*
+ * We are using a freezable workqueue so that balloon operations are
+ * stopped while the system transitions to/from sleep/hibernation.
+ */
+ queue_delayed_work(system_freezable_wq,
+ dwork, round_jiffies_relative(HZ));
}
/*
@@ -785,12 +789,6 @@ static int __init vmballoon_init(void)
if (x86_hyper != &x86_hyper_vmware)
return -ENODEV;
- vmballoon_wq = create_freezable_workqueue("vmmemctl");
- if (!vmballoon_wq) {
- pr_err("failed to create workqueue\n");
- return -ENOMEM;
- }
-
INIT_LIST_HEAD(&balloon.pages);
INIT_LIST_HEAD(&balloon.refused_pages);
@@ -805,34 +803,27 @@ static int __init vmballoon_init(void)
*/
if (!vmballoon_send_start(&balloon)) {
pr_err("failed to send start command to the host\n");
- error = -EIO;
- goto fail;
+ return -EIO;
}
if (!vmballoon_send_guest_id(&balloon)) {
pr_err("failed to send guest ID to the host\n");
- error = -EIO;
- goto fail;
+ return -EIO;
}
error = vmballoon_debugfs_init(&balloon);
if (error)
- goto fail;
+ return error;
- queue_delayed_work(vmballoon_wq, &balloon.dwork, 0);
+ queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
return 0;
-
-fail:
- destroy_workqueue(vmballoon_wq);
- return error;
}
module_init(vmballoon_init);
static void __exit vmballoon_exit(void)
{
cancel_delayed_work_sync(&balloon.dwork);
- destroy_workqueue(vmballoon_wq);
vmballoon_debugfs_exit(&balloon);
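Restating the pattern the driver switches to above as a self-contained sketch with placeholder names: a self-rearming delayed work queued on the shared system_freezable_wq, so the work is parked across suspend/hibernate without the driver owning a private freezable workqueue.

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

static void example_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_dwork, example_work_fn);

static void example_work_fn(struct work_struct *work)
{
	/* ... one round of periodic housekeeping ... */

	/* Re-arm on the shared freezable workqueue; nothing to create,
	 * flush or destroy in the driver's init/exit paths. */
	queue_delayed_work(system_freezable_wq, &example_dwork,
			   round_jiffies_relative(HZ));
}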
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 006a5e9..2bf229a 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -224,7 +224,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
static int mmc_test_busy(struct mmc_command *cmd)
{
return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(cmd->resp[0]) == 7);
+ (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}
/*
@@ -2900,7 +2900,7 @@ static const struct file_operations mmc_test_fops_testlist = {
.release = single_release,
};
-static void mmc_test_free_file_test(struct mmc_card *card)
+static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
struct mmc_test_dbgfs_file *df, *dfs;
@@ -2917,34 +2917,21 @@ static void mmc_test_free_file_test(struct mmc_card *card)
mutex_unlock(&mmc_test_lock);
}
-static int mmc_test_register_file_test(struct mmc_card *card)
+static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
+ const char *name, mode_t mode, const struct file_operations *fops)
{
struct dentry *file = NULL;
struct mmc_test_dbgfs_file *df;
- int ret = 0;
-
- mutex_lock(&mmc_test_lock);
-
- if (card->debugfs_root)
- file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
- card->debugfs_root, card, &mmc_test_fops_test);
-
- if (IS_ERR_OR_NULL(file)) {
- dev_err(&card->dev,
- "Can't create test. Perhaps debugfs is disabled.\n");
- ret = -ENODEV;
- goto err;
- }
if (card->debugfs_root)
- file = debugfs_create_file("testlist", S_IRUGO,
- card->debugfs_root, card, &mmc_test_fops_testlist);
+ file = debugfs_create_file(name, mode, card->debugfs_root,
+ card, fops);
if (IS_ERR_OR_NULL(file)) {
dev_err(&card->dev,
- "Can't create testlist. Perhaps debugfs is disabled.\n");
- ret = -ENODEV;
- goto err;
+ "Can't create %s. Perhaps debugfs is disabled.\n",
+ name);
+ return -ENODEV;
}
df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
@@ -2952,14 +2939,31 @@ static int mmc_test_register_file_test(struct mmc_card *card)
debugfs_remove(file);
dev_err(&card->dev,
"Can't allocate memory for internal usage.\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
df->card = card;
df->file = file;
list_add(&df->link, &mmc_test_file_test);
+ return 0;
+}
+
+static int mmc_test_register_dbgfs_file(struct mmc_card *card)
+{
+ int ret;
+
+ mutex_lock(&mmc_test_lock);
+
+ ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
+ &mmc_test_fops_test);
+ if (ret)
+ goto err;
+
+ ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
+ &mmc_test_fops_testlist);
+ if (ret)
+ goto err;
err:
mutex_unlock(&mmc_test_lock);
@@ -2974,7 +2978,7 @@ static int mmc_test_probe(struct mmc_card *card)
if (!mmc_card_mmc(card) && !mmc_card_sd(card))
return -ENODEV;
- ret = mmc_test_register_file_test(card);
+ ret = mmc_test_register_dbgfs_file(card);
if (ret)
return ret;
@@ -2986,7 +2990,7 @@ static int mmc_test_probe(struct mmc_card *card)
static void mmc_test_remove(struct mmc_card *card)
{
mmc_test_free_result(card);
- mmc_test_free_file_test(card);
+ mmc_test_free_dbgfs_file(card);
}
static struct mmc_driver mmc_driver = {
@@ -3006,7 +3010,7 @@ static void __exit mmc_test_exit(void)
{
/* Clear stalled data if card is still plugged */
mmc_test_free_result(NULL);
- mmc_test_free_file_test(NULL);
+ mmc_test_free_dbgfs_file(NULL);
mmc_unregister_driver(&mmc_driver);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 89bdeae..b27b940 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
if (mrq->done)
mrq->done(mrq);
- mmc_host_clk_gate(host);
+ mmc_host_clk_release(host);
}
}
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mrq->stop->mrq = mrq;
}
}
- mmc_host_clk_ungate(host);
+ mmc_host_clk_hold(host);
led_trigger_event(host->led, LED_FULL);
host->ops->request(host, mrq);
}
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
*/
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
+ mmc_host_clk_hold(host);
host->ios.chip_select = mode;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
* Sets the host clock to the highest possible frequency that
* is below "hz".
*/
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
WARN_ON(hz < host->f_min);
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
mmc_set_ios(host);
}
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+ mmc_host_clk_hold(host);
+ __mmc_set_clock(host, hz);
+ mmc_host_clk_release(host);
+}
+
#ifdef CONFIG_MMC_CLKGATE
/*
* This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
if (host->clk_old) {
BUG_ON(host->ios.clock);
/* This call will also set host->clk_gated to false */
- mmc_set_clock(host, host->clk_old);
+ __mmc_set_clock(host, host->clk_old);
}
}
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
*/
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
+ mmc_host_clk_hold(host);
host->ios.bus_mode = mode;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
*/
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
+ mmc_host_clk_hold(host);
host->ios.bus_width = width;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
ocr &= 3 << bit;
+ mmc_host_clk_hold(host);
host->ios.vdd = bit;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
} else {
pr_warning("%s: host doesn't support card's voltages\n",
mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
*/
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
+ mmc_host_clk_hold(host);
host->ios.timing = timing;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
*/
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
+ mmc_host_clk_hold(host);
host->ios.drv_type = drv_type;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
{
int bit;
+ mmc_host_clk_hold(host);
+
/* If ocr is set, we use it */
if (host->ocr)
bit = ffs(host->ocr) - 1;
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
* time required to reach a stable voltage.
*/
mmc_delay(10);
+
+ mmc_host_clk_release(host);
}
static void mmc_power_off(struct mmc_host *host)
{
+ mmc_host_clk_hold(host);
+
host->ios.clock = 0;
host->ios.vdd = 0;
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
+
+ mmc_host_clk_release(host);
}
/*
@@ -1502,7 +1529,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
goto out;
}
} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
- R1_CURRENT_STATE(cmd.resp[0]) == 7);
+ R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
return err;
}
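The core.c hunks above all add the same bracketing; summarised here as a sketch (mmc_set_ios() and the clock helpers are internal to the MMC core, so this is purely illustrative, not a driver-facing API):

/* Every ios update now holds a clock reference so aggressive clock
 * gating cannot turn the MCI clock off in the middle of the update. */
static void example_update_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);	/* ungate and take a reference */
	host->ios.timing = timing;
	mmc_set_ios(host);		/* program the controller */
	mmc_host_clk_release(host);	/* drop the reference; gating may resume */
}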
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b29d3e8..793d0a0 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
}
/**
- * mmc_host_clk_ungate - ungate hardware MCI clocks
+ * mmc_host_clk_hold - ungate hardware MCI clocks
* @host: host to ungate.
*
* Makes sure the host ios.clock is restored to a non-zero value
* past this call. Increase clock reference count and ungate clock
* if we're the first user.
*/
-void mmc_host_clk_ungate(struct mmc_host *host)
+void mmc_host_clk_hold(struct mmc_host *host)
{
unsigned long flags;
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
}
/**
- * mmc_host_clk_gate - gate off hardware MCI clocks
+ * mmc_host_clk_release - gate off hardware MCI clocks
* @host: host to gate.
*
* Calls the host driver with ios.clock set to zero as often as possible
* in order to gate off hardware MCI clocks. Decrease clock reference
* count and schedule disabling of clock.
*/
-void mmc_host_clk_gate(struct mmc_host *host)
+void mmc_host_clk_release(struct mmc_host *host)
{
unsigned long flags;
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
host->clk_requests--;
if (mmc_host_may_gate_card(host->card) &&
!host->clk_requests)
- schedule_work(&host->clk_gate_work);
+ queue_work(system_nrt_wq, &host->clk_gate_work);
spin_unlock_irqrestore(&host->clk_lock, flags);
}
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
if (cancel_work_sync(&host->clk_gate_work))
mmc_host_clk_gate_delayed(host);
if (host->clk_gated)
- mmc_host_clk_ungate(host);
+ mmc_host_clk_hold(host);
/* There should be only one user now */
WARN_ON(host->clk_requests > 1);
}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index de199f9..fb8a5cd 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
#ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_ungate(struct mmc_host *host);
-void mmc_host_clk_gate(struct mmc_host *host);
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
unsigned int mmc_host_clk_rate(struct mmc_host *host);
#else
-static inline void mmc_host_clk_ungate(struct mmc_host *host)
+static inline void mmc_host_clk_hold(struct mmc_host *host)
{
}
-static inline void mmc_host_clk_gate(struct mmc_host *host)
+static inline void mmc_host_clk_release(struct mmc_host *host)
{
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index aa7d1d7..5700b1c 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -259,7 +259,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
}
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
- if (card->ext_csd.rev > 5) {
+ if (card->ext_csd.rev > 6) {
printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
mmc_hostname(card->host), card->ext_csd.rev);
err = -EINVAL;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 845ce7c..770c3d0 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -407,7 +407,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
break;
if (mmc_host_is_spi(card->host))
break;
- } while (R1_CURRENT_STATE(status) == 7);
+ } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
if (mmc_host_is_spi(card->host)) {
if (status & R1_SPI_ILLEGAL_COMMAND)
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 633975f..0370e03 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
return 0;
}
-static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+static void sd_update_bus_speed_mode(struct mmc_card *card)
{
- unsigned int bus_speed = 0, timing = 0;
- int err;
-
/*
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
- return 0;
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
+ card->sd_bus_speed = 0;
+ return;
+ }
if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
- bus_speed = UHS_SDR104_BUS_SPEED;
- timing = MMC_TIMING_UHS_SDR104;
- card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
- bus_speed = UHS_DDR50_BUS_SPEED;
- timing = MMC_TIMING_UHS_DDR50;
- card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50)) {
- bus_speed = UHS_SDR50_BUS_SPEED;
- timing = MMC_TIMING_UHS_SDR50;
- card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
- bus_speed = UHS_SDR25_BUS_SPEED;
- timing = MMC_TIMING_UHS_SDR25;
- card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR12)) {
- bus_speed = UHS_SDR12_BUS_SPEED;
- timing = MMC_TIMING_UHS_SDR12;
- card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+ }
+}
+
+static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+{
+ int err;
+ unsigned int timing = 0;
+
+ switch (card->sd_bus_speed) {
+ case UHS_SDR104_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR104;
+ card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ break;
+ case UHS_DDR50_BUS_SPEED:
+ timing = MMC_TIMING_UHS_DDR50;
+ card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ break;
+ case UHS_SDR50_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR50;
+ card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ break;
+ case UHS_SDR25_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR25;
+ card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ break;
+ case UHS_SDR12_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR12;
+ card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ break;
+ default:
+ return 0;
}
- card->sd_bus_speed = bus_speed;
- err = mmc_sd_switch(card, 1, 0, bus_speed, status);
+ err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
if (err)
return err;
- if ((status[16] & 0xF) != bus_speed)
+ if ((status[16] & 0xF) != card->sd_bus_speed)
printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
mmc_hostname(card->host));
else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
}
+ /*
+ * Select the bus speed mode depending on host
+ * and card capability.
+ */
+ sd_update_bus_speed_mode(card);
+
/* Set the driver strength for the card */
err = sd_select_driver_type(card, status);
if (err)
goto out;
- /* Set bus speed mode of the card */
- err = sd_set_bus_speed_mode(card, status);
+ /* Set current limit for the card */
+ err = sd_set_current_limit(card, status);
if (err)
goto out;
- /* Set current limit for the card */
- err = sd_set_current_limit(card, status);
+ /* Set bus speed mode of the card */
+ err = sd_set_bus_speed_mode(card, status);
if (err)
goto out;
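For readability, the UHS-I initialisation order that results from the sd.c hunks above, condensed into one sketch; the functions are static to sd.c, so this is a restatement rather than new code.

static int example_sd_init_uhs(struct mmc_card *card, u8 *status)
{
	int err;

	/* Pick the target bus speed first so later steps can use it */
	sd_update_bus_speed_mode(card);

	err = sd_select_driver_type(card, status);	/* driver strength */
	if (err)
		return err;

	err = sd_set_current_limit(card, status);	/* current limit */
	if (err)
		return err;

	/* The actual CMD6 speed switch now happens last */
	return sd_set_bus_speed_mode(card, status);
}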
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 77f0b6b..ff0f714 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -62,7 +62,7 @@ struct idmac_desc {
u32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
- ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
+ ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
u32 des2; /* buffer 1 physical address */
@@ -699,7 +699,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* DDR mode set */
- if (ios->ddr) {
+ if (ios->timing == MMC_TIMING_UHS_DDR50) {
regs = mci_readl(slot->host, UHS_REG);
regs |= (0x1 << slot->id) << 16;
mci_writel(slot->host, UHS_REG, regs);
@@ -1646,7 +1646,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
- mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
#ifdef CONFIG_MMC_DW_IDMAC
mmc->max_segs = host->ring_size;
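A quick user-space check of the descriptor-size fix above; the two macros are copied from the hunk but adapted to operate on a plain variable instead of a descriptor pointer, and the names are only for this standalone test. The IDMAC BS1 field is 13 bits wide, so the old 14-bit mask let a 0x2000-byte request set a bit outside the size field.

#include <stdio.h>

#define SET_BUFFER1_SIZE_OLD(d, s) ((d) = ((d) & 0x03ffc000) | ((s) & 0x3fff))
#define SET_BUFFER1_SIZE_NEW(d, s) ((d) = ((d) & 0x03ffe000) | ((s) & 0x1fff))

int main(void)
{
	unsigned int des1_old = 0, des1_new = 0;

	SET_BUFFER1_SIZE_OLD(des1_old, 0x2000);	/* writes bit 13 */
	SET_BUFFER1_SIZE_NEW(des1_new, 0x2000);	/* size masked to 13 bits */

	printf("old macro: des1 = %#x\n", des1_old);	/* prints 0x2000 */
	printf("new macro: des1 = %#x\n", des1_new);	/* prints 0 */
	return 0;
}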
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 710b706..4dc0028 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -16,20 +16,23 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
-#include <mach/hardware.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <mach/esdhc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
+#define SDHCI_CTRL_D3CD 0x08
/* VENDOR SPEC register */
#define SDHCI_VENDOR_SPEC 0xC0
#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
-#define ESDHC_FLAG_GPIO_FOR_CD (1 << 0)
/*
* The CMDTYPE of the CMD register (offset 0xE) should be set to
* "11" when the STOP CMD12 is issued on imx53 to abort one
@@ -43,10 +46,67 @@
*/
#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1)
+enum imx_esdhc_type {
+ IMX25_ESDHC,
+ IMX35_ESDHC,
+ IMX51_ESDHC,
+ IMX53_ESDHC,
+};
+
struct pltfm_imx_data {
int flags;
u32 scratchpad;
+ enum imx_esdhc_type devtype;
+ struct esdhc_platform_data boarddata;
+};
+
+static struct platform_device_id imx_esdhc_devtype[] = {
+ {
+ .name = "sdhci-esdhc-imx25",
+ .driver_data = IMX25_ESDHC,
+ }, {
+ .name = "sdhci-esdhc-imx35",
+ .driver_data = IMX35_ESDHC,
+ }, {
+ .name = "sdhci-esdhc-imx51",
+ .driver_data = IMX51_ESDHC,
+ }, {
+ .name = "sdhci-esdhc-imx53",
+ .driver_data = IMX53_ESDHC,
+ }, {
+ /* sentinel */
+ }
};
+MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype);
+
+static const struct of_device_id imx_esdhc_dt_ids[] = {
+ { .compatible = "fsl,imx25-esdhc", .data = &imx_esdhc_devtype[IMX25_ESDHC], },
+ { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], },
+ { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], },
+ { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
+
+static inline int is_imx25_esdhc(struct pltfm_imx_data *data)
+{
+ return data->devtype == IMX25_ESDHC;
+}
+
+static inline int is_imx35_esdhc(struct pltfm_imx_data *data)
+{
+ return data->devtype == IMX35_ESDHC;
+}
+
+static inline int is_imx51_esdhc(struct pltfm_imx_data *data)
+{
+ return data->devtype == IMX51_ESDHC;
+}
+
+static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
+{
+ return data->devtype == IMX53_ESDHC;
+}
static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
{
@@ -60,17 +120,14 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
- /* fake CARD_PRESENT flag on mx25/35 */
+ /* fake CARD_PRESENT flag */
u32 val = readl(host->ioaddr + reg);
if (unlikely((reg == SDHCI_PRESENT_STATE)
- && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD))) {
- struct esdhc_platform_data *boarddata =
- host->mmc->parent->platform_data;
-
- if (boarddata && gpio_is_valid(boarddata->cd_gpio)
- && gpio_get_value(boarddata->cd_gpio))
+ && gpio_is_valid(boarddata->cd_gpio))) {
+ if (gpio_get_value(boarddata->cd_gpio))
/* no card, if a valid gpio says so... */
val &= ~SDHCI_CARD_PRESENT;
else
@@ -85,14 +142,33 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
-
- if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)
- && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD)))
- /*
- * these interrupts won't work with a custom card_detect gpio
- * (only applied to mx25/35)
- */
- val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ u32 data;
+
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+ if (boarddata->cd_type == ESDHC_CD_GPIO)
+ /*
+ * These interrupts won't work with a custom
+ * card_detect gpio (only applied to mx25/35)
+ */
+ val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+
+ if (val & SDHCI_INT_CARD_INT) {
+ /*
+ * Clear and then set the D3CD bit to avoid missing a
+ * card interrupt. This is an eSDHC controller quirk,
+ * so we apply the following workaround: clearing and
+ * then setting D3CD makes the eSDHC re-sample the card
+ * interrupt, so a card interrupt that was lost is
+ * picked up again by these steps.
+ */
+ data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
+ data &= ~SDHCI_CTRL_D3CD;
+ writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
+ data |= SDHCI_CTRL_D3CD;
+ writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
+ }
+ }
if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
&& (reg == SDHCI_INT_STATUS)
@@ -162,8 +238,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
*/
return;
case SDHCI_HOST_CONTROL:
- /* FSL messed up here, so we can just keep those two */
- new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS);
+ /* FSL messed up here, so we can just keep those three */
+ new_val = val & (SDHCI_CTRL_LED | \
+ SDHCI_CTRL_4BITBUS | \
+ SDHCI_CTRL_D3CD);
/* ensure the endianess */
new_val |= ESDHC_HOST_CONTROL_LE;
/* DMA mode bits are shifted */
@@ -173,6 +251,17 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
return;
}
esdhc_clrset_le(host, 0xff, val, reg);
+
+ /*
+ * The esdhc violates the SDHC spec, which says that a software
+ * reset should not affect the card detection circuit. The esdhc
+ * clears SYSCTL register bits [0..2] during a software reset,
+ * which stops the clocks the card detection circuit relies on.
+ * To work around this, turn those clocks back on so the card
+ * detection circuit keeps working.
+ */
+ if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1))
+ esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
}
static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
@@ -189,6 +278,26 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
return clk_get_rate(pltfm_host->clk) / 256 / 16;
}
+static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+
+ switch (boarddata->wp_type) {
+ case ESDHC_WP_GPIO:
+ if (gpio_is_valid(boarddata->wp_gpio))
+ return gpio_get_value(boarddata->wp_gpio);
+ case ESDHC_WP_CONTROLLER:
+ return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
+ SDHCI_WRITE_PROTECT);
+ case ESDHC_WP_NONE:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
@@ -198,6 +307,7 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.set_clock = esdhc_set_clock,
.get_max_clock = esdhc_pltfm_get_max_clock,
.get_min_clock = esdhc_pltfm_get_min_clock,
+ .get_ro = esdhc_pltfm_get_ro,
};
static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
@@ -207,17 +317,6 @@ static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
.ops = &sdhci_esdhc_ops,
};
-static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
-{
- struct esdhc_platform_data *boarddata =
- host->mmc->parent->platform_data;
-
- if (boarddata && gpio_is_valid(boarddata->wp_gpio))
- return gpio_get_value(boarddata->wp_gpio);
- else
- return -ENOSYS;
-}
-
static irqreturn_t cd_irq(int irq, void *data)
{
struct sdhci_host *sdhost = (struct sdhci_host *)data;
@@ -226,8 +325,48 @@ static irqreturn_t cd_irq(int irq, void *data)
return IRQ_HANDLED;
};
+#ifdef CONFIG_OF
+static int __devinit
+sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ struct esdhc_platform_data *boarddata)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ if (of_get_property(np, "fsl,card-wired", NULL))
+ boarddata->cd_type = ESDHC_CD_PERMANENT;
+
+ if (of_get_property(np, "fsl,cd-controller", NULL))
+ boarddata->cd_type = ESDHC_CD_CONTROLLER;
+
+ if (of_get_property(np, "fsl,wp-controller", NULL))
+ boarddata->wp_type = ESDHC_WP_CONTROLLER;
+
+ boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
+ if (gpio_is_valid(boarddata->cd_gpio))
+ boarddata->cd_type = ESDHC_CD_GPIO;
+
+ boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ if (gpio_is_valid(boarddata->wp_gpio))
+ boarddata->wp_type = ESDHC_WP_GPIO;
+
+ return 0;
+}
+#else
+static inline int
+sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ struct esdhc_platform_data *boarddata)
+{
+ return -ENODEV;
+}
+#endif
+
static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(imx_esdhc_dt_ids, &pdev->dev);
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
struct esdhc_platform_data *boarddata;
@@ -242,8 +381,14 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
- if (!imx_data)
- return -ENOMEM;
+ if (!imx_data) {
+ err = -ENOMEM;
+ goto err_imx_data;
+ }
+
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ imx_data->devtype = pdev->id_entry->driver_data;
pltfm_host->priv = imx_data;
clk = clk_get(mmc_dev(host->mmc), NULL);
@@ -255,50 +400,72 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
clk_enable(clk);
pltfm_host->clk = clk;
- if (!cpu_is_mx25())
+ if (!is_imx25_esdhc(imx_data))
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- if (cpu_is_mx25() || cpu_is_mx35()) {
+ if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
- /* write_protect can't be routed to controller, use gpio */
- sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
- }
- if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51()))
+ if (is_imx53_esdhc(imx_data))
imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
- boarddata = host->mmc->parent->platform_data;
- if (boarddata) {
+ boarddata = &imx_data->boarddata;
+ if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
+ if (!host->mmc->parent->platform_data) {
+ dev_err(mmc_dev(host->mmc), "no board data!\n");
+ err = -EINVAL;
+ goto no_board_data;
+ }
+ imx_data->boarddata = *((struct esdhc_platform_data *)
+ host->mmc->parent->platform_data);
+ }
+
+ /* write_protect */
+ if (boarddata->wp_type == ESDHC_WP_GPIO) {
err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
if (err) {
dev_warn(mmc_dev(host->mmc),
- "no write-protect pin available!\n");
- boarddata->wp_gpio = err;
+ "no write-protect pin available!\n");
+ boarddata->wp_gpio = -EINVAL;
}
+ } else {
+ boarddata->wp_gpio = -EINVAL;
+ }
+ /* card_detect */
+ if (boarddata->cd_type != ESDHC_CD_GPIO)
+ boarddata->cd_gpio = -EINVAL;
+
+ switch (boarddata->cd_type) {
+ case ESDHC_CD_GPIO:
err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD");
if (err) {
- dev_warn(mmc_dev(host->mmc),
+ dev_err(mmc_dev(host->mmc),
"no card-detect pin available!\n");
goto no_card_detect_pin;
}
- /* i.MX5x has issues to be researched */
- if (!cpu_is_mx25() && !cpu_is_mx35())
- goto not_supported;
-
err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
mmc_hostname(host->mmc), host);
if (err) {
- dev_warn(mmc_dev(host->mmc), "request irq error\n");
+ dev_err(mmc_dev(host->mmc), "request irq error\n");
goto no_card_detect_irq;
}
+ /* fall through */
- imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD;
- /* Now we have a working card_detect again */
+ case ESDHC_CD_CONTROLLER:
+ /* we have a working card_detect back */
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ break;
+
+ case ESDHC_CD_PERMANENT:
+ host->mmc->caps = MMC_CAP_NONREMOVABLE;
+ break;
+
+ case ESDHC_CD_NONE:
+ break;
}
err = sdhci_add_host(host);
@@ -307,16 +474,21 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
return 0;
- no_card_detect_irq:
- gpio_free(boarddata->cd_gpio);
- no_card_detect_pin:
- boarddata->cd_gpio = err;
- not_supported:
- kfree(imx_data);
- err_add_host:
+err_add_host:
+ if (gpio_is_valid(boarddata->cd_gpio))
+ free_irq(gpio_to_irq(boarddata->cd_gpio), host);
+no_card_detect_irq:
+ if (gpio_is_valid(boarddata->cd_gpio))
+ gpio_free(boarddata->cd_gpio);
+ if (gpio_is_valid(boarddata->wp_gpio))
+ gpio_free(boarddata->wp_gpio);
+no_card_detect_pin:
+no_board_data:
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
- err_clk_get:
+err_clk_get:
+ kfree(imx_data);
+err_imx_data:
sdhci_pltfm_free(pdev);
return err;
}
@@ -325,20 +497,18 @@ static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
sdhci_remove_host(host, dead);
- if (boarddata && gpio_is_valid(boarddata->wp_gpio))
+ if (gpio_is_valid(boarddata->wp_gpio))
gpio_free(boarddata->wp_gpio);
- if (boarddata && gpio_is_valid(boarddata->cd_gpio)) {
+ if (gpio_is_valid(boarddata->cd_gpio)) {
+ free_irq(gpio_to_irq(boarddata->cd_gpio), host);
gpio_free(boarddata->cd_gpio);
-
- if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
- free_irq(gpio_to_irq(boarddata->cd_gpio), host);
}
clk_disable(pltfm_host->clk);
@@ -354,7 +524,9 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
.driver = {
.name = "sdhci-esdhc-imx",
.owner = THIS_MODULE,
+ .of_match_table = imx_esdhc_dt_ids,
},
+ .id_table = imx_esdhc_devtype,
.probe = sdhci_esdhc_imx_probe,
.remove = __devexit_p(sdhci_esdhc_imx_remove),
#ifdef CONFIG_PM
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 71c0ce1..6414efe 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -85,6 +85,7 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
{
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
+ struct device_node *np = pdev->dev.of_node;
struct resource *iomem;
int ret;
@@ -98,7 +99,7 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
dev_err(&pdev->dev, "Invalid iomem size!\n");
/* Some PCI-based MFD need the parent here */
- if (pdev->dev.parent != &platform_bus)
+ if (pdev->dev.parent != &platform_bus && !np)
host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host));
else
host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host));
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 4198dbb..fc7e4a5 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -195,7 +195,8 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
clk_enable(clk);
host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
+ | SDHCI_QUIRK_32BIT_ADMA_SIZE;
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 460ffaf..fe886d6 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <linux/mmc/host.h>
@@ -301,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
ctrl &= ~SDHCI_CTRL_8BITBUS;
break;
default:
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
break;
}
@@ -502,6 +505,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
/* This host supports the Auto CMD12 */
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+ /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
+ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;
+
if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c31a334..0e02cc1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -628,12 +628,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
/* timeout in us */
if (!data)
target_timeout = cmd->cmd_timeout_ms * 1000;
- else
- target_timeout = data->timeout_ns / 1000 +
- data->timeout_clks / host->clock;
-
- if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
- host->timeout_clk = host->clock / 1000;
+ else {
+ target_timeout = data->timeout_ns / 1000;
+ if (host->clock)
+ target_timeout += data->timeout_clks / host->clock;
+ }
/*
* Figure out needed cycles.
@@ -645,7 +644,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
* =>
* (1) / (2) > 2^6
*/
- BUG_ON(!host->timeout_clk);
count = 0;
current_timeout = (1 << 13) * 1000 / host->timeout_clk;
while (current_timeout < target_timeout) {
@@ -1867,9 +1865,6 @@ static void sdhci_tasklet_finish(unsigned long param)
del_timer(&host->timer);
- if (host->version >= SDHCI_SPEC_300)
- del_timer(&host->tuning_timer);
-
mrq = host->mrq;
/*
@@ -2461,22 +2456,6 @@ int sdhci_add_host(struct sdhci_host *host)
host->max_clk = host->ops->get_max_clock(host);
}
- host->timeout_clk =
- (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
- if (host->timeout_clk == 0) {
- if (host->ops->get_timeout_clock) {
- host->timeout_clk = host->ops->get_timeout_clock(host);
- } else if (!(host->quirks &
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
- printk(KERN_ERR
- "%s: Hardware doesn't specify timeout clock "
- "frequency.\n", mmc_hostname(mmc));
- return -ENODEV;
- }
- }
- if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
- host->timeout_clk *= 1000;
-
/*
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
@@ -2509,10 +2488,26 @@ int sdhci_add_host(struct sdhci_host *host)
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
+ host->timeout_clk =
+ (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
+ if (host->timeout_clk == 0) {
+ if (host->ops->get_timeout_clock) {
+ host->timeout_clk = host->ops->get_timeout_clock(host);
+ } else if (!(host->quirks &
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
+ printk(KERN_ERR
+ "%s: Hardware doesn't specify timeout clock "
+ "frequency.\n", mmc_hostname(mmc));
+ return -ENODEV;
+ }
+ }
+ if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+ host->timeout_clk *= 1000;
+
if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
- mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000);
- else
- mmc->max_discard_to = (1 << 27) / host->timeout_clk;
+ host->timeout_clk = mmc->f_max / 1000;
+
+ mmc->max_discard_to = (1 << 27) / host->timeout_clk;
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
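As a rough worked example for the max_discard_to computation above (illustrative numbers, not from the patch): host->timeout_clk ends up in kHz, so with SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK and f_max = 50 MHz the driver gets timeout_clk = 50000, and max_discard_to = (1 << 27) / 50000 ≈ 2684 ms — the longest data timeout the controller's 2^27-cycle counter can express at that clock.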
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 774f643..0c4a672 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->hclk = clk_get_rate(priv->clk);
mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
mmc_data->get_cd = sh_mobile_sdhi_get_cd;
- if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
- mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
if (p) {
mmc_data->flags = p->tmio_flags;
+ if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
+ mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
mmc_data->ocr_mask = p->tmio_ocr_mask;
mmc_data->capabilities |= p->tmio_caps;
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 8d185de..44a9668 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -27,7 +27,6 @@
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
const struct mfd_cell *cell = mfd_get_cell(dev);
- struct mmc_host *mmc = platform_get_drvdata(dev);
int ret;
ret = tmio_mmc_host_suspend(&dev->dev);
@@ -42,7 +41,6 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_mmc_resume(struct platform_device *dev)
{
const struct mfd_cell *cell = mfd_get_cell(dev);
- struct mmc_host *mmc = platform_get_drvdata(dev);
int ret = 0;
/* Tell the MFD core we are ready to be enabled */
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 087d880..eeaf643 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -21,6 +21,7 @@
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
/* Definitions for values the CTRL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ 0x0001
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 65b5b76..64fbb00 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
#define ubi_dbg_msg(fmt, ...) do { \
if (0) \
- pr_debug(fmt "\n", ##__VA_ARGS__); \
+ printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
} while (0)
#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
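The "if (0)" form restored above is the usual zero-cost debug idiom: the call is eliminated as dead code, but the compiler still type-checks the format string against its arguments. A minimal stand-alone sketch of the same idiom (hypothetical names, plain userspace C, not the UBI macros):

#include <stdio.h>

/* Dead-code debug macro: never executes, but the compiler still checks
 * the format string against the arguments. */
#define dbg(fmt, ...) do { if (0) printf(fmt "\n", ##__VA_ARGS__); } while (0)

int main(void)
{
	dbg("value=%d", 42);        /* compiled away entirely */
	/* dbg("value=%d", "oops");    would trigger a format warning */
	return 0;
}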
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b7622c3..e1eca2a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -282,6 +282,7 @@ obj-$(CONFIG_USB_HSO) += usb/
obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
obj-$(CONFIG_USB_IPHETH) += usb/
+obj-$(CONFIG_USB_CDC_PHONET) += usb/
obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 536038b..31798f5 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1502,13 +1502,13 @@ static int __devinit ace_init(struct net_device *dev)
* firmware to wipe the ring without re-initializing it.
*/
if (!test_and_set_bit(0, &ap->std_refill_busy))
- ace_load_std_rx_ring(ap, RX_RING_SIZE);
+ ace_load_std_rx_ring(dev, RX_RING_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
ap->name);
if (ap->version >= 2) {
if (!test_and_set_bit(0, &ap->mini_refill_busy))
- ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
+ ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling "
"the RX mini ring\n", ap->name);
@@ -1584,9 +1584,10 @@ static void ace_watchdog(struct net_device *data)
}
-static void ace_tasklet(unsigned long dev)
+static void ace_tasklet(unsigned long arg)
{
- struct ace_private *ap = netdev_priv((struct net_device *)dev);
+ struct net_device *dev = (struct net_device *) arg;
+ struct ace_private *ap = netdev_priv(dev);
int cur_size;
cur_size = atomic_read(&ap->cur_rx_bufs);
@@ -1595,7 +1596,7 @@ static void ace_tasklet(unsigned long dev)
#ifdef DEBUG
printk("refilling buffers (current %i)\n", cur_size);
#endif
- ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
+ ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
}
if (ap->version >= 2) {
@@ -1606,7 +1607,7 @@ static void ace_tasklet(unsigned long dev)
printk("refilling mini buffers (current %i)\n",
cur_size);
#endif
- ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
+ ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
}
}
@@ -1616,7 +1617,7 @@ static void ace_tasklet(unsigned long dev)
#ifdef DEBUG
printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
}
ap->tasklet_pending = 0;
}
@@ -1642,8 +1643,9 @@ static void ace_dump_trace(struct ace_private *ap)
* done only before the device is enabled, thus no interrupts are
* generated and by the interrupt handler/tasklet handler.
*/
-static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
+static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
@@ -1657,11 +1659,10 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
- skb = dev_alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN);
+ skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_STD_BUFSIZE,
@@ -1705,8 +1706,9 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
}
-static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
+static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
{
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
@@ -1718,11 +1720,10 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
- skb = dev_alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN);
+ skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_MINI_BUFSIZE,
@@ -1762,8 +1763,9 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
* Load the jumbo rx ring, this may happen at any time if the MTU
* is changed to a value > 1500.
*/
-static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
+static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
{
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
@@ -1774,11 +1776,10 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
- skb = dev_alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN);
+ skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_JUMBO_BUFSIZE,
@@ -2196,7 +2197,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
#ifdef DEBUG
printk("low on std buffers %i\n", cur_size);
#endif
- ace_load_std_rx_ring(ap,
+ ace_load_std_rx_ring(dev,
RX_RING_SIZE - cur_size);
} else
run_tasklet = 1;
@@ -2212,7 +2213,8 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
printk("low on mini buffers %i\n",
cur_size);
#endif
- ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
+ ace_load_mini_rx_ring(dev,
+ RX_MINI_SIZE - cur_size);
} else
run_tasklet = 1;
}
@@ -2228,7 +2230,8 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
printk("low on jumbo buffers %i\n",
cur_size);
#endif
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
+ ace_load_jumbo_rx_ring(dev,
+ RX_JUMBO_SIZE - cur_size);
} else
run_tasklet = 1;
}
@@ -2267,7 +2270,7 @@ static int ace_open(struct net_device *dev)
if (ap->jumbo &&
!test_and_set_bit(0, &ap->jumbo_refill_busy))
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
if (dev->flags & IFF_PROMISC) {
cmd.evt = C_SET_PROMISC_MODE;
@@ -2575,7 +2578,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
"support\n", dev->name);
ap->jumbo = 1;
if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
ace_set_rxtx_parms(dev, 1);
}
} else {
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index f67dc9b..51c486c 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -766,9 +766,9 @@ static inline void ace_unmask_irq(struct net_device *dev)
* Prototypes
*/
static int ace_init(struct net_device *dev);
-static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs);
-static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs);
-static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs);
+static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
+static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
+static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id);
static int ace_load_firmware(struct net_device *dev);
static int ace_open(struct net_device *dev);
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 52fe21e..3b1416e 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
struct net_device *dev = (struct net_device *)data;
struct dev_priv *priv = netdev_priv(dev);
unsigned int lnkstat, carrier;
+ unsigned long flags;
+ spin_lock_irqsave(&priv->chip_lock, flags);
lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
carrier = netif_carrier_ok(dev);
if (lnkstat && !carrier) {
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index c346e65..9f3e530 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -44,7 +44,7 @@
* SMP torture testing
*/
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <linux/compiler.h>
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index e0f87cf..d4f7dda 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -20,7 +20,7 @@
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h
index 78344dd..bf9016e 100644
--- a/drivers/net/atlx/atl2.h
+++ b/drivers/net/atlx/atl2.h
@@ -25,7 +25,7 @@
#ifndef _ATL2_H_
#define _ATL2_H_
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/netdevice.h>
#ifndef _ATL2_HW_H_
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 5b0dba6..37e5790 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -63,8 +63,9 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
#ifdef BCM_CNIC
- /* We don't want TPA on FCoE, FWD and OOO L2 rings */
- bnx2x_fcoe(bp, disable_tpa) = 1;
+ /* We don't want TPA on an FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ fp->disable_tpa = 1;
#endif
}
@@ -1404,10 +1405,9 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct bnx2x *bp = netdev_priv(dev);
+
#ifdef BCM_CNIC
- if (NO_FCOE(bp))
- return skb_tx_hash(dev, skb);
- else {
+ if (!NO_FCOE(bp)) {
struct ethhdr *hdr = (struct ethhdr *)skb->data;
u16 ether_type = ntohs(hdr->h_proto);
@@ -1424,8 +1424,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
return bnx2x_fcoe_tx(bp, txq_index);
}
#endif
- /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
- */
+ /* select a non-FCoE queue */
return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
@@ -1448,6 +1447,28 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues += NON_ETH_CONTEXT_USE;
}
+/**
+ * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
+ *
+ * @bp: Driver handle
+ *
+ * We currently support at most 16 Tx queues for each CoS, thus we will
+ * allocate a multiple of 16 for ETH L2 rings according to the value of the
+ * bp->max_cos.
+ *
+ * If there is an FCoE L2 queue the appropriate Tx queue will have the next
+ * index after all ETH L2 indices.
+ *
+ * If the actual number of Tx queues (for each CoS) is less than 16 then there
+ * will be holes at the end of each group of 16 ETH L2 indices (0..15,
+ * 16..31, ...) with indices that are not coupled with any real Tx queue.
+ *
+ * The proper configuration of skb->queue_mapping is handled by
+ * bnx2x_select_queue() and __skb_tx_hash().
+ *
+ * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
+ * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
+ */
static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
int rc, tx, rx;
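A small illustration of the indexing scheme the comment above describes (the group size of 16 comes from the comment; these helpers are hypothetical, not part of the driver):

#define ILLUSTRATIVE_TXQS_PER_COS	16

/* ETH L2 Tx queue index for a given CoS and per-CoS queue number;
 * per-CoS queues beyond the actual count are the "holes" mentioned above. */
static inline int eth_txq_index(int cos, int queue)
{
	return cos * ILLUSTRATIVE_TXQS_PER_COS + queue;
}

/* FCoE L2 Tx queue index: the first index after all ETH L2 indices. */
static inline int fcoe_txq_index(int max_cos)
{
	return max_cos * ILLUSTRATIVE_TXQS_PER_COS;
}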
@@ -1989,14 +2010,20 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
return -EINVAL;
}
+ /*
+ * It's important to set bp->state to a value different from
+ * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
+ * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
+ */
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+ smp_mb();
+
/* Stop Tx */
bnx2x_tx_disable(bp);
#ifdef BCM_CNIC
bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
- bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
- smp_mb();
bp->rx_mode = BNX2X_RX_MODE_NONE;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index a4ea35f..a1e004a 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -920,7 +920,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
{
- if (!CHIP_IS_E1x(bp)) {
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
bp->dcb_state = dcb_on;
bp->dcbx_enabled = dcbx_enabled;
} else {
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 06727f3..dc24de4 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -1204,6 +1204,8 @@ struct drv_port_mb {
#define LINK_STATUS_PFC_ENABLED 0x20000000
+ #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+
u32 port_stx;
u32 stat_nig_timer;
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index bcd8f00..d45b155 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1546,6 +1546,12 @@ static void bnx2x_umac_enable(struct link_params *params,
vars->line_speed);
break;
}
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
+
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+
REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
udelay(50);
@@ -1661,10 +1667,20 @@ static void bnx2x_xmac_disable(struct link_params *params)
{
u8 port = params->port;
struct bnx2x *bp = params->bp;
- u32 xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
if (REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC) {
+ /*
+ * Send an indication to change the state in the NIG back to XON.
+ * Clearing this bit allows the next set of the bit to produce a
+ * rising edge.
+ */
+ pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl & ~(1<<1)));
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl | (1<<1)));
DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
usleep_range(1000, 1000);
@@ -1729,6 +1745,10 @@ static int bnx2x_emac_enable(struct link_params *params,
DP(NETIF_MSG_LINK, "enabling EMAC\n");
+ /* Disable BMAC */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
/* enable emac and not bmac */
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
@@ -2583,12 +2603,6 @@ static int bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
- if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) {
- REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LSS_STATUS,
- wb_data, 2);
- if (wb_data[0] > 0)
- return -ESRCH;
- }
return 0;
}
@@ -2654,16 +2668,6 @@ static int bnx2x_bmac2_enable(struct link_params *params,
udelay(30);
bnx2x_update_pfc_bmac2(params, vars, is_lb);
- if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) {
- REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LSS_STAT,
- wb_data, 2);
- if (wb_data[0] > 0) {
- DP(NETIF_MSG_LINK, "Got bad LSS status 0x%x\n",
- wb_data[0]);
- return -ESRCH;
- }
- }
-
return 0;
}
@@ -2949,7 +2953,9 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
u32 val;
u16 i;
int rc = 0;
-
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
/* address */
val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
@@ -3003,6 +3009,9 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
}
}
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
return rc;
}
@@ -3012,6 +3021,9 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
u32 tmp;
u8 i;
int rc = 0;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
/* address */
@@ -3065,7 +3077,9 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
}
}
-
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
return rc;
}
@@ -4353,6 +4367,9 @@ void bnx2x_link_status_update(struct link_params *params,
vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
vars->phy_flags = PHY_XGXS_FLAG;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
if (vars->link_up) {
DP(NETIF_MSG_LINK, "phy link up\n");
@@ -4444,6 +4461,8 @@ void bnx2x_link_status_update(struct link_params *params,
/* indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
}
/* Sync media type */
@@ -5903,20 +5922,30 @@ int bnx2x_set_led(struct link_params *params,
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
EMAC_WR(bp, EMAC_REG_EMAC_LED,
(tmp | EMAC_LED_OVERRIDE));
- return rc;
+ /*
+ * return here without enabling traffic
+ * LED blink and setting rate in ON mode.
+ * In oper mode, enabling LED blink
+ * and setting rate is needed.
+ */
+ if (mode == LED_MODE_ON)
+ return rc;
}
- } else if (SINGLE_MEDIA_DIRECT(params) &&
- (CHIP_IS_E1x(bp) ||
- CHIP_IS_E2(bp))) {
+ } else if (SINGLE_MEDIA_DIRECT(params)) {
/*
* This is a work-around for HW issue found when link
* is up in CL73
*/
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
- } else {
+ if (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp) ||
+ (mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ else
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ hw_led_mode);
+ } else
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
- }
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
@@ -6160,6 +6189,7 @@ static int bnx2x_update_link_down(struct link_params *params,
/* update shared memory */
vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG |
LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
@@ -6197,7 +6227,8 @@ static int bnx2x_update_link_up(struct link_params *params,
u8 port = params->port;
int rc = 0;
- vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_status |= (LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG);
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
@@ -7998,6 +8029,9 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+ /* Restart microcode to re-read the new mode */
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
}
@@ -8116,7 +8150,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].
config));
-
bnx2x_set_gpio_int(bp, gpio_num,
MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
gpio_port);
@@ -8125,8 +8158,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
* Disable transmit for this module
*/
phy->media_type = ETH_PHY_NOT_PRESENT;
- if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
- PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+ if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
+ CHIP_IS_E3(bp))
bnx2x_sfp_set_transmitter(params, phy, 0);
}
}
@@ -8228,9 +8262,6 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
u16 cnt, val, tmp1;
struct bnx2x *bp = params->bp;
- /* SPF+ PHY: Set flag to check for Tx error */
- vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
-
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
@@ -8414,9 +8445,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
- /* SPF+ PHY: Set flag to check for Tx error */
- vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
-
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
bnx2x_wait_reset_complete(bp, phy, params);
@@ -8585,9 +8613,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
- /* SPF+ PHY: Set flag to check for Tx error */
- vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
-
bnx2x_wait_reset_complete(bp, phy, params);
rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
/* Should be 0x6 to enable XS on Tx side. */
@@ -9243,7 +9268,13 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
if (phy->req_duplex == DUPLEX_FULL)
autoneg_val |= (1<<8);
- bnx2x_cl45_write(bp, phy,
+ /*
+ * Always write this if this is not 84833.
+ * For 84833, write it only when it's a forced speed.
+ */
+ if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ ((autoneg_val & (1<<12)) == 0))
+ bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
@@ -9257,13 +9288,12 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
0x3200);
- } else if (phy->req_line_speed != SPEED_10 &&
- phy->req_line_speed != SPEED_100) {
+ } else
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
1);
- }
+
/* Save spirom version */
bnx2x_save_848xx_spirom_version(phy, params);
@@ -9756,11 +9786,9 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_CTL_DEVAD,
0x400f, &val16);
- /* Put to low power mode on newer FW */
- if ((val16 & 0x303f) > 0x1009)
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x800);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, 0x800);
}
}
@@ -10191,8 +10219,15 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
u32 cfg_pin;
u8 port;
- /* This works with E3 only, no need to check the chip
- before determining the port. */
+ /*
+ * In case no EPIO is routed to reset the GPHY, put it
+ * in low power mode.
+ */
+ bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
+ /*
+ * This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
port = params->port;
cfg_pin = (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
@@ -10603,7 +10638,8 @@ static struct bnx2x_phy phy_warpcore = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
.addr = 0xff,
.def_md_devad = 0,
- .flags = FLAGS_HW_LOCK_REQUIRED,
+ .flags = (FLAGS_HW_LOCK_REQUIRED |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10729,7 +10765,8 @@ static struct bnx2x_phy phy_8706 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
.addr = 0xff,
.def_md_devad = 0,
- .flags = FLAGS_INIT_XGXS_FIRST,
+ .flags = (FLAGS_INIT_XGXS_FIRST |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10760,7 +10797,8 @@ static struct bnx2x_phy phy_8726 = {
.addr = 0xff,
.def_md_devad = 0,
.flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_INIT_XGXS_FIRST),
+ FLAGS_INIT_XGXS_FIRST |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10791,7 +10829,8 @@ static struct bnx2x_phy phy_8727 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
.addr = 0xff,
.def_md_devad = 0,
- .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .flags = (FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11112,6 +11151,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
*/
if (CHIP_REV(bp) == CHIP_REV_Ax)
phy->flags |= FLAGS_MDC_MDIO_WA;
+ else
+ phy->flags |= FLAGS_MDC_MDIO_WA_B0;
} else {
switch (switch_cfg) {
case SWITCH_CFG_1G:
@@ -11500,13 +11541,12 @@ void bnx2x_init_xmac_loopback(struct link_params *params,
* Set WC to loopback mode since link is required to provide clock
* to the XMAC in 20G mode
*/
- if (vars->line_speed == SPEED_20000) {
- bnx2x_set_aer_mmd(params, &params->phy[0]);
- bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
- params->phy[INT_PHY].config_loopback(
+ bnx2x_set_aer_mmd(params, &params->phy[0]);
+ bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
+ params->phy[INT_PHY].config_loopback(
&params->phy[INT_PHY],
params);
- }
+
bnx2x_xmac_enable(params, vars, 1);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
@@ -11684,12 +11724,16 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
if (reset_ext_phy) {
+ bnx2x_set_mdio_clk(bp, params->chip_id, port);
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
- if (params->phy[phy_index].link_reset)
+ if (params->phy[phy_index].link_reset) {
+ bnx2x_set_aer_mmd(params,
+ &params->phy[phy_index]);
params->phy[phy_index].link_reset(
&params->phy[phy_index],
params);
+ }
if (params->phy[phy_index].flags &
FLAGS_REARM_LATCH_SIGNAL)
clear_latch_ind = 1;
@@ -12178,10 +12222,6 @@ static void bnx2x_analyze_link_error(struct link_params *params,
u8 led_mode;
u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
- /*DP(NETIF_MSG_LINK, "CHECK LINK: %x half_open:%x-> lss:%x\n",
- vars->link_up,
- half_open_conn, lss_status);*/
-
if ((lss_status ^ half_open_conn) == 0)
return;
@@ -12194,6 +12234,7 @@ static void bnx2x_analyze_link_error(struct link_params *params,
* b. Update link_vars->link_up
*/
if (lss_status) {
+ DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
vars->link_status &= ~LINK_STATUS_LINK_UP;
vars->link_up = 0;
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
@@ -12203,6 +12244,7 @@ static void bnx2x_analyze_link_error(struct link_params *params,
*/
led_mode = LED_MODE_OFF;
} else {
+ DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
vars->link_status |= LINK_STATUS_LINK_UP;
vars->link_up = 1;
vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
@@ -12219,6 +12261,15 @@ static void bnx2x_analyze_link_error(struct link_params *params,
bnx2x_notify_link_changed(bp);
}
+/******************************************************************************
+* Description:
+* This function checks for a half-open connection change indication.
+* When such a change occurs, it calls bnx2x_analyze_link_error()
+* to check whether Remote Fault is set or cleared. Reception of a remote
+* fault status message in the MAC indicates that the peer's MAC has
+* detected a fault, for example, due to a break in the TX side of the fiber.
+*
+******************************************************************************/
static void bnx2x_check_half_open_conn(struct link_params *params,
struct link_vars *vars)
{
@@ -12229,9 +12280,28 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
return;
- if (!CHIP_IS_E3(bp) &&
+ if (CHIP_IS_E3(bp) &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))) {
+ (MISC_REGISTERS_RESET_REG_2_XMAC))) {
+ /* Check E3 XMAC */
+ /*
+ * Note that link speed cannot be queried here, since it may be
+ * zero while link is down. In case UMAC is active, LSS will
+ * simply not be set
+ */
+ mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ /* Clear sticky bits (requires a rising edge) */
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+ if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
+ lss_status = 1;
+
+ bnx2x_analyze_link_error(params, vars, lss_status);
+ } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
/* Check E1X / E2 BMAC */
u32 lss_status_reg;
u32 wb_data[2];
@@ -12253,14 +12323,20 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
+ u16 phy_idx;
if (!params) {
- DP(NETIF_MSG_LINK, "Ininitliazed params !\n");
+ DP(NETIF_MSG_LINK, "Uninitialized params !\n");
return;
}
- /* DP(NETIF_MSG_LINK, "Periodic called vars->phy_flags 0x%x speed 0x%x
- RESET_REG_2 0x%x\n", vars->phy_flags, vars->line_speed,
- REG_RD(bp, MISC_REG_RESET_REG_2)); */
- bnx2x_check_half_open_conn(params, vars);
+
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+ bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
+ bnx2x_check_half_open_conn(params, vars);
+ break;
+ }
+ }
+
if (CHIP_IS_E3(bp))
bnx2x_check_over_curr(params, vars);
}
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 6a7708d..c12db6d 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -145,6 +145,8 @@ struct bnx2x_phy {
#define FLAGS_SFP_NOT_APPROVED (1<<7)
#define FLAGS_MDC_MDIO_WA (1<<8)
#define FLAGS_DUMMY_READ (1<<9)
+#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
+#define FLAGS_TX_ERROR_CHECK (1<<12)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
@@ -276,7 +278,6 @@ struct link_vars {
#define PHY_PHYSICAL_LINK_FLAG (1<<2)
#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
#define PHY_OVER_CURRENT_FLAG (1<<4)
-#define PHY_TX_ERROR_CHECK_FLAG (1<<5)
u8 mac_type;
#define MAC_TYPE_NONE 0
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 1507091..f74582a 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -5798,6 +5798,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
+ /*
+ * take the UNDI lock to protect undi_unload flow from accessing
+ * registers while we're resetting the chip
+ */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
bnx2x_reset_common(bp);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5808,6 +5814,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
}
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
if (!CHIP_IS_E1x(bp)) {
@@ -10251,10 +10259,17 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
+ /* Clean the following indirect addresses for all functions since they
+ * are not used by the driver.
+ */
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
/*
* Enable internal target-read (in case we are probed after PF FLR).
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 02461fe..40266c1 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -3007,11 +3007,27 @@
/* [R 6] Debug only: Number of used entries in the data FIFO */
#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
/* [R 7] Debug only: Number of used entries in the header FIFO */
-#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
-#define PXP2_REG_PGL_ADDR_88_F0 0x120534
-#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
-#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
-#define PXP2_REG_PGL_ADDR_94_F0 0x120540
+#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
+#define PXP2_REG_PGL_ADDR_88_F0 0x120534
+/* [R 32] GRC address for configuration access to PCIE config address 0x88.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_88_F1 0x120544
+#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
+/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
+#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
+/* [R 32] GRC address for configuration access to PCIE config address 0x90.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
+#define PXP2_REG_PGL_ADDR_94_F0 0x120540
+/* [R 32] GRC address for configuration access to PCIE config address 0x94.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_94_F1 0x120550
#define PXP2_REG_PGL_CONTROL0 0x120490
#define PXP2_REG_PGL_CONTROL1 0x120514
#define PXP2_REG_PGL_DEBUG 0x120520
@@ -4771,9 +4787,11 @@
The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] -
header pointer. */
#define UCM_REG_XX_TABLE 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8)
#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4)
#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1)
#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
@@ -5622,8 +5640,9 @@
#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
-#define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
+#define EMAC_MDIO_STATUS_10MB (1L<<1)
#define EMAC_MODE_25G_MODE (1L<<5)
#define EMAC_MODE_HALF_DUPLEX (1L<<1)
#define EMAC_MODE_PORT_GMII (2L<<2)
@@ -5634,6 +5653,7 @@
#define EMAC_REG_EMAC_MAC_MATCH 0x10
#define EMAC_REG_EMAC_MDIO_COMM 0xac
#define EMAC_REG_EMAC_MDIO_MODE 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
#define EMAC_REG_EMAC_MODE 0x0
#define EMAC_REG_EMAC_RX_MODE 0xc8
#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 02842d0..43f2ea5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1557,8 +1557,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
- else
+ else {
ether_setup(bond_dev);
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ }
netdev_bonding_change(bond_dev,
NETDEV_POST_TYPE_CHANGE);
@@ -3417,9 +3419,27 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
static int bond_open(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+ int i;
bond->kill_timers = 0;
+ /* reset slave->backup and slave->inactive */
+ read_lock(&bond->lock);
+ if (bond->slave_cnt > 0) {
+ read_lock(&bond->curr_slave_lock);
+ bond_for_each_slave(bond, slave, i) {
+ if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+ && (slave != bond->curr_active_slave)) {
+ bond_set_slave_inactive_flags(slave);
+ } else {
+ bond_set_slave_active_flags(slave);
+ }
+ }
+ read_unlock(&bond->curr_slave_lock);
+ }
+ read_unlock(&bond->lock);
+
INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
if (bond_is_lb(bond)) {
@@ -4330,7 +4350,7 @@ static void bond_setup(struct net_device *bond_dev)
bond_dev->tx_queue_len = 0;
bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
bond_dev->priv_flags |= IFF_BONDING;
- bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
/* At first, we block adding VLANs. That's the only way to
* prevent problems that occur when adding VLANs over an
@@ -4691,7 +4711,7 @@ static int bond_check_params(struct bond_params *params)
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
- pr_warning("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
+ pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
}
if (primary && !USES_PRIMARY(bond_mode)) {
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b60835f..2dfb4bf 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1025,6 +1025,7 @@ static ssize_t bonding_store_primary(struct device *d,
int i;
struct slave *slave;
struct bonding *bond = to_bond(d);
+ char ifname[IFNAMSIZ];
if (!rtnl_trylock())
return restart_syscall();
@@ -1035,32 +1036,33 @@ static ssize_t bonding_store_primary(struct device *d,
if (!USES_PRIMARY(bond->params.mode)) {
pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
bond->dev->name, bond->dev->name, bond->params.mode);
- } else {
- bond_for_each_slave(bond, slave, i) {
- if (strnicmp
- (slave->dev->name, buf,
- strlen(slave->dev->name)) == 0) {
- pr_info("%s: Setting %s as primary slave.\n",
- bond->dev->name, slave->dev->name);
- bond->primary_slave = slave;
- strcpy(bond->params.primary, slave->dev->name);
- bond_select_active_slave(bond);
- goto out;
- }
- }
+ goto out;
+ }
- /* if we got here, then we didn't match the name of any slave */
+ sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
- if (strlen(buf) == 0 || buf[0] == '\n') {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
- bond->primary_slave = NULL;
- bond_select_active_slave(bond);
- } else {
- pr_info("%s: Unable to set %.*s as primary slave as it is not a slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
+ /* check to see if we are clearing primary */
+ if (!strlen(ifname) || buf[0] == '\n') {
+ pr_info("%s: Setting primary slave to None.\n",
+ bond->dev->name);
+ bond->primary_slave = NULL;
+ bond_select_active_slave(bond);
+ goto out;
+ }
+
+ bond_for_each_slave(bond, slave, i) {
+ if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
+ pr_info("%s: Setting %s as primary slave.\n",
+ bond->dev->name, slave->dev->name);
+ bond->primary_slave = slave;
+ strcpy(bond->params.primary, slave->dev->name);
+ bond_select_active_slave(bond);
+ goto out;
}
}
+
+ pr_info("%s: Unable to set %.*s as primary slave.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
@@ -1195,6 +1197,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
struct slave *old_active = NULL;
struct slave *new_active = NULL;
struct bonding *bond = to_bond(d);
+ char ifname[IFNAMSIZ];
if (!rtnl_trylock())
return restart_syscall();
@@ -1203,56 +1206,62 @@ static ssize_t bonding_store_active_slave(struct device *d,
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
- if (!USES_PRIMARY(bond->params.mode))
+ if (!USES_PRIMARY(bond->params.mode)) {
pr_info("%s: Unable to change active slave; %s is in mode %d\n",
bond->dev->name, bond->dev->name, bond->params.mode);
- else {
- bond_for_each_slave(bond, slave, i) {
- if (strnicmp
- (slave->dev->name, buf,
- strlen(slave->dev->name)) == 0) {
- old_active = bond->curr_active_slave;
- new_active = slave;
- if (new_active == old_active) {
- /* do nothing */
- pr_info("%s: %s is already the current active slave.\n",
+ goto out;
+ }
+
+ sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
+
+ /* check to see if we are clearing active */
+ if (!strlen(ifname) || buf[0] == '\n') {
+ pr_info("%s: Clearing current active slave.\n",
+ bond->dev->name);
+ bond->curr_active_slave = NULL;
+ bond_select_active_slave(bond);
+ goto out;
+ }
+
+ bond_for_each_slave(bond, slave, i) {
+ if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
+ old_active = bond->curr_active_slave;
+ new_active = slave;
+ if (new_active == old_active) {
+ /* do nothing */
+ pr_info("%s: %s is already the current"
+ " active slave.\n",
+ bond->dev->name,
+ slave->dev->name);
+ goto out;
+ }
+ else {
+ if ((new_active) &&
+ (old_active) &&
+ (new_active->link == BOND_LINK_UP) &&
+ IS_UP(new_active->dev)) {
+ pr_info("%s: Setting %s as active"
+ " slave.\n",
bond->dev->name,
slave->dev->name);
- goto out;
+ bond_change_active_slave(bond,
+ new_active);
}
else {
- if ((new_active) &&
- (old_active) &&
- (new_active->link == BOND_LINK_UP) &&
- IS_UP(new_active->dev)) {
- pr_info("%s: Setting %s as active slave.\n",
- bond->dev->name,
- slave->dev->name);
- bond_change_active_slave(bond, new_active);
- }
- else {
- pr_info("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
- bond->dev->name,
- slave->dev->name,
- slave->dev->name);
- }
- goto out;
+ pr_info("%s: Could not set %s as"
+ " active slave; either %s is"
+ " down or the link is down.\n",
+ bond->dev->name,
+ slave->dev->name,
+ slave->dev->name);
}
+ goto out;
}
}
-
- /* if we got here, then we didn't match the name of any slave */
-
- if (strlen(buf) == 0 || buf[0] == '\n') {
- pr_info("%s: Setting active slave to None.\n",
- bond->dev->name);
- bond->primary_slave = NULL;
- bond_select_active_slave(bond);
- } else {
- pr_info("%s: Unable to set %.*s as active slave as it is not a slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- }
}
+
+ pr_info("%s: Unable to set %.*s as active slave.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 231385b..c7f3d4e 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -408,7 +408,7 @@ static void plx_pci_del_card(struct pci_dev *pdev)
struct sja1000_priv *priv;
int i = 0;
- for (i = 0; i < card->channels; i++) {
+ for (i = 0; i < PLX_PCI_MAX_CHAN; i++) {
dev = card->net_dev[i];
if (!dev)
continue;
@@ -536,7 +536,6 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "Registering device failed "
"(err=%d)\n", err);
- free_sja1000dev(dev);
goto failure_cleanup;
}
@@ -549,6 +548,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
dev_err(&pdev->dev, "Channel #%d not detected\n",
i + 1);
free_sja1000dev(dev);
+ card->net_dev[i] = NULL;
}
}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f523f1c..4b70b7e 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -197,7 +197,7 @@ static void slc_bump(struct slcan *sl)
skb->ip_summed = CHECKSUM_UNNECESSARY;
memcpy(skb_put(skb, sizeof(struct can_frame)),
&cf, sizeof(struct can_frame));
- netif_rx(skb);
+ netif_rx_ni(skb);
sl->dev->stats.rx_packets++;
sl->dev->stats.rx_bytes += cf.can_dlc;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f7bbde9..a812492 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -503,9 +503,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
/* Prepare mailbox for transmission */
+ data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
data |= HECC_CANMCF_RTR;
- data |= get_tx_head_prio(priv) << 8;
hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);
if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
@@ -923,6 +923,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->can.do_get_state = ti_hecc_get_state;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ spin_lock_init(&priv->mbx_lock);
ndev->irq = irq->start;
ndev->flags |= IFF_ECHO;
platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index b414f5a..fdb7a17 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -98,7 +98,7 @@
#include <net/checksum.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -2452,14 +2452,13 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct cas *cp = netdev_priv(dev);
unsigned long flags;
- int ring;
+ int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
/* check for shared irq */
if (status == 0)
return IRQ_NONE;
- ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
spin_lock_irqsave(&cp->lock, flags);
if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 086ce04..e0638cb 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -40,7 +40,7 @@
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/gpio.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 32636a1..805076c 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -34,7 +34,7 @@
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index fd3eb07..7a12d52 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -34,7 +34,7 @@
#include <linux/spinlock.h>
#include "t3cdev.h"
-#include <asm/atomic.h>
+#include <linux/atomic.h>
enum {
L2T_STATE_VALID, /* entry is up to date */
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
index be55e9a..705713b 100644
--- a/drivers/net/cxgb3/t3cdev.h
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -33,7 +33,7 @@
#define _T3CDEV_H_
#include <linux/list.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 1b48c01..b1d39b8 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -38,7 +38,7 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/* CPL message priority levels */
enum {
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
index 7bd8f42..02b31d0 100644
--- a/drivers/net/cxgb4/l2t.h
+++ b/drivers/net/cxgb4/l2t.h
@@ -37,7 +37,7 @@
#include <linux/spinlock.h>
#include <linux/if_ether.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct adapter;
struct l2t_data;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c5f0f04..5548d46 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -838,6 +838,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ E1000_WRITE_FLUSH();
msleep(10);
/* Test each interrupt */
@@ -856,6 +857,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, mask);
ew32(ICS, mask);
+ E1000_WRITE_FLUSH();
msleep(10);
if (adapter->test_icr & mask) {
@@ -873,6 +875,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMS, mask);
ew32(ICS, mask);
+ E1000_WRITE_FLUSH();
msleep(10);
if (!(adapter->test_icr & mask)) {
@@ -890,6 +893,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, ~mask & 0x00007FFF);
ew32(ICS, ~mask & 0x00007FFF);
+ E1000_WRITE_FLUSH();
msleep(10);
if (adapter->test_icr) {
@@ -901,6 +905,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ E1000_WRITE_FLUSH();
msleep(10);
/* Unhook test interrupt handler */
@@ -1394,6 +1399,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
if (unlikely(++k == txdr->count)) k = 0;
}
ew32(TDT, k);
+ E1000_WRITE_FLUSH();
msleep(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 1698622..8545c7a 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -446,6 +446,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
/* Must reset the PHY before resetting the MAC */
if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ E1000_WRITE_FLUSH();
msleep(5);
}
@@ -3752,6 +3753,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(1);
}
@@ -3824,6 +3826,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
eecd &= ~E1000_EECD_SK; /* Lower SCK */
ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(hw->eeprom.delay_usec);
} else if (hw->eeprom.type == e1000_eeprom_microwire) {
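The E1000_WRITE_FLUSH() calls added throughout the e1000 hunks above follow the usual MMIO posted-write pattern: the flush macro is conventionally a dummy read of a device register, which forces any posted PCI writes out to the hardware before the driver starts a delay. A minimal user-space sketch of the idea, using a volatile register block instead of the driver's ew32()/er32() macros (all names below are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Illustrative register block; a real device would be ioremap()ed MMIO. */
struct fake_regs {
	volatile uint32_t imc;    /* interrupt mask clear */
	volatile uint32_t status; /* any readable register on the device */
};

static void write_reg(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;            /* posted: may still sit in a write buffer */
}

static uint32_t read_reg(volatile uint32_t *reg)
{
	return *reg;           /* reads are non-posted and flush prior writes */
}

static void write_flush(struct fake_regs *hw)
{
	(void)read_reg(&hw->status);  /* dummy read, result discarded */
}

int main(void)
{
	struct fake_regs regs = { 0 };

	write_reg(&regs.imc, 0xFFFFFFFFu);  /* mask all interrupts */
	write_flush(&regs);                 /* make sure the write reached the device */
	/* ...only now is it safe to msleep()/udelay() assuming the write landed */
	printf("imc=0x%08x\n", (unsigned)read_reg(&regs.imc));
	return 0;
}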
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 480f259..536b3a5 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -2085,7 +2085,8 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_CHECK_PHY_HANG
- | FLAG2_DISABLE_ASPM_L0S,
+ | FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_NO_DISABLE_RX,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -2104,7 +2105,8 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_AMT
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .flags2 = FLAG2_DISABLE_ASPM_L0S,
+ .flags2 = FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_NO_DISABLE_RX,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 638d175..8533ad7 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -155,6 +155,9 @@ struct e1000_info;
#define HV_M_STATUS_SPEED_1000 0x0200
#define HV_M_STATUS_LINK_UP 0x0040
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
@@ -453,6 +456,8 @@ struct e1000_info {
#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
#define FLAG2_DISABLE_AIM (1 << 8)
#define FLAG2_CHECK_PHY_HANG (1 << 9)
+#define FLAG2_NO_DISABLE_RX (1 << 10)
+#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index c0ecb2d..e4f4225 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1313,6 +1313,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
@@ -1347,6 +1348,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index cb1a362..6a0526a 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -28,8 +28,8 @@
/* ethtool support for e1000 */
-#include <linux/interrupt.h>
#include <linux/netdevice.h>
+#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -964,6 +964,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
usleep_range(10000, 20000);
/* Test each interrupt */
@@ -996,6 +997,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, mask);
ew32(ICS, mask);
+ e1e_flush();
usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
@@ -1014,6 +1016,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMS, mask);
ew32(ICS, mask);
+ e1e_flush();
usleep_range(10000, 20000);
if (!(adapter->test_icr & mask)) {
@@ -1032,6 +1035,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, ~mask & 0x00007FFF);
ew32(ICS, ~mask & 0x00007FFF);
+ e1e_flush();
usleep_range(10000, 20000);
if (adapter->test_icr) {
@@ -1043,6 +1047,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
usleep_range(10000, 20000);
/* Unhook test interrupt handler */
@@ -1201,7 +1206,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rx_ring->next_to_clean = 0;
rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
ew32(RDBAH, ((u64) rx_ring->dma >> 32));
ew32(RDLEN, rx_ring->size);
@@ -1276,6 +1282,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_FD); /* Force Duplex to FULL */
ew32(CTRL, ctrl_reg);
+ e1e_flush();
udelay(500);
return 0;
@@ -1418,6 +1425,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
*/
#define E1000_SERDES_LB_ON 0x410
ew32(SCTL, E1000_SERDES_LB_ON);
+ e1e_flush();
usleep_range(10000, 20000);
return 0;
@@ -1513,6 +1521,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
hw->phy.media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
ew32(SCTL, E1000_SERDES_LB_OFF);
+ e1e_flush();
usleep_range(10000, 20000);
break;
}
@@ -1592,6 +1601,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
k = 0;
}
ew32(TDT, k);
+ e1e_flush();
msleep(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index c175212..54add27 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -137,8 +137,9 @@
#define HV_PM_CTRL PHY_REG(770, 17)
/* PHY Low Power Idle Control */
-#define I82579_LPI_CTRL PHY_REG(772, 20)
-#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
/* EMI Registers */
#define I82579_EMI_ADDR 0x10
@@ -163,6 +164,11 @@
#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
#define HV_KMRN_MDIO_SLOW 0x0400
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
+
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -283,6 +289,7 @@ static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
ew32(CTRL, ctrl);
+ e1e_flush();
udelay(10);
ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
ew32(CTRL, ctrl);
@@ -656,6 +663,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
bool link;
+ u16 phy_reg;
/*
* We only want to go out to the PHY registers to see if Auto-Neg
@@ -688,16 +696,35 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
mac->get_link_status = false;
- if (hw->phy.type == e1000_phy_82578) {
- ret_val = e1000_link_stall_workaround_hv(hw);
- if (ret_val)
- goto out;
- }
-
- if (hw->mac.type == e1000_pch2lan) {
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
ret_val = e1000_k1_workaround_lv(hw);
if (ret_val)
goto out;
+ /* fall-thru */
+ case e1000_pchlan:
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * Workaround for PCHx parts in half-duplex:
+ * Set the number of preambles removed from the packet
+ * when it is passed from the PHY to the MAC to prevent
+ * the MAC from misinterpreting the packet type.
+ */
+ e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
+ phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
+
+ if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
+ phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+
+ e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
+ break;
+ default:
+ break;
}
/*
@@ -787,6 +814,11 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
(adapter->hw.phy.type == e1000_phy_igp_3))
adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
+ /* Enable workaround for 82579 w/ ME enabled */
+ if ((adapter->hw.mac.type == e1000_pch2lan) &&
+ (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
+
/* Disable EEE by default until IEEE802.3az spec is finalized */
if (adapter->flags2 & FLAG2_HAS_EEE)
adapter->hw.dev_spec.ich8lan.eee_disable = true;
@@ -1230,9 +1262,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
ew32(CTRL, reg);
ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+ e1e_flush();
udelay(20);
ew32(CTRL, ctrl_reg);
ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
udelay(20);
out:
@@ -1352,7 +1386,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
return ret_val;
/* Preamble tuning for SSC */
- ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204);
+ ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
if (ret_val)
return ret_val;
}
@@ -1642,6 +1676,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
s32 ret_val = 0;
u16 status_reg = 0;
u32 mac_reg;
+ u16 phy_reg;
if (hw->mac.type != e1000_pch2lan)
goto out;
@@ -1656,12 +1691,19 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
mac_reg = er32(FEXTNVM4);
mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
- if (status_reg & HV_M_STATUS_SPEED_1000)
+ ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+ if (ret_val)
+ goto out;
+
+ if (status_reg & HV_M_STATUS_SPEED_1000) {
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
- else
+ phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+ } else {
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
-
+ phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+ }
ew32(FEXTNVM4, mac_reg);
+ ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
}
out:
@@ -2134,8 +2176,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
ret_val = 0;
for (i = 0; i < words; i++) {
- if ((dev_spec->shadow_ram) &&
- (dev_spec->shadow_ram[offset+i].modified)) {
+ if (dev_spec->shadow_ram[offset+i].modified) {
data[i] = dev_spec->shadow_ram[offset+i].value;
} else {
ret_val = e1000_read_flash_word_ich8lan(hw,
@@ -3090,6 +3131,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ret_val = e1000_acquire_swflag_ich8lan(hw);
e_dbg("Issuing a global reset to ich8lan\n");
ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ /* cannot issue a flush here because it hangs the hardware */
msleep(20);
if (!ret_val)
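The half-duplex preamble workaround above is a plain read-modify-write on a PHY register: read HV_KMRN_FIFO_CTRLSTA via e1e_rphy(), clear the preamble field with its mask, optionally insert a value through the shift, and write it back with e1e_wphy(). A stand-alone sketch of that mask/shift idiom (the field layout here is invented for illustration, it only mirrors the *_MASK/*_SHIFT style):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 3-bit field at bits 14:12. */
#define PREAMBLE_MASK  0x7000u
#define PREAMBLE_SHIFT 12

static uint16_t set_preamble(uint16_t reg, unsigned int count)
{
	reg &= (uint16_t)~PREAMBLE_MASK;                               /* clear the field */
	reg |= (uint16_t)((count << PREAMBLE_SHIFT) & PREAMBLE_MASK);  /* insert new value */
	return reg;
}

int main(void)
{
	uint16_t reg = 0xA2F4;  /* pretend this came from a PHY register read */

	reg = set_preamble(reg, 1);  /* e.g. half duplex: strip one extra preamble */
	printf("new reg = 0x%04x\n", reg);
	return 0;
}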
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 65580b40..0893ab1 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -190,7 +190,8 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
/* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
if (!((nvm_data & NVM_COMPAT_LOM) ||
(hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
goto out;
ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
@@ -200,10 +201,10 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
goto out;
}
- if (nvm_alt_mac_addr_offset == 0xFFFF) {
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
/* There is no Alternate MAC Address */
goto out;
- }
if (hw->bus.func == E1000_FUNC_1)
nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
@@ -1986,6 +1987,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
+ e1e_flush();
udelay(1);
/*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 4353ad5..2198e61 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -31,12 +31,12 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
@@ -56,7 +56,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "1.3.16" DRV_EXTRAVERSION
+#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -519,6 +519,63 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
}
/**
+ * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
+ * @hw: pointer to the HW structure
+ * @tail: address of tail descriptor register
+ * @i: value to write to tail descriptor register
+ *
+ * When updating the tail register, the ME could be accessing Host CSR
+ * registers at the same time. Normally, this is handled in h/w by an
+ * arbiter but on some parts there is a bug that acknowledges Host accesses
+ * later than it should, which can leave the descriptor register with an
+ * incorrect value. Work around this by checking the FWSM register, which
+ * has bit 24 set while the ME is accessing Host CSR registers; if it is
+ * set, wait and retry a bounded number of times.
+ **/
+static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
+ unsigned int i)
+{
+ unsigned int j = 0;
+
+ while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
+ (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
+ udelay(50);
+
+ writel(i, tail);
+
+ if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
+ return E1000_ERR_SWFW_SYNC;
+
+ return 0;
+}
+
+static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+ u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (e1000e_update_tail_wa(hw, tail, i)) {
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e_err("ME firmware caused invalid RDT - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+ u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (e1000e_update_tail_wa(hw, tail, i)) {
+ u32 tctl = er32(TCTL);
+ ew32(TCTL, tctl & ~E1000_TCTL_EN);
+ e_err("ME firmware caused invalid TDT - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+/**
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
**/
@@ -573,7 +630,10 @@ map_skb:
* such as IA-64).
*/
wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
}
i++;
if (i == rx_ring->count)
@@ -673,7 +733,11 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
* such as IA-64).
*/
wmb();
- writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i << 1);
+ else
+ writel(i << 1,
+ adapter->hw.hw_addr + rx_ring->tail);
}
i++;
@@ -756,7 +820,10 @@ check_page:
* applicable for weak-ordered memory model archs,
* such as IA-64). */
wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
}
}
@@ -2915,7 +2982,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* disable receives while setting up the descriptors */
rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
e1e_flush();
usleep_range(10000, 20000);
@@ -3394,7 +3462,8 @@ void e1000e_down(struct e1000_adapter *adapter)
/* disable receives in the hardware */
rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
/* flush and sleep below */
netif_stop_queue(netdev);
@@ -3403,6 +3472,7 @@ void e1000e_down(struct e1000_adapter *adapter)
tctl = er32(TCTL);
tctl &= ~E1000_TCTL_EN;
ew32(TCTL, tctl);
+
/* flush both disables and wait for them to finish */
e1e_flush();
usleep_range(10000, 20000);
@@ -4686,7 +4756,12 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
wmb();
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
+
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_tdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+
/*
* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
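The e1000e_update_tail_wa() helper above reduces to a generic shape: poll a "firmware is using the bus" bit for a bounded number of iterations, perform the tail write, and treat a timed-out poll plus a mismatching read-back as failure so the caller can disable the ring and reset. A compilable sketch of that retry-and-verify shape, with a stubbed busy flag standing in for the FWSM bit (names and values are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define BUSY_POLL_COUNT 2000

static volatile uint32_t fw_busy;   /* stands in for the FWSM busy bit */
static volatile uint32_t tail_reg;  /* stands in for the RDT/TDT register */

/* Returns 0 on success, -1 if firmware held the bus and the write was lost. */
static int update_tail_wa(uint32_t val)
{
	unsigned int tries = 0;

	/* wait (bounded) for the firmware to release the bus */
	while (tries < BUSY_POLL_COUNT && fw_busy)
		tries++;  /* the driver udelay(50)s on each iteration */

	tail_reg = val;

	/* if we ran out of tries, check whether the write actually stuck */
	if (tries == BUSY_POLL_COUNT && tail_reg != val)
		return -1;  /* give up: caller should reset the ring */

	return 0;
}

int main(void)
{
	if (update_tail_wa(42))
		fprintf(stderr, "tail write lost, resetting\n");
	else
		printf("tail = %u\n", (unsigned)tail_reg);
	return 0;
}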
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 2a6ee13..8666476 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -537,6 +537,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
@@ -609,6 +610,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 5b631fe..e8266cc 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -44,6 +44,10 @@
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
#include <asm/cacheflush.h>
@@ -66,17 +70,42 @@
#define FEC_QUIRK_ENET_MAC (1 << 0)
/* Controller needs driver to swap frame */
#define FEC_QUIRK_SWAP_FRAME (1 << 1)
+/* Controller uses gasket */
+#define FEC_QUIRK_USE_GASKET (1 << 2)
static struct platform_device_id fec_devtype[] = {
{
+ /* keep it for coldfire */
.name = DRIVER_NAME,
.driver_data = 0,
}, {
+ .name = "imx25-fec",
+ .driver_data = FEC_QUIRK_USE_GASKET,
+ }, {
+ .name = "imx27-fec",
+ .driver_data = 0,
+ }, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
- },
- { }
+ }, {
+ /* sentinel */
+ }
};
+MODULE_DEVICE_TABLE(platform, fec_devtype);
+
+enum imx_fec_type {
+ IMX25_FEC = 1, /* runs on i.mx25/50/53 */
+ IMX27_FEC, /* runs on i.mx27/35/51 */
+ IMX28_FEC,
+};
+
+static const struct of_device_id fec_dt_ids[] = {
+ { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
+ { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
+ { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fec_dt_ids);
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
@@ -427,7 +456,7 @@ fec_restart(struct net_device *ndev, int duplex)
} else {
#ifdef FEC_MIIGSK_ENR
- if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
/* disable the gasket and wait */
writel(0, fep->hwp + FEC_MIIGSK_ENR);
while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
@@ -436,8 +465,11 @@ fec_restart(struct net_device *ndev, int duplex)
/*
* configure the gasket:
* RMII, 50 MHz, no loopback, no echo
+ * MII, 25 MHz, no loopback, no echo
*/
- writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+ writel((fep->phy_interface == PHY_INTERFACE_MODE_RMII) ?
+ 1 : 0, fep->hwp + FEC_MIIGSK_CFGR);
+
/* re-enable the gasket */
writel(2, fep->hwp + FEC_MIIGSK_ENR);
@@ -734,8 +766,22 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
*/
iap = macaddr;
+#ifdef CONFIG_OF
/*
- * 2) from flash or fuse (via platform data)
+ * 2) from device tree data
+ */
+ if (!is_valid_ether_addr(iap)) {
+ struct device_node *np = fep->pdev->dev.of_node;
+ if (np) {
+ const char *mac = of_get_mac_address(np);
+ if (mac)
+ iap = (unsigned char *) mac;
+ }
+ }
+#endif
+
+ /*
+ * 3) from flash or fuse (via platform data)
*/
if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
@@ -748,7 +794,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
}
/*
- * 3) FEC mac registers set by bootloader
+ * 4) FEC mac registers set by bootloader
*/
if (!is_valid_ether_addr(iap)) {
*((unsigned long *) &tmpaddr[0]) =
@@ -1354,6 +1400,52 @@ static int fec_enet_init(struct net_device *ndev)
return 0;
}
+#ifdef CONFIG_OF
+static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np)
+ return of_get_phy_mode(np);
+
+ return -ENODEV;
+}
+
+static int __devinit fec_reset_phy(struct platform_device *pdev)
+{
+ int err, phy_reset;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+ err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset");
+ if (err) {
+ pr_warn("FEC: failed to get gpio phy-reset: %d\n", err);
+ return err;
+ }
+ msleep(1);
+ gpio_set_value(phy_reset, 1);
+
+ return 0;
+}
+#else /* CONFIG_OF */
+static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+static inline int fec_reset_phy(struct platform_device *pdev)
+{
+ /*
+ * In case of platform probe, the reset has been done
+ * by machine code.
+ */
+ return 0;
+}
+#endif /* CONFIG_OF */
+
static int __devinit
fec_probe(struct platform_device *pdev)
{
@@ -1362,6 +1454,11 @@ fec_probe(struct platform_device *pdev)
struct net_device *ndev;
int i, irq, ret = 0;
struct resource *r;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(fec_dt_ids, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
@@ -1393,9 +1490,18 @@ fec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
- pdata = pdev->dev.platform_data;
- if (pdata)
- fep->phy_interface = pdata->phy;
+ ret = fec_get_phy_mode_dt(pdev);
+ if (ret < 0) {
+ pdata = pdev->dev.platform_data;
+ if (pdata)
+ fep->phy_interface = pdata->phy;
+ else
+ fep->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ fep->phy_interface = ret;
+ }
+
+ fec_reset_phy(pdev);
/* This device has up to three irqs on some platforms */
for (i = 0; i < 3; i++) {
@@ -1530,6 +1636,7 @@ static struct platform_driver fec_driver = {
#ifdef CONFIG_PM
.pm = &fec_pm_ops,
#endif
+ .of_match_table = fec_dt_ids,
},
.id_table = fec_devtype,
.probe = fec_probe,
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index e64cd9c..6d5fbd4 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2764,7 +2764,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
prefetch(skb->data);
vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
- if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
+
+ /*
+ * There's a need to check for NETIF_F_HW_VLAN_RX here.
+ * Even if vlan rx accel is disabled,
+ * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
+ */
+ if (dev->features & NETIF_F_HW_VLAN_RX &&
+ vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
__vlan_hwaccel_put_tag(skb, vid);
@@ -5331,15 +5338,16 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM;
- dev->features |= dev->hw_features;
}
np->vlanctl_bits = 0;
if (id->driver_data & DEV_HAS_VLAN) {
np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
- dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
+ dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
}
+ dev->features |= dev->hw_features;
+
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5607,6 +5615,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_error;
}
+ if (id->driver_data & DEV_HAS_VLAN)
+ nv_vlan_mode(dev, dev->features);
+
netif_carrier_off(dev);
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 835cd25..31d5c57 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -388,12 +388,8 @@ static void gfar_init_mac(struct net_device *ndev)
if (priv->hwts_rx_en)
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
- /* keep vlan related bits if it's enabled */
- if (ndev->features & NETIF_F_HW_VLAN_TX)
- rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
-
if (ndev->features & NETIF_F_HW_VLAN_RX)
- tctrl |= TCTRL_VLINS;
+ rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
@@ -2714,8 +2710,13 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, dev);
- /* Set vlan tag */
- if (fcb->flags & RXFCB_VLN)
+ /*
+ * There's a need to check for NETIF_F_HW_VLAN_RX here.
+ * Even if vlan rx accel is disabled, on some chips
+ * RXFCB_VLN is pseudo-randomly set.
+ */
+ if (dev->features & NETIF_F_HW_VLAN_RX &&
+ fcb->flags & RXFCB_VLN)
__vlan_hwaccel_put_tag(skb, fcb->vlctl);
/* Send the packet up the stack */
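The forcedeth and gianfar hunks above make the same point: the hardware may set its "VLAN tag present" bit even when VLAN RX acceleration is switched off, so the driver must check the netdev feature flag before trusting it. The guard is just a two-flag test; a tiny sketch with invented flag values:

#include <stdint.h>
#include <stdio.h>

#define HW_VLAN_RX_FEATURE  (1u << 0)  /* "vlan rx accel enabled" */
#define HW_TAG_PRESENT      (1u << 1)  /* set by hw, possibly spuriously */

static int vlan_tag_valid(uint32_t dev_features, uint32_t rx_flags)
{
	/* only honour the hardware bit when the feature is actually on */
	return (dev_features & HW_VLAN_RX_FEATURE) &&
	       (rx_flags & HW_TAG_PRESENT);
}

int main(void)
{
	/* accel off but the chip set the bit anyway: must be ignored */
	printf("%d\n", vlan_tag_valid(0, HW_TAG_PRESENT));
	/* accel on and tag present: safe to pass the tag up the stack */
	printf("%d\n", vlan_tag_valid(HW_VLAN_RX_FEATURE, HW_TAG_PRESENT));
	return 0;
}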
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6e35069..25a8c2a 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -686,10 +686,21 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
{
unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
- unsigned int local_rqfpr[MAX_FILER_IDX + 1];
- unsigned int local_rqfcr[MAX_FILER_IDX + 1];
+ unsigned int *local_rqfpr;
+ unsigned int *local_rqfcr;
int i = 0x0, k = 0x0;
int j = MAX_FILER_IDX, l = 0x0;
+ int ret = 1;
+
+ local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
+ GFP_KERNEL);
+ local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
+ GFP_KERNEL);
+ if (!local_rqfpr || !local_rqfcr) {
+ pr_err("Out of memory\n");
+ ret = 0;
+ goto err;
+ }
switch (class) {
case TCP_V4_FLOW:
@@ -706,7 +717,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
break;
default:
pr_err("Right now this class is not supported\n");
- return 0;
+ ret = 0;
+ goto err;
}
for (i = 0; i < MAX_FILER_IDX + 1; i++) {
@@ -721,7 +733,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
if (i == MAX_FILER_IDX + 1) {
pr_err("No parse rule found, can't create hash rules\n");
- return 0;
+ ret = 0;
+ goto err;
}
/* If a match was found, then it begins the starting of a cluster rule
@@ -765,7 +778,10 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
- return 1;
+err:
+ kfree(local_rqfcr);
+ kfree(local_rqfpr);
+ return ret;
}
static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
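The gfar_ethflow_to_filer_table() change above moves two large temporary arrays off the stack and onto the heap, which in turn forces a single exit path that frees both buffers whether the function succeeds or fails. A user-space sketch of that allocate/goto-cleanup shape, using malloc/free in place of kmalloc/kfree (the size constant is illustrative, not the driver's value):

#include <stdio.h>
#include <stdlib.h>

#define MAX_FILER_IDX 254  /* illustrative size only */

static int build_table(void)
{
	unsigned int *rqfpr, *rqfcr;
	int ret = 1;  /* mirror the driver: 1 = success, 0 = failure */

	rqfpr = malloc(sizeof(*rqfpr) * (MAX_FILER_IDX + 1));
	rqfcr = malloc(sizeof(*rqfcr) * (MAX_FILER_IDX + 1));
	if (!rqfpr || !rqfcr) {
		fprintf(stderr, "out of memory\n");
		ret = 0;
		goto err;
	}

	/* ... fill and program the filer tables here ... */

err:
	free(rqfcr);  /* free(NULL) is a no-op, like kfree(NULL) */
	free(rqfpr);
	return ret;
}

int main(void)
{
	return build_table() ? 0 : 1;
}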
diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c
index 1c97861..f67b8ae 100644
--- a/drivers/net/gianfar_ptp.c
+++ b/drivers/net/gianfar_ptp.c
@@ -193,14 +193,9 @@ static void set_alarm(struct etsects *etsects)
/* Caller must hold etsects->lock. */
static void set_fipers(struct etsects *etsects)
{
- u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl);
-
- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE));
- gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
+ set_alarm(etsects);
gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
- set_alarm(etsects);
- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE);
}
/*
@@ -511,7 +506,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
set_alarm(etsects);
- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE);
+ gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD);
spin_unlock_irqrestore(&etsects->lock, flags);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 0d28378..2a5a34d 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -36,7 +36,7 @@
#include <linux/tcp.h>
#include <linux/semaphore.h>
#include <linux/compat.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define SIXPACK_VERSION "Revision: 0.3.0"
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 52b1425..ce555d9 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -36,7 +36,7 @@
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 725399e..70cb7d8 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -39,6 +39,7 @@
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/of_net.h>
#include <linux/slab.h>
#include <asm/processor.h>
@@ -2506,18 +2507,6 @@ static int __devinit emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
const void *p;
- unsigned int plen;
- const char *pm, *phy_modes[] = {
- [PHY_MODE_NA] = "",
- [PHY_MODE_MII] = "mii",
- [PHY_MODE_RMII] = "rmii",
- [PHY_MODE_SMII] = "smii",
- [PHY_MODE_RGMII] = "rgmii",
- [PHY_MODE_TBI] = "tbi",
- [PHY_MODE_GMII] = "gmii",
- [PHY_MODE_RTBI] = "rtbi",
- [PHY_MODE_SGMII] = "sgmii",
- };
/* Read config from device-tree */
if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
@@ -2566,23 +2555,9 @@ static int __devinit emac_init_config(struct emac_instance *dev)
dev->mal_burst_size = 256;
/* PHY mode needs some decoding */
- dev->phy_mode = PHY_MODE_NA;
- pm = of_get_property(np, "phy-mode", &plen);
- if (pm != NULL) {
- int i;
- for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
- if (!strcasecmp(pm, phy_modes[i])) {
- dev->phy_mode = i;
- break;
- }
- }
-
- /* Backward compat with non-final DT */
- if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
- u32 nmode = *(const u32 *)pm;
- if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
- dev->phy_mode = nmode;
- }
+ dev->phy_mode = of_get_phy_mode(np);
+ if (dev->phy_mode < 0)
+ dev->phy_mode = PHY_MODE_NA;
/* Check EMAC version */
if (of_device_is_compatible(np, "ibm,emac4sync")) {
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index 8a61b59..1568278 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -26,6 +26,7 @@
#define __IBM_NEWEMAC_H
#include <linux/types.h>
+#include <linux/phy.h>
/* EMAC registers Write Access rules */
struct emac_regs {
@@ -106,15 +107,15 @@ struct emac_regs {
/*
* PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
*/
-#define PHY_MODE_NA 0
-#define PHY_MODE_MII 1
-#define PHY_MODE_RMII 2
-#define PHY_MODE_SMII 3
-#define PHY_MODE_RGMII 4
-#define PHY_MODE_TBI 5
-#define PHY_MODE_GMII 6
-#define PHY_MODE_RTBI 7
-#define PHY_MODE_SGMII 8
+#define PHY_MODE_NA PHY_INTERFACE_MODE_NA
+#define PHY_MODE_MII PHY_INTERFACE_MODE_MII
+#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII
+#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII
+#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
+#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI
+#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII
+#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI
+#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
/* EMACx_MR0 */
#define EMAC_MR0_RXI 0x80000000
diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
index ac9d964..ab4e596 100644
--- a/drivers/net/ibm_newemac/phy.c
+++ b/drivers/net/ibm_newemac/phy.c
@@ -28,12 +28,15 @@
#include "emac.h"
#include "phy.h"
-static inline int phy_read(struct mii_phy *phy, int reg)
+#define phy_read _phy_read
+#define phy_write _phy_write
+
+static inline int _phy_read(struct mii_phy *phy, int reg)
{
return phy->mdio_read(phy->dev, phy->address, reg);
}
-static inline void phy_write(struct mii_phy *phy, int reg, int val)
+static inline void _phy_write(struct mii_phy *phy, int reg, int val)
{
phy->mdio_write(phy->dev, phy->address, reg, val);
}
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 838c5b6..3e66792 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -43,7 +43,7 @@
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
@@ -395,7 +395,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
}
/* recycle the current buffer on the rx queue */
-static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
u32 q_index = adapter->rx_queue.index;
u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
@@ -403,6 +403,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
unsigned int index = correlator & 0xffffffffUL;
union ibmveth_buf_desc desc;
unsigned long lpar_rc;
+ int ret = 1;
BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
BUG_ON(index >= adapter->rx_buff_pool[pool].size);
@@ -410,7 +411,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
if (!adapter->rx_buff_pool[pool].active) {
ibmveth_rxq_harvest_buffer(adapter);
ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
- return;
+ goto out;
}
desc.fields.flags_len = IBMVETH_BUF_VALID |
@@ -423,12 +424,16 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
"during recycle rc=%ld", lpar_rc);
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+ ret = 0;
}
if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}
+
+out:
+ return ret;
}
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
@@ -1084,8 +1089,9 @@ restart_poll:
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
+ if (!ibmveth_rxq_recycle_buffer(adapter))
+ kfree_skb(skb);
skb = new_skb;
- ibmveth_rxq_recycle_buffer(adapter);
} else {
ibmveth_rxq_harvest_buffer(adapter);
skb_reserve(skb, offset);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 6e82dd3..46b5f5f 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -183,7 +183,7 @@ static void ifb_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
random_ether_addr(dev->dev_addr);
}
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index 7dcd65c..4040712 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -285,6 +285,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
wr32(E1000_EECD, eecd);
+ wrfl();
udelay(1);
timeout = NVM_MAX_RETRY_SPI;
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index ff244ce..414b022 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1225,6 +1225,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
+ wrfl();
msleep(10);
/* Define all writable bits for ICS */
@@ -1268,6 +1269,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, mask);
wr32(E1000_ICS, mask);
+ wrfl();
msleep(10);
if (adapter->test_icr & mask) {
@@ -1289,6 +1291,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
+ wrfl();
msleep(10);
if (!(adapter->test_icr & mask)) {
@@ -1310,6 +1313,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, ~mask);
wr32(E1000_ICS, ~mask);
+ wrfl();
msleep(10);
if (adapter->test_icr & mask) {
@@ -1321,6 +1325,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
+ wrfl();
msleep(10);
/* Unhook test interrupt handler */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index dc59905..40d4c40 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1052,6 +1052,7 @@ msi_only:
kfree(adapter->vf_data);
adapter->vf_data = NULL;
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
msleep(100);
dev_info(&adapter->pdev->dev, "IOV Disabled\n");
}
@@ -2022,7 +2023,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (hw->bus.func == 0)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type == e1000_82580)
+ else if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
@@ -2198,6 +2199,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
kfree(adapter->vf_data);
adapter->vf_data = NULL;
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
msleep(100);
dev_info(&pdev->dev, "IOV Disabled\n");
}
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1330c8e..40ed066 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1226,6 +1226,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
/* disable transmits */
txdctl = er32(TXDCTL(0));
ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+ e1e_flush();
msleep(10);
/* Setup the HW Tx Head and Tail descriptor pointers */
@@ -1306,6 +1307,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
/* disable receives */
rxdctl = er32(RXDCTL(0));
ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+ e1e_flush();
msleep(10);
rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4488bd5..8266067 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -22,6 +22,8 @@
* - DMA transfer support
* - FIFO mode support
*/
+#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 52a7c86..ed7d7d6 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -12,6 +12,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -511,7 +513,7 @@ static void sh_sir_tx(struct sh_sir_self *self, int phase)
static int sh_sir_read_data(struct sh_sir_self *self)
{
- u16 val;
+ u16 val = 0;
int timeout = 1024;
while (timeout--) {
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 954f6e93..8b1c348 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2405,8 +2405,6 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
* addresses making a subsystem device table necessary.
*/
#ifdef CONFIG_PCI
-#define PCIID_VENDOR_INTEL 0x8086
-#define PCIID_VENDOR_ALI 0x10b9
static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
/*
* Subsystems needing entries:
@@ -2416,7 +2414,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
*/
{
/* Guessed entry */
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x103c,
.subdevice = 0x08bc,
@@ -2429,7 +2427,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "HP nx5000 family",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x103c,
.subdevice = 0x088c,
@@ -2443,7 +2441,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "HP nc8000 family",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x103c,
.subdevice = 0x0890,
@@ -2456,7 +2454,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "HP nc6000 family",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x0e11,
.subdevice = 0x0860,
@@ -2471,7 +2469,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
},
{
/* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */
- .vendor = PCIID_VENDOR_INTEL,
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = 0x24c0,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
@@ -2484,7 +2482,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801CAM ISA bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801CAM ISA bridge */
.device = 0x248c,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
@@ -2498,7 +2496,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
},
{
/* 82801DBM (ICH4-M) LPC Interface Bridge */
- .vendor = PCIID_VENDOR_INTEL,
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = 0x24cc,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
@@ -2512,7 +2510,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
},
{
/* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */
- .vendor = PCIID_VENDOR_ALI,
+ .vendor = PCI_VENDOR_ID_AL,
.device = 0x1533,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index c982ab9..38b362b 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -57,6 +57,7 @@ ixgb_raise_clock(struct ixgb_hw *hw,
*/
*eecd_reg = *eecd_reg | IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
@@ -75,6 +76,7 @@ ixgb_lower_clock(struct ixgb_hw *hw,
*/
*eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
@@ -112,6 +114,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
eecd_reg |= IXGB_EECD_DI;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
@@ -206,21 +209,25 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
/* Deselect EEPROM */
eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Clock high */
eecd_reg |= IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Select EEPROM */
eecd_reg |= IXGB_EECD_CS;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Clock low */
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
@@ -239,11 +246,13 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
/* Rising edge of clock */
eecd_reg |= IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Falling edge of clock */
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 6cb2e42..3d61a9e 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -149,6 +149,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
*/
IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
+ IXGB_WRITE_FLUSH(hw);
msleep(IXGB_DELAY_BEFORE_RESET);
/* Issue a global reset to the MAC. This will reset the chip's
@@ -1220,6 +1221,7 @@ ixgb_optics_reset_bcm(struct ixgb_hw *hw)
ctrl &= ~IXGB_CTRL0_SDP2;
ctrl |= IXGB_CTRL0_SDP3;
IXGB_WRITE_REG(hw, CTRL0, ctrl);
+ IXGB_WRITE_FLUSH(hw);
/* SerDes needs extra delay */
msleep(IXGB_SUN_PHY_RESET_DELAY);
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 3b3dd4d..34f30ec 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -213,6 +213,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_tn:
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
break;
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 777051f..fc1375f 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -2632,6 +2632,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
usleep_range(10000, 20000);
}
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index dc64955..82d4244 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1378,6 +1378,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Disable all the interrupts */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
/* Test each interrupt */
@@ -1398,6 +1399,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
~mask & 0x00007FFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
@@ -1415,6 +1417,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
if (!(adapter->test_icr &mask)) {
@@ -1435,6 +1438,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
~mask & 0x00007FFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
if (adapter->test_icr) {
@@ -1446,6 +1450,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Disable all the interrupts */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
/* Unhook test interrupt handler */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 1be6175..2279039 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -184,6 +184,7 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+ IXGBE_WRITE_FLUSH(hw);
/* take a breather then clean up driver data */
msleep(100);
@@ -1005,7 +1006,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
unsigned long event = *(unsigned long *)data;
- if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+ if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
return 0;
switch (event) {
@@ -1458,8 +1459,10 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
staterr);
- if (!ddp_bytes)
+ if (!ddp_bytes) {
+ dev_kfree_skb_any(skb);
goto next_desc;
+ }
}
#endif /* IXGBE_FCOE */
ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 735f686..f7ca351 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1585,6 +1585,7 @@ static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
*i2cctl |= IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
udelay(IXGBE_I2C_T_RISE);
@@ -1605,6 +1606,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
*i2cctl &= ~IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
udelay(IXGBE_I2C_T_FALL);
@@ -1628,6 +1630,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
*i2cctl &= ~IXGBE_I2C_DATA_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index bec30ed..2696c78 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -162,6 +162,7 @@ mac_reset_top:
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
msleep(50);
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 0fcdc25..dc4e305 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -322,6 +322,9 @@ static void macb_tx(struct macb *bp)
for (i = 0; i < TX_RING_SIZE; i++)
bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ /* Add wrap bit */
+ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
/* free transmit buffer in upper layer*/
for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
struct ring_info *rp = &bp->tx_skb[tail];
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index ba631fc..05172c3 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -572,7 +572,7 @@ void macvlan_common_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->netdev_ops = &macvlan_netdev_ops;
dev->destructor = free_netdev;
dev->header_ops = &macvlan_hard_header_ops,
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index 5e71091..5ada5b46 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -128,7 +128,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
- context->n_mac = 0x7;
+ context->n_mac = 0x2;
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
base_qpn);
context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index c94b342..f0ee35d 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1117,6 +1117,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->port = port;
mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table);
+ info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ (port - 1) * (1 << log_num_mac);
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 1f95afd..609e0ec 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -258,9 +258,12 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
if (validate_index(dev, table, index))
goto out;
- table->entries[index] = 0;
- mlx4_set_port_mac_table(dev, port, table->entries);
- --table->total;
+ /* Check whether this address still has references */
+ if (!(--table->refs[index])) {
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
+ }
out:
mutex_unlock(&table->mutex);
}
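The mlx4 port change above stops clearing a MAC table entry outright and instead drops a per-entry reference count, only wiping the entry (and pushing the table to hardware) when the last user goes away. A minimal sketch of that pattern with a plain array of counters; the structure and names are invented for illustration:

#include <stdio.h>

#define TABLE_SIZE 8

struct mac_table {
	unsigned long long entries[TABLE_SIZE];
	int refs[TABLE_SIZE];
	int total;
};

static void unregister_mac(struct mac_table *t, int index)
{
	if (index < 0 || index >= TABLE_SIZE || t->refs[index] == 0)
		return;

	/* only clear the entry once the last reference is gone */
	if (--t->refs[index] == 0) {
		t->entries[index] = 0;
		/* a real driver would reprogram the hardware table here */
		t->total--;
	}
}

int main(void)
{
	struct mac_table t = { .entries = { 0x001122334455ULL },
			       .refs = { 2 }, .total = 1 };

	unregister_mac(&t, 0);  /* still referenced: entry kept */
	unregister_mac(&t, 0);  /* last reference: entry cleared */
	printf("total=%d entry=%llx\n", t.total, t.entries[0]);
	return 0;
}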
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index cd6c231..ed47585 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9201,7 +9201,7 @@ static int __devinit niu_ldg_init(struct niu *np)
first_chan = 0;
for (i = 0; i < port; i++)
- first_chan += parent->rxchan_per_port[port];
+ first_chan += parent->rxchan_per_port[i];
num_chan = parent->rxchan_per_port[port];
for (i = first_chan; i < (first_chan + num_chan); i++) {
@@ -9217,7 +9217,7 @@ static int __devinit niu_ldg_init(struct niu *np)
first_chan = 0;
for (i = 0; i < port; i++)
- first_chan += parent->txchan_per_port[port];
+ first_chan += parent->txchan_per_port[i];
num_chan = parent->txchan_per_port[port];
for (i = first_chan; i < (first_chan + num_chan); i++) {
err = niu_ldg_assign_ldn(np, parent,
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 1cd9394..cffbc03 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -809,7 +809,7 @@ static int smc91c92_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
char *name;
- int i, j, rev;
+ int i, rev, j = 0;
unsigned int ioaddr;
u_long mir;
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 8b3090d..80b6f36 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -82,7 +82,7 @@ static int cards_found;
/*
* VLB I/O addresses
*/
-static unsigned int pcnet32_portlist[] __initdata =
+static unsigned int pcnet32_portlist[] =
{ 0x300, 0x320, 0x340, 0x360, 0 };
static int pcnet32_debug;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2cd8dc5..cb6e0b4 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -34,8 +34,7 @@
#define PAGESEL 0x13
#define LAYER4 0x02
#define LAYER2 0x01
-#define MAX_RXTS 4
-#define MAX_TXTS 4
+#define MAX_RXTS 64
#define N_EXT_TS 1
#define PSF_PTPVER 2
#define PSF_EVNT 0x4000
@@ -218,7 +217,7 @@ static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
rxts->seqid = p->seqid;
rxts->msgtype = (p->msgtype >> 12) & 0xf;
rxts->hash = p->msgtype & 0x0fff;
- rxts->tmo = jiffies + HZ;
+ rxts->tmo = jiffies + 2;
}
static u64 phy2txts(struct phy_txts *p)
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 0620ba9..04bb8fc 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -25,8 +25,9 @@
/* DP83865 phy identifier values */
#define DP83865_PHY_ID 0x20005c7a
-#define DP83865_INT_MASK_REG 0x15
-#define DP83865_INT_MASK_STATUS 0x14
+#define DP83865_INT_STATUS 0x14
+#define DP83865_INT_MASK 0x15
+#define DP83865_INT_CLEAR 0x17
#define DP83865_INT_REMOTE_FAULT 0x0008
#define DP83865_INT_ANE_COMPLETED 0x0010
@@ -68,21 +69,25 @@ static int ns_config_intr(struct phy_device *phydev)
int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, DP83865_INT_MASK_REG,
+ err = phy_write(phydev, DP83865_INT_MASK,
DP83865_INT_MASK_DEFAULT);
else
- err = phy_write(phydev, DP83865_INT_MASK_REG, 0);
+ err = phy_write(phydev, DP83865_INT_MASK, 0);
return err;
}
static int ns_ack_interrupt(struct phy_device *phydev)
{
- int ret = phy_read(phydev, DP83865_INT_MASK_STATUS);
+ int ret = phy_read(phydev, DP83865_INT_STATUS);
if (ret < 0)
return ret;
- return 0;
+ /* Clear the interrupt status bit by writing a "1"
+ * to the corresponding bit in INT_CLEAR (2:0 are reserved) */
+ ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7);
+
+ return ret;
}
static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a475957..3cbda08 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -33,7 +33,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 4609bc0..10e5d98 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -48,7 +48,7 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/slhc_vj.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7d9c650..02339b3 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -239,6 +239,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
{ PCI_VENDOR_ID_LINKSYS, 0x1032,
@@ -1091,6 +1092,21 @@ rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
}
+struct exgmac_reg {
+ u16 addr;
+ u16 mask;
+ u32 val;
+};
+
+static void rtl_write_exgmac_batch(void __iomem *ioaddr,
+ const struct exgmac_reg *r, int len)
+{
+ while (len-- > 0) {
+ rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
+ r++;
+ }
+}
+
static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
{
u8 value = 0xff;
@@ -3116,6 +3132,18 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
RTL_W32(MAC0, low);
RTL_R32(MAC0);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ const struct exgmac_reg e[] = {
+ { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
+ { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
+ { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
+ { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
+ low >> 16 },
+ };
+
+ rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
+ }
+
RTL_W8(Cfg9346, Cfg9346_Lock);
spin_unlock_irq(&tp->lock);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 86ac38c..3bb1311 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -80,13 +80,13 @@ static int rionet_capable = 1;
*/
static struct rio_dev **rionet_active;
-#define is_rionet_capable(pef, src_ops, dst_ops) \
- ((pef & RIO_PEF_INB_MBOX) && \
- (pef & RIO_PEF_INB_DOORBELL) && \
+#define is_rionet_capable(src_ops, dst_ops) \
+ ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
+ (dst_ops & RIO_DST_OPS_DATA_MSG) && \
(src_ops & RIO_SRC_OPS_DOORBELL) && \
(dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
- is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)
+ is_rionet_capable(dev->src_ops, dev->dst_ops)
#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
@@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev)
{
int i, rc = 0;
struct rionet_peer *peer, *tmp;
- u32 pwdcsr;
struct rionet_private *rnet = netdev_priv(ndev);
if (netif_msg_ifup(rnet))
@@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev)
continue;
}
- /*
- * If device has initialized inbound doorbells,
- * send a join message
- */
- rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
- if (pwdcsr & RIO_DOORBELL_AVAIL)
- rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
+ /* Send a join message */
+ rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
}
out:
@@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
int rc = -ENODEV;
- u32 lpef, lsrc_ops, ldst_ops;
+ u32 lsrc_ops, ldst_ops;
struct rionet_peer *peer;
struct net_device *ndev = NULL;
@@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
* on later probes
*/
if (!rionet_check) {
- rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
&lsrc_ops);
rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
&ldst_ops);
- if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) {
+ if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
printk(KERN_ERR
"%s: local device is not network capable\n",
DRV_NAME);
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index ad35c21..1c1666e 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -21,6 +21,7 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
@@ -30,6 +31,7 @@
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 8ad7bfb..3c0f131 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1825,6 +1825,16 @@ static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
}
+static int sis190_mac_addr(struct net_device *dev, void *p)
+{
+ int rc;
+
+ rc = eth_mac_addr(dev, p);
+ if (!rc)
+ sis190_init_rxfilter(dev);
+ return rc;
+}
+
static const struct net_device_ops sis190_netdev_ops = {
.ndo_open = sis190_open,
.ndo_stop = sis190_close,
@@ -1833,7 +1843,7 @@ static const struct net_device_ops sis190_netdev_ops = {
.ndo_tx_timeout = sis190_tx_timeout,
.ndo_set_multicast_list = sis190_set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = sis190_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sis190_netpoll,
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index f11b3f3..4c61753 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -367,7 +367,7 @@ static void sl_bump(struct slip *sl)
memcpy(skb_put(skb, count), sl->rbuff, count);
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
- netif_rx(skb);
+ netif_rx_ni(skb);
dev->stats.rx_packets++;
}
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index be745ae..ade35dd 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -46,14 +46,15 @@
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#ifdef CONFIG_SPARC
#include <asm/idprom.h>
+#include <asm/prom.h>
#endif
#ifdef CONFIG_PPC_PMAC
#include <asm/pci-bridge.h>
+#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 8035765..dc3fbf6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -190,6 +190,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
+#define TG3_TX_BD_DMA_MAX 4096
#define TG3_RAW_IP_ALIGN 2
@@ -4824,7 +4825,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
txq = netdev_get_tx_queue(tp->dev, index);
while (sw_idx != hw_idx) {
- struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
+ struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
struct sk_buff *skb = ri->skb;
int i, tx_bug = 0;
@@ -4840,6 +4841,12 @@ static void tg3_tx(struct tg3_napi *tnapi)
ri->skb = NULL;
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
sw_idx = NEXT_TX(sw_idx);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -4851,6 +4858,13 @@ static void tg3_tx(struct tg3_napi *tnapi)
dma_unmap_addr(ri, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
+
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
sw_idx = NEXT_TX(sw_idx);
}
@@ -5901,40 +5915,100 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
#endif
}
-static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
- dma_addr_t mapping, int len, u32 flags,
- u32 mss_and_is_end)
+static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
+ dma_addr_t mapping, u32 len, u32 flags,
+ u32 mss, u32 vlan)
+{
+ txbd->addr_hi = ((u64) mapping >> 32);
+ txbd->addr_lo = ((u64) mapping & 0xffffffff);
+ txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
+ txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
+}
+
+static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
+ dma_addr_t map, u32 len, u32 flags,
+ u32 mss, u32 vlan)
{
- struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
- int is_end = (mss_and_is_end & 0x1);
- u32 mss = (mss_and_is_end >> 1);
- u32 vlan_tag = 0;
+ struct tg3 *tp = tnapi->tp;
+ bool hwbug = false;
+
+ if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
+ hwbug = 1;
+
+ if (tg3_4g_overflow_test(map, len))
+ hwbug = 1;
+
+ if (tg3_40bit_overflow_test(tp, map, len))
+ hwbug = 1;
+
+ if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+ u32 tmp_flag = flags & ~TXD_FLAG_END;
+ while (len > TG3_TX_BD_DMA_MAX) {
+ u32 frag_len = TG3_TX_BD_DMA_MAX;
+ len -= TG3_TX_BD_DMA_MAX;
+
+ if (len) {
+ tnapi->tx_buffers[*entry].fragmented = true;
+ /* Avoid the 8byte DMA problem */
+ if (len <= 8) {
+ len += TG3_TX_BD_DMA_MAX / 2;
+ frag_len = TG3_TX_BD_DMA_MAX / 2;
+ }
+ } else
+ tmp_flag = flags;
+
+ if (*budget) {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ frag_len, tmp_flag, mss, vlan);
+ (*budget)--;
+ *entry = NEXT_TX(*entry);
+ } else {
+ hwbug = 1;
+ break;
+ }
+
+ map += frag_len;
+ }
- if (is_end)
- flags |= TXD_FLAG_END;
- if (flags & TXD_FLAG_VLAN) {
- vlan_tag = flags >> 16;
- flags &= 0xffff;
+ if (len) {
+ if (*budget) {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ len, flags, mss, vlan);
+ (*budget)--;
+ *entry = NEXT_TX(*entry);
+ } else {
+ hwbug = 1;
+ }
+ }
+ } else {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ len, flags, mss, vlan);
+ *entry = NEXT_TX(*entry);
}
- vlan_tag |= (mss << TXD_MSS_SHIFT);
- txd->addr_hi = ((u64) mapping >> 32);
- txd->addr_lo = ((u64) mapping & 0xffffffff);
- txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
- txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
+ return hwbug;
}
-static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
- struct sk_buff *skb, int last)
+static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
int i;
- u32 entry = tnapi->tx_prod;
- struct ring_info *txb = &tnapi->tx_buffers[entry];
+ struct sk_buff *skb;
+ struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
+
+ skb = txb->skb;
+ txb->skb = NULL;
pci_unmap_single(tnapi->tp->pdev,
dma_unmap_addr(txb, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
+
+ while (txb->fragmented) {
+ txb->fragmented = false;
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+ }
+
for (i = 0; i < last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -5944,18 +6018,24 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
pci_unmap_page(tnapi->tp->pdev,
dma_unmap_addr(txb, mapping),
frag->size, PCI_DMA_TODEVICE);
+
+ while (txb->fragmented) {
+ txb->fragmented = false;
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+ }
}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
struct sk_buff *skb,
- u32 base_flags, u32 mss)
+ u32 *entry, u32 *budget,
+ u32 base_flags, u32 mss, u32 vlan)
{
struct tg3 *tp = tnapi->tp;
struct sk_buff *new_skb;
dma_addr_t new_addr = 0;
- u32 entry = tnapi->tx_prod;
int ret = 0;
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
@@ -5976,24 +6056,22 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
PCI_DMA_TODEVICE);
/* Make sure the mapping succeeded */
if (pci_dma_mapping_error(tp->pdev, new_addr)) {
- ret = -1;
dev_kfree_skb(new_skb);
-
- /* Make sure new skb does not cross any 4G boundaries.
- * Drop the packet if it does.
- */
- } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
- pci_unmap_single(tp->pdev, new_addr, new_skb->len,
- PCI_DMA_TODEVICE);
ret = -1;
- dev_kfree_skb(new_skb);
} else {
- tnapi->tx_buffers[entry].skb = new_skb;
- dma_unmap_addr_set(&tnapi->tx_buffers[entry],
+ base_flags |= TXD_FLAG_END;
+
+ tnapi->tx_buffers[*entry].skb = new_skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
mapping, new_addr);
- tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
- base_flags, 1 | (mss << 1));
+ if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
+ new_skb->len, base_flags,
+ mss, vlan)) {
+ tg3_tx_skb_unmap(tnapi, *entry, 0);
+ dev_kfree_skb(new_skb);
+ ret = -1;
+ }
}
}
@@ -6051,7 +6129,8 @@ tg3_tso_bug_end:
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
- u32 len, entry, base_flags, mss;
+ u32 len, entry, base_flags, mss, vlan = 0;
+ u32 budget;
int i = -1, would_hit_hwbug;
dma_addr_t mapping;
struct tg3_napi *tnapi;
@@ -6063,12 +6142,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (tg3_flag(tp, ENABLE_TSS))
tnapi++;
+ budget = tg3_tx_avail(tnapi);
+
/* We are running in BH disabled context with netif_tx_lock
* and TX reclaim runs via tp->napi.poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_tx_queue_stopped(txq)) {
netif_tx_stop_queue(txq);
@@ -6153,9 +6234,12 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (vlan_tx_tag_present(skb))
- base_flags |= (TXD_FLAG_VLAN |
- (vlan_tx_tag_get(skb) << 16));
+#ifdef BCM_KERNEL_SUPPORTS_8021Q
+ if (vlan_tx_tag_present(skb)) {
+ base_flags |= TXD_FLAG_VLAN;
+ vlan = vlan_tx_tag_get(skb);
+ }
+#endif
if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
!mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -6174,25 +6258,23 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
would_hit_hwbug = 0;
- if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
- would_hit_hwbug = 1;
-
- if (tg3_4g_overflow_test(mapping, len))
- would_hit_hwbug = 1;
-
- if (tg3_40bit_overflow_test(tp, mapping, len))
- would_hit_hwbug = 1;
-
if (tg3_flag(tp, 5701_DMA_BUG))
would_hit_hwbug = 1;
- tg3_set_txd(tnapi, entry, mapping, len, base_flags,
- (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
-
- entry = NEXT_TX(entry);
+ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
+ ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
+ mss, vlan))
+ would_hit_hwbug = 1;
/* Now loop through additional data fragments, and queue them. */
if (skb_shinfo(skb)->nr_frags > 0) {
+ u32 tmp_mss = mss;
+
+ if (!tg3_flag(tp, HW_TSO_1) &&
+ !tg3_flag(tp, HW_TSO_2) &&
+ !tg3_flag(tp, HW_TSO_3))
+ tmp_mss = 0;
+
last = skb_shinfo(skb)->nr_frags - 1;
for (i = 0; i <= last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -6209,39 +6291,25 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pci_dma_mapping_error(tp->pdev, mapping))
goto dma_error;
- if (tg3_flag(tp, SHORT_DMA_BUG) &&
- len <= 8)
+ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+ len, base_flags |
+ ((i == last) ? TXD_FLAG_END : 0),
+ tmp_mss, vlan))
would_hit_hwbug = 1;
-
- if (tg3_4g_overflow_test(mapping, len))
- would_hit_hwbug = 1;
-
- if (tg3_40bit_overflow_test(tp, mapping, len))
- would_hit_hwbug = 1;
-
- if (tg3_flag(tp, HW_TSO_1) ||
- tg3_flag(tp, HW_TSO_2) ||
- tg3_flag(tp, HW_TSO_3))
- tg3_set_txd(tnapi, entry, mapping, len,
- base_flags, (i == last)|(mss << 1));
- else
- tg3_set_txd(tnapi, entry, mapping, len,
- base_flags, (i == last));
-
- entry = NEXT_TX(entry);
}
}
if (would_hit_hwbug) {
- tg3_skb_error_unmap(tnapi, skb, i);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
- if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
+ entry = tnapi->tx_prod;
+ budget = tg3_tx_avail(tnapi);
+ if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
+ base_flags, mss, vlan))
goto out_unlock;
-
- entry = NEXT_TX(tnapi->tx_prod);
}
skb_tx_timestamp(skb);
@@ -6269,7 +6337,7 @@ out_unlock:
return NETDEV_TX_OK;
dma_error:
- tg3_skb_error_unmap(tnapi, skb, i);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
dev_kfree_skb(skb);
tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
return NETDEV_TX_OK;
@@ -6602,35 +6670,13 @@ static void tg3_free_rings(struct tg3 *tp)
if (!tnapi->tx_buffers)
continue;
- for (i = 0; i < TG3_TX_RING_SIZE; ) {
- struct ring_info *txp;
- struct sk_buff *skb;
- unsigned int k;
-
- txp = &tnapi->tx_buffers[i];
- skb = txp->skb;
+ for (i = 0; i < TG3_TX_RING_SIZE; i++) {
+ struct sk_buff *skb = tnapi->tx_buffers[i].skb;
- if (skb == NULL) {
- i++;
+ if (!skb)
continue;
- }
-
- pci_unmap_single(tp->pdev,
- dma_unmap_addr(txp, mapping),
- skb_headlen(skb),
- PCI_DMA_TODEVICE);
- txp->skb = NULL;
- i++;
-
- for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
- txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
- pci_unmap_page(tp->pdev,
- dma_unmap_addr(txp, mapping),
- skb_shinfo(skb)->frags[k].size,
- PCI_DMA_TODEVICE);
- i++;
- }
+ tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
dev_kfree_skb_any(skb);
}
@@ -6762,9 +6808,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
*/
if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
(i && tg3_flag(tp, ENABLE_TSS))) {
- tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
- TG3_TX_RING_SIZE,
- GFP_KERNEL);
+ tnapi->tx_buffers = kzalloc(
+ sizeof(struct tg3_tx_ring_info) *
+ TG3_TX_RING_SIZE, GFP_KERNEL);
if (!tnapi->tx_buffers)
goto err_out;
@@ -8360,7 +8406,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
(tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
@@ -11204,6 +11250,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
+ u32 budget;
struct sk_buff *skb, *rx_skb;
u8 *tx_data;
dma_addr_t map;
@@ -11363,6 +11410,10 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
return -EIO;
}
+ val = tnapi->tx_prod;
+ tnapi->tx_buffers[val].skb = skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
+
tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
rnapi->coal_now);
@@ -11370,8 +11421,13 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
- tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
- base_flags, (mss << 1) | 1);
+ budget = tg3_tx_avail(tnapi);
+ if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
+ base_flags | TXD_FLAG_END, mss, 0)) {
+ tnapi->tx_buffers[val].skb = NULL;
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
tnapi->tx_prod++;
@@ -11394,7 +11450,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
break;
}
- pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
dev_kfree_skb(skb);
if (tx_idx != tnapi->tx_prod)
@@ -13817,7 +13873,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, 5705_PLUS);
/* Determine TSO capabilities */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
; /* Do nothing. HW bug. */
else if (tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, HW_TSO_3);
@@ -13880,11 +13936,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, 5755_PLUS))
tg3_flag_set(tp, SHORT_DMA_BUG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tg3_flag_set(tp, 4K_FIFO_LIMIT);
+
if (tg3_flag(tp, 5717_PLUS))
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
+ tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
tg3_flag_set(tp, USE_JUMBO_BDFLAG);
if (!tg3_flag(tp, 5705_PLUS) ||
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 691539b..2ea456d 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2652,6 +2652,12 @@ struct ring_info {
DEFINE_DMA_UNMAP_ADDR(mapping);
};
+struct tg3_tx_ring_info {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ bool fragmented;
+};
+
struct tg3_link_config {
/* Describes what we're trying to get. */
u32 advertising;
@@ -2816,7 +2822,7 @@ struct tg3_napi {
u32 last_tx_cons;
u32 prodmbox;
struct tg3_tx_buffer_desc *tx_ring;
- struct ring_info *tx_buffers;
+ struct tg3_tx_ring_info *tx_buffers;
dma_addr_t status_mapping;
dma_addr_t rx_rcb_mapping;
@@ -2899,6 +2905,7 @@ enum TG3_FLAGS {
TG3_FLAG_57765_PLUS,
TG3_FLAG_APE_HAS_NCSI,
TG3_FLAG_5717_PLUS,
+ TG3_FLAG_4K_FIFO_LIMIT,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9a6b382..71f3d1a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -528,6 +528,7 @@ static void tun_net_init(struct net_device *dev)
dev->netdev_ops = &tap_netdev_ops;
/* Ethernet TAP Device */
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
random_ether_addr(dev->dev_addr);
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 5250288..c5c4b4d 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -314,12 +314,11 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, 4);
while (skb->len > 0) {
- if ((short)(header & 0x0000ffff) !=
- ~((short)((header & 0xffff0000) >> 16))) {
+ if ((header & 0x07ff) != ((~header >> 16) & 0x07ff))
netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
- }
+
/* get the packet length */
- size = (u16) (header & 0x0000ffff);
+ size = (u16) (header & 0x000007ff);
if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
u8 alignment = (unsigned long)skb->data & 0x3;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index fd622a6..f06fb78 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -53,7 +53,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
-#define DRIVER_VERSION "01-June-2011"
+#define DRIVER_VERSION "04-Aug-2011"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -163,35 +163,8 @@ cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
-static int
-cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
- void *data, u16 flags, u16 *actlen, u16 timeout)
-{
- int err;
-
- err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
- usb_rcvctrlpipe(ctx->udev, 0) :
- usb_sndctrlpipe(ctx->udev, 0),
- req->bNotificationType, req->bmRequestType,
- req->wValue,
- req->wIndex, data,
- req->wLength, timeout);
-
- if (err < 0) {
- if (actlen)
- *actlen = 0;
- return err;
- }
-
- if (actlen)
- *actlen = err;
-
- return 0;
-}
-
static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
{
- struct usb_cdc_notification req;
u32 val;
u8 flags;
u8 iface_no;
@@ -200,14 +173,14 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
-
- err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
- if (err) {
+ err = usb_control_msg(ctx->udev,
+ usb_rcvctrlpipe(ctx->udev, 0),
+ USB_CDC_GET_NTB_PARAMETERS,
+ USB_TYPE_CLASS | USB_DIR_IN
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &ctx->ncm_parm,
+ sizeof(ctx->ncm_parm), 10000);
+ if (err < 0) {
pr_debug("failed GET_NTB_PARAMETERS\n");
return 1;
}
@@ -253,31 +226,43 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* inform device about NTB input size changes */
if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
- struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
-
- req.wLength = 8;
- ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
- ndp_in_sz.wNtbInMaxDatagrams =
- cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
- ndp_in_sz.wReserved = 0;
- err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
- 1000);
- } else {
- __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+ struct usb_cdc_ncm_ndp_input_size *ndp_in_sz;
- req.wLength = 4;
- err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
- NULL, 1000);
- }
+ ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL);
+ if (!ndp_in_sz) {
+ err = -ENOMEM;
+ goto size_err;
+ }
- if (err)
+ err = usb_control_msg(ctx->udev,
+ usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_NTB_INPUT_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, ndp_in_sz, 8, 1000);
+ kfree(ndp_in_sz);
+ } else {
+ __le32 *dwNtbInMaxSize;
+ dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize),
+ GFP_KERNEL);
+ if (!dwNtbInMaxSize) {
+ err = -ENOMEM;
+ goto size_err;
+ }
+ *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+
+ err = usb_control_msg(ctx->udev,
+ usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_NTB_INPUT_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, dwNtbInMaxSize, 4, 1000);
+ kfree(dwNtbInMaxSize);
+ }
+size_err:
+ if (err < 0)
pr_debug("Setting NTB Input Size failed\n");
}
@@ -332,29 +317,24 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* set CRC Mode */
if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_CRC_MODE;
- req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 0;
-
- err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
- if (err)
+ err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_CRC_MODE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_CRC_NOT_APPENDED,
+ iface_no, NULL, 0, 1000);
+ if (err < 0)
pr_debug("Setting CRC mode off failed\n");
}
/* set NTB format, if both formats are supported */
if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
- req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 0;
-
- err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
- if (err)
+ err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS
+ | USB_DIR_OUT | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0, 1000);
+ if (err < 0)
pr_debug("Setting NTB format to 16-bit failed\n");
}
@@ -362,23 +342,29 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* set Max Datagram Size (MTU) */
if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
- __le16 max_datagram_size;
+ __le16 *max_datagram_size;
u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = cpu_to_le16(2);
+ max_datagram_size = kzalloc(sizeof(*max_datagram_size),
+ GFP_KERNEL);
+ if (!max_datagram_size) {
+ err = -ENOMEM;
+ goto max_dgram_err;
+ }
- err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
- 1000);
- if (err) {
+ err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),
+ USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, max_datagram_size,
+ 2, 1000);
+ if (err < 0) {
pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
CDC_NCM_MIN_DATAGRAM_SIZE);
+ kfree(max_datagram_size);
} else {
- ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+ ctx->max_datagram_size =
+ le16_to_cpu(*max_datagram_size);
/* Check Eth descriptor value */
if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
if (ctx->max_datagram_size > eth_max_sz)
@@ -395,17 +381,17 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
CDC_NCM_MIN_DATAGRAM_SIZE;
/* if value changed, update device */
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 2;
- max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
-
- err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
- 0, NULL, 1000);
- if (err)
+ err = usb_control_msg(ctx->udev,
+ usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0,
+ iface_no, max_datagram_size,
+ 2, 1000);
+ kfree(max_datagram_size);
+max_dgram_err:
+ if (err < 0)
pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
}
@@ -671,7 +657,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
u32 rem;
u32 offset;
u32 last_offset;
- u16 n = 0;
+ u16 n = 0, index;
u8 ready2send = 0;
/* if there is a remaining skb, it gets priority */
@@ -859,8 +845,8 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
- ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
- ctx->tx_ndp_modulus);
+ index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
+ ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index);
memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
ctx->tx_seq++;
@@ -873,12 +859,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
- memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
+ memcpy(((u8 *)skb_out->data) + index,
&(ctx->tx_ncm.ndp16),
sizeof(ctx->tx_ncm.ndp16));
- memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
- sizeof(ctx->tx_ncm.ndp16),
+ memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16),
&(ctx->tx_ncm.dpe16),
(ctx->tx_curr_frame_num + 1) *
sizeof(struct usb_cdc_ncm_dpe16));
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 041fb7d..ef3b236 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -977,7 +977,6 @@ static void rtl8150_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (dev) {
set_bit(RTL8150_UNPLUG, &dev->flags);
- tasklet_disable(&dev->tl);
tasklet_kill(&dev->tl);
unregister_netdev(dev->netdev);
unlink_all_urbs(dev);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 7f78db7..5b23767 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -263,6 +263,8 @@ static void veth_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index deb1eca..7c5336c 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -515,10 +515,6 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
mac_set_cam_mask(regs, vptr->mCAMmask);
/* Enable VCAMs */
-
- if (test_bit(0, vptr->active_vlans))
- WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
-
for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
mac_set_vlan_cam(regs, i, (u8 *) &vid);
vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 1cbacb3..0959583 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1929,14 +1929,17 @@ static void
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
- unsigned long flags;
- VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
- spin_lock_irqsave(&adapter->cmd_lock, flags);
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_VLAN_FILTERS);
- spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ if (!(netdev->flags & IFF_PROMISC)) {
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
+
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
set_bit(vid, adapter->active_vlans);
}
@@ -1946,14 +1949,17 @@ static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
- unsigned long flags;
- VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
- spin_lock_irqsave(&adapter->cmd_lock, flags);
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_VLAN_FILTERS);
- spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ if (!(netdev->flags & IFF_PROMISC)) {
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
+
+ VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
clear_bit(vid, adapter->active_vlans);
}
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index b25c922..eb20281 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1074,9 +1074,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
used = pvc_is_used(pvc);
- if (type == ARPHRD_ETHER)
+ if (type == ARPHRD_ETHER) {
dev = alloc_netdev(0, "pvceth%d", ether_setup);
- else
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ } else
dev = alloc_netdev(0, "pvc%d", pvc_setup);
if (!dev) {
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 5eacc65..c421a61 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -155,7 +155,7 @@
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/rwsem.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <net/wimax.h>
#include <linux/wimax/i2400m.h>
#include <asm/byteorder.h>
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 55cf71f..e1b3e3c 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2823,6 +2823,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
dev->wireless_data = &ai->wireless_data;
dev->irq = irq;
dev->base_addr = port;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
SET_NETDEV_DEV(dev, dmdev);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index f54dff4..c3119a6 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1735,6 +1735,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
if (dma_mapping_error(ah->dev, bf->skbaddr)) {
ATH5K_ERR(ah, "beacon DMA mapping failed\n");
+ dev_kfree_skb_any(skb);
+ bf->skb = NULL;
return -EIO;
}
@@ -1819,8 +1821,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
ath5k_txbuf_free_skb(ah, avf->bbuf);
avf->bbuf->skb = skb;
ret = ath5k_beacon_setup(ah, avf->bbuf);
- if (ret)
- avf->bbuf->skb = NULL;
out:
return ret;
}
@@ -1840,6 +1840,7 @@ ath5k_beacon_send(struct ath5k_hw *ah)
struct ath5k_vif *avf;
struct ath5k_buf *bf;
struct sk_buff *skb;
+ int err;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");
@@ -1888,11 +1889,6 @@ ath5k_beacon_send(struct ath5k_hw *ah)
avf = (void *)vif->drv_priv;
bf = avf->bbuf;
- if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
- ah->opmode == NL80211_IFTYPE_MONITOR)) {
- ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
- return;
- }
/*
* Stop any current dma and put the new frame on the queue.
@@ -1906,8 +1902,17 @@ ath5k_beacon_send(struct ath5k_hw *ah)
/* refresh the beacon for AP or MESH mode */
if (ah->opmode == NL80211_IFTYPE_AP ||
- ah->opmode == NL80211_IFTYPE_MESH_POINT)
- ath5k_beacon_update(ah->hw, vif);
+ ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+ err = ath5k_beacon_update(ah->hw, vif);
+ if (err)
+ return;
+ }
+
+ if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
+ ah->opmode == NL80211_IFTYPE_MONITOR)) {
+ ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
+ return;
+ }
trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 9ff7c30..44d9d8d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -309,11 +309,7 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
u8 i;
u32 val;
- if (ah->is_pciexpress != true)
- return;
-
- /* Do not touch SerDes registers */
- if (ah->config.pcie_powersave_enable == 2)
+ if (ah->is_pciexpress != true || ah->aspm_enabled != true)
return;
/* Nothing to do on restore for 11N */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d109c25..1b94003 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -69,7 +69,7 @@ static int ar9003_hw_power_interpolate(int32_t x,
static const struct ar9300_eeprom ar9300_default = {
.eepromVersion = 2,
.templateVersion = 2,
- .macAddr = {1, 2, 3, 4, 5, 6},
+ .macAddr = {0, 2, 3, 4, 5, 6},
.custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.baseEepHeader = {
@@ -307,7 +307,7 @@ static const struct ar9300_eeprom ar9300_default = {
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
@@ -884,7 +884,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
@@ -2040,7 +2040,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
@@ -3734,7 +3734,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
}
} else {
reg_pmu_set = (5 << 1) | (7 << 4) |
- (1 << 8) | (2 << 14) |
+ (2 << 8) | (2 << 14) |
(6 << 17) | (1 << 20) |
(3 << 24) | (1 << 28);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 8efdec2..ad2bb2b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -519,11 +519,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
int restore,
int power_off)
{
- if (ah->is_pciexpress != true)
- return;
-
- /* Do not touch SerDes registers */
- if (ah->config.pcie_powersave_enable == 2)
+ if (ah->is_pciexpress != true || ah->aspm_enabled != true)
return;
/* Nothing to do on restore for 11N */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 6de3f0b..5c59042 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -850,7 +850,7 @@
#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
-#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM_BASE + 0x450 + ((_i) << 2))
+#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2))
/*
* Channel 2 Register Map
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8006ce0..8dcefe7 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -318,6 +318,14 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
}
+static void ath9k_hw_aspm_init(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (common->bus_ops->aspm_init)
+ common->bus_ops->aspm_init(common);
+}
+
/* This should work for all families including legacy */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
@@ -378,7 +386,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.additional_swba_backoff = 0;
ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
- ah->config.pcie_powersave_enable = 0;
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
@@ -598,7 +605,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (ah->is_pciexpress)
- ath9k_hw_configpcipowersave(ah, 0, 0);
+ ath9k_hw_aspm_init(ah);
else
ath9k_hw_disablepcie(ah);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 6acd0f9..c798890 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -219,7 +219,6 @@ struct ath9k_ops_config {
int additional_swba_backoff;
int ack_6mb;
u32 cwm_ignore_extcca;
- u8 pcie_powersave_enable;
bool pcieSerDesWrite;
u8 pcie_clock_req;
u32 pcie_waen;
@@ -673,6 +672,7 @@ struct ath_hw {
bool sw_mgmt_crypto;
bool is_pciexpress;
+ bool aspm_enabled;
bool is_monitoring;
bool need_an_top2_fixup;
u16 tx_trig_level;
@@ -874,6 +874,7 @@ struct ath_bus_ops {
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
void (*extn_synch_en)(struct ath_common *common);
+ void (*aspm_init)(struct ath_common *common);
};
static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index ac51071..aa0ff7e 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -670,8 +670,10 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath9k_channel *curchan = ah->curchan;
+ ah->txchainmask = common->tx_chainmask;
if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9098aaa..6530694 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2283,7 +2283,11 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
mutex_lock(&sc->mutex);
ah->coverage_class = coverage_class;
+
+ ath9k_ps_wakeup(sc);
ath9k_hw_init_global_settings(ah);
+ ath9k_ps_restore(sc);
+
mutex_unlock(&sc->mutex);
}
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 3bad0b2..be4ea13 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -16,6 +16,7 @@
#include <linux/nl80211.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/ath9k_platform.h>
#include "ath9k.h"
@@ -115,12 +116,38 @@ static void ath_pci_extn_synch_enable(struct ath_common *common)
pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl);
}
+static void ath_pci_aspm_init(struct ath_common *common)
+{
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct pci_dev *pdev = to_pci_dev(sc->dev);
+ struct pci_dev *parent;
+ int pos;
+ u8 aspm;
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ parent = pdev->bus->self;
+ if (WARN_ON(!parent))
+ return;
+
+ pos = pci_pcie_cap(parent);
+ pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm);
+ if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
+ ah->aspm_enabled = true;
+ /* Initialize PCIe PM and SERDES registers. */
+ ath9k_hw_configpcipowersave(ah, 0, 0);
+ }
+}
+
static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath_pci_read_cachesize,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
.extn_synch_en = ath_pci_extn_synch_enable,
+ .aspm_init = ath_pci_aspm_init,
};
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 0122930..0474e663 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1066,8 +1066,10 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
* the high througput speed in 802.11n networks.
*/
- if (!is_main_vif(ar, vif))
+ if (!is_main_vif(ar, vif)) {
+ mutex_lock(&ar->mutex);
goto err_softw;
+ }
/*
* While the hardware supports *catch-all* key, for offloading
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index d2293dc..3cab843 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -28,7 +28,7 @@ config B43
config B43_BCMA
bool "Support for BCMA bus"
- depends on B43 && BCMA && BROKEN
+ depends on B43 && BCMA
default y
config B43_SSB
diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/b43/bus.c
index 64c3f65..05f6c7b 100644
--- a/drivers/net/wireless/b43/bus.c
+++ b/drivers/net/wireless/b43/bus.c
@@ -244,10 +244,12 @@ void b43_bus_set_wldev(struct b43_bus_dev *dev, void *wldev)
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
bcma_set_drvdata(dev->bdev, wldev);
+ break;
#endif
#ifdef CONFIG_B43_SSB
case B43_BUS_SSB:
ssb_set_drvdata(dev->sdev, wldev);
+ break;
#endif
}
}
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 83cba22..481e534 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -795,9 +795,23 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
u32 tmp;
u16 mmio_base;
- tmp = b43_read32(dev, SSB_TMSHIGH);
- if (tmp & SSB_TMSHIGH_DMA64)
- return DMA_BIT_MASK(64);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
+ if (tmp & BCMA_IOST_DMA64)
+ return DMA_BIT_MASK(64);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
+ if (tmp & SSB_TMSHIGH_DMA64)
+ return DMA_BIT_MASK(64);
+ break;
+#endif
+ }
+
mmio_base = b43_dmacontroller_base(0, 0);
b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 032d466..26f1ab8 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5350,6 +5350,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
{
struct b43_wl *wl = ssb_get_devtypedata(sdev);
struct b43_wldev *wldev = ssb_get_drvdata(sdev);
+ struct b43_bus_dev *dev = wldev->dev;
/* We must cancel any work here before unregistering from ieee80211,
* as the ieee80211 unreg will destroy the workqueue. */
@@ -5365,14 +5366,14 @@ static void b43_ssb_remove(struct ssb_device *sdev)
ieee80211_unregister_hw(wl->hw);
}
- b43_one_core_detach(wldev->dev);
+ b43_one_core_detach(dev);
if (list_empty(&wl->devlist)) {
b43_leds_unregister(wl);
/* Last core on the chip unregistered.
* We can destroy common struct b43_wl.
*/
- b43_wireless_exit(wldev->dev, wl);
+ b43_wireless_exit(dev, wl);
}
}
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 17a130d..a610a35 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -8,7 +8,7 @@
#include <linux/stringify.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/io.h>
#include <linux/ssb/ssb.h>
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index f89c342..686941c 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -5,7 +5,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/linkage.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "b43legacy.h"
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index c052a0d..5441ad1 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -648,6 +648,8 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
0x74c5e40d),
PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
0x4b801a17),
+ PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.02",
+ 0x4b74baa0),
PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
0x7a954bd9, 0x74be00c6),
PCMCIA_DEVICE_PROD_ID123(
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index d508482..89a116f 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -855,6 +855,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
iface = netdev_priv(dev);
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
/* kernel callbacks */
if (iface) {
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index dab67a1..73fe3cd 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -1746,7 +1746,11 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
}
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
+ /*
+ * We do not commit tx power settings while channel changing,
+ * do it now if tx power changed.
+ */
+ iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
return 0;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index bd4b000..ecdc6e5 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1235,7 +1235,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
iwl_legacy_print_rx_config_cmd(priv, ctx);
- goto set_tx_power;
+ /*
+ * We do not commit tx power settings while channel changing,
+ * do it now if tx power changed.
+ */
+ iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
+ return 0;
}
/* If we are currently associated and the new config requires
@@ -1315,7 +1320,6 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
iwl4965_init_sensitivity(priv);
-set_tx_power:
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 3eeb12e..c95cefd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -365,6 +365,7 @@ static struct iwl_base_params iwl5000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
+ .no_idle_support = true,
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 3e6bb73..02817a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -135,6 +135,7 @@ struct iwl_mod_params {
* @temperature_kelvin: temperature report by uCode in kelvin
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadhow register bit
+ * @no_idle_support: do not support idle mode
*/
struct iwl_base_params {
int eeprom_size;
@@ -156,6 +157,7 @@ struct iwl_base_params {
bool temperature_kelvin;
u32 max_event_log_size;
const bool shadow_reg_enable;
+ const bool no_idle_support;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index fb7e436..2fdbffa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -134,6 +134,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data)
{
bus->drv_data = drv_data;
+ pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data);
}
static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
@@ -454,8 +455,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
}
- pci_set_drvdata(pdev, bus);
-
bus->dev = &pdev->dev;
bus->irq = pdev->irq;
bus->ops = &pci_ops;
@@ -479,26 +478,22 @@ out_no_pci:
return err;
}
-static void iwl_pci_down(struct iwl_bus *bus)
-{
- struct iwl_pci_bus *pci_bus = (struct iwl_pci_bus *) bus->bus_specific;
-
- pci_disable_msi(pci_bus->pci_dev);
- pci_iounmap(pci_bus->pci_dev, pci_bus->hw_base);
- pci_release_regions(pci_bus->pci_dev);
- pci_disable_device(pci_bus->pci_dev);
- pci_set_drvdata(pci_bus->pci_dev, NULL);
-
- kfree(bus);
-}
-
static void __devexit iwl_pci_remove(struct pci_dev *pdev)
{
- struct iwl_bus *bus = pci_get_drvdata(pdev);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+ struct iwl_bus *bus = priv->bus;
+ struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus);
+ struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
- iwl_remove(bus->drv_data);
+ iwl_remove(priv);
- iwl_pci_down(bus);
+ pci_disable_msi(pci_dev);
+ pci_iounmap(pci_dev, pci_bus->hw_base);
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+ pci_set_drvdata(pci_dev, NULL);
+
+ kfree(bus);
}
#ifdef CONFIG_PM
@@ -506,20 +501,20 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
static int iwl_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_bus *bus = pci_get_drvdata(pdev);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/
- return iwl_suspend(bus->drv_data);
+ return iwl_suspend(priv);
}
static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_bus *bus = pci_get_drvdata(pdev);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -532,7 +527,7 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- return iwl_resume(bus->drv_data);
+ return iwl_resume(priv);
}
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 3ec619c..cd64df0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -349,7 +349,8 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
if (priv->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
- else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
+ else if (!priv->cfg->base_params->no_idle_support &&
+ priv->hw->conf.flags & IEEE80211_CONF_IDLE)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (iwl_tt_is_low_power_state(priv)) {
/* in thermal throttling low power state */
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 3f7fc4a..d7dbc00 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -239,7 +239,6 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
static const struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
- PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
@@ -272,6 +271,7 @@ static const struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
+ PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.01", 0xd27deb1a), /* Lucent Orinoco */
#ifdef CONFIG_HERMES_PRISM
/* Only entries that certainly identify Prism chipset */
PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
@@ -321,6 +321,9 @@ static const struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
+
+ /* This may be Agere or Intersil Firmware */
+ PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
#endif
PCMCIA_DEVICE_NULL,
};
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 84ab7d1..ef67f67 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -703,8 +703,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
/*
* Add space for the TXWI in front of the skb.
*/
- skb_push(entry->skb, TXWI_DESC_SIZE);
- memset(entry->skb, 0, TXWI_DESC_SIZE);
+ memset(skb_push(entry->skb, TXWI_DESC_SIZE), 0, TXWI_DESC_SIZE);
/*
* Register descriptor details in skb frame descriptor.
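The one-line form above works because skb_push() returns the new skb->data pointer, the start of the space it just added, so zeroing its return value clears the pushed TXWI area rather than the sk_buff structure itself, which is what the removed memset() was clearing. A small sketch of the same pattern, with DESC_SIZE standing in for TXWI_DESC_SIZE:

    #include <linux/skbuff.h>
    #include <linux/string.h>

    #define DESC_SIZE 16                    /* illustrative stand-in */

    /* skb_push() grows the data area at the front and returns the new
     * skb->data, which is exactly the region that needs zeroing. */
    static void prepend_zeroed_descriptor(struct sk_buff *skb)
    {
            memset(skb_push(skb, DESC_SIZE), 0, DESC_SIZE);
    }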
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 5075593..dbf501c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -464,6 +464,15 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
int wcid, ack, pid;
int tx_wcid, tx_ack, tx_pid;
+ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) {
+ WARNING(entry->queue->rt2x00dev,
+ "Data pending for entry %u in queue %u\n",
+ entry->entry_idx, entry->queue->qid);
+ cond_resched();
+ return false;
+ }
+
wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
@@ -529,12 +538,11 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
if (rt2800usb_txdone_entry_check(entry, reg))
break;
+ entry = NULL;
}
- if (!entry || rt2x00queue_empty(queue))
- break;
-
- rt2800_txdone_entry(entry, reg);
+ if (entry)
+ rt2800_txdone_entry(entry, reg);
}
}
@@ -558,8 +566,10 @@ static void rt2800usb_work_txdone(struct work_struct *work)
while (!rt2x00queue_empty(queue)) {
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
break;
+
if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
else if (rt2x00queue_status_timeout(entry))
@@ -921,6 +931,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c16) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
+ /* DVICO */
+ { USB_DEVICE(0x0fe9, 0xb307) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7711) },
{ USB_DEVICE(0x7392, 0x7717) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 15cdc7e..4cdf247 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -355,7 +355,8 @@ static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *
return CIPHER_NONE;
}
-static inline void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
+static inline void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc)
{
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 8efab39..4ccf238 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -113,7 +113,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* due to possible race conditions in mac80211.
*/
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- goto exit_fail;
+ goto exit_free_skb;
/*
* Use the ATIM queue if appropriate and present.
@@ -127,7 +127,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
ERROR(rt2x00dev,
"Attempt to send packet over invalid queue %d.\n"
"Please file bug report to %s.\n", qid, DRV_PROJECT);
- goto exit_fail;
+ goto exit_free_skb;
}
/*
@@ -159,6 +159,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
exit_fail:
rt2x00queue_pause_queue(queue);
+ exit_free_skb:
dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index b6b4542..1e31050 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
struct queue_entry *entry = (struct queue_entry *)urb->context;
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+ if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return;
-
- if (rt2x00dev->ops->lib->tx_dma_done)
- rt2x00dev->ops->lib->tx_dma_done(entry);
-
- /*
- * Report the frame as DMA done
- */
- rt2x00lib_dmadone(entry);
-
/*
* Check if the frame was correctly uploaded
*/
if (urb->status)
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+ /*
+ * Report the frame as DMA done
+ */
+ rt2x00lib_dmadone(entry);
+ if (rt2x00dev->ops->lib->tx_dma_done)
+ rt2x00dev->ops->lib->tx_dma_done(entry);
/*
* Schedule the delayed work for reading the TX status
* from the device.
@@ -874,18 +871,8 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
struct rt2x00_dev *rt2x00dev = hw->priv;
- int retval;
-
- retval = rt2x00lib_suspend(rt2x00dev, state);
- if (retval)
- return retval;
- /*
- * Decrease usbdev refcount.
- */
- usb_put_dev(interface_to_usbdev(usb_intf));
-
- return 0;
+ return rt2x00lib_suspend(rt2x00dev, state);
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
@@ -894,8 +881,6 @@ int rt2x00usb_resume(struct usb_interface *usb_intf)
struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
struct rt2x00_dev *rt2x00dev = hw->priv;
- usb_get_dev(interface_to_usbdev(usb_intf));
-
return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 6a93939..0baeb89 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2420,6 +2420,7 @@ static struct usb_device_id rt73usb_device_table[] = {
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00d8) },
{ USB_DEVICE(0x0411, 0x00d9) },
+ { USB_DEVICE(0x0411, 0x00e6) },
{ USB_DEVICE(0x0411, 0x00f4) },
{ USB_DEVICE(0x0411, 0x0116) },
{ USB_DEVICE(0x0411, 0x0119) },
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index c1ea65e..f8648b7 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1696,15 +1696,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
- /*find bridge info */
- pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
- for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
- if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
- pcipriv->ndis_adapter.pcibridge_vendor = tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Pci Bridge Vendor is found index: %d\n",
- tmp));
- break;
+ if (bridge_pdev) {
+ /*find bridge info if available */
+ pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
+ for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
+ if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
+ pcipriv->ndis_adapter.pcibridge_vendor = tmp;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Pci Bridge Vendor is found index:"
+ " %d\n", tmp));
+ break;
+ }
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 942f7a3..ef63c0d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -281,6 +281,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard (b/g mode only) */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
+ /* 8188RU in Alfa AWUS036NHR */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
@@ -303,20 +305,23 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
/* HP - Lite-On ,8188CUS Slim Combo */
{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
{RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
{RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
{RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
{RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
{RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
- {RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
+ {RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
/* Russian customer -Azwave (8188CE-VAU b/g mode only) */
- {RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x4855, 0x0090, rtl92cu_hal_cfg)}, /* Feixun */
+ {RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
+ {RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
/****** 8192CU ********/
{RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
{RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
- {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c
index ef8370e..ad87a1a 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -140,8 +140,6 @@ int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth)
auth->sleep_auth = sleep_auth;
ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
- if (ret < 0)
- return ret;
out:
kfree(auth);
@@ -681,10 +679,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD,
detection, sizeof(*detection));
- if (ret < 0) {
+ if (ret < 0)
wl1251_warning("failed to set cca threshold: %d", ret);
- return ret;
- }
out:
kfree(detection);
diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/wl1251/cmd.c
index 81f164b..d14d69d 100644
--- a/drivers/net/wireless/wl1251/cmd.c
+++ b/drivers/net/wireless/wl1251/cmd.c
@@ -241,7 +241,7 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
if (ret < 0) {
wl1251_error("tx %s cmd for channel %d failed",
enable ? "start" : "stop", channel);
- return ret;
+ goto out;
}
wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index 7e33f1f..34f6ab5 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -77,8 +77,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
auth->sleep_auth = sleep_auth;
ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
- if (ret < 0)
- return ret;
out:
kfree(auth);
@@ -624,10 +622,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)
ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
detection, sizeof(*detection));
- if (ret < 0) {
+ if (ret < 0)
wl1271_warning("failed to set cca threshold: %d", ret);
- return ret;
- }
out:
kfree(detection);
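Both ACX hunks above remove early returns between the command call and the kfree() at the out label, so the command buffer is no longer leaked when the firmware call fails. A sketch of the resulting shape; example_dev, example_cmd and example_cmd_configure() are illustrative stand-ins, not driver API:

    #include <linux/types.h>
    #include <linux/slab.h>
    #include <linux/printk.h>

    struct example_dev;                              /* illustrative */
    struct example_cmd { u8 payload[16]; };          /* illustrative */

    int example_cmd_configure(struct example_dev *dev,
                              struct example_cmd *cmd, size_t len);  /* stand-in */

    static int send_configure(struct example_dev *dev)
    {
            struct example_cmd *cmd;
            int ret;

            cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
            if (!cmd)
                    return -ENOMEM;

            ret = example_cmd_configure(dev, cmd, sizeof(*cmd));
            if (ret < 0)
                    pr_warn("configure failed: %d\n", ret);  /* report, then fall through */

            kfree(cmd);             /* freed on success and failure alike */
            return ret;
    }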
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index e58c22d..b70ae40 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -4283,6 +4283,7 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
wl->hw->wiphy->max_scan_ssids = 1;
+ wl->hw->wiphy->max_sched_scan_ssids = 1;
/*
* Maximum length of elements in scanning probe request templates
* should be the maximum length possible for a template, without
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 5cf18c2..fb1fd5a 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -164,7 +164,7 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
/* If enabled, tell runtime PM not to power off the card */
if (pm_runtime_enabled(&func->dev)) {
ret = pm_runtime_get_sync(&func->dev);
- if (ret)
+ if (ret < 0)
goto out;
} else {
/* Runtime PM is disabled: power up the card manually */
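The ret < 0 test above matters because pm_runtime_get_sync() may return 1 when the device was already active, so treating any non-zero value as failure would wrongly abort power-on. A hedged sketch of the usual call pattern, with power_on as an illustrative name:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int power_on(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);     /* 0 or 1: device is (now) active */
            if (ret < 0) {
                    pm_runtime_put_noidle(dev); /* drop the usage count taken above */
                    return ret;
            }
            return 0;
    }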
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 5d5e1ef..4ae8eff 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -36,7 +36,6 @@ enum wl1271_tm_commands {
WL1271_TM_CMD_TEST,
WL1271_TM_CMD_INTERROGATE,
WL1271_TM_CMD_CONFIGURE,
- WL1271_TM_CMD_NVS_PUSH,
WL1271_TM_CMD_SET_PLT_MODE,
WL1271_TM_CMD_RECOVER,
@@ -139,12 +138,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
if (ret < 0) {
wl1271_warning("testmode cmd interrogate failed: %d", ret);
+ kfree(cmd);
return ret;
}
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
- if (!skb)
+ if (!skb) {
+ kfree(cmd);
return -ENOMEM;
+ }
NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
@@ -187,48 +189,6 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
return 0;
}
-static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
-{
- int ret = 0;
- size_t len;
- void *buf;
-
- wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push");
-
- if (!tb[WL1271_TM_ATTR_DATA])
- return -EINVAL;
-
- buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
- len = nla_len(tb[WL1271_TM_ATTR_DATA]);
-
- mutex_lock(&wl->mutex);
-
- kfree(wl->nvs);
-
- if ((wl->chip.id == CHIP_ID_1283_PG20) &&
- (len != sizeof(struct wl128x_nvs_file)))
- return -EINVAL;
- else if (len != sizeof(struct wl1271_nvs_file))
- return -EINVAL;
-
- wl->nvs = kzalloc(len, GFP_KERNEL);
- if (!wl->nvs) {
- wl1271_error("could not allocate memory for the nvs file");
- ret = -ENOMEM;
- goto out;
- }
-
- memcpy(wl->nvs, buf, len);
- wl->nvs_len = len;
-
- wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs");
-
-out:
- mutex_unlock(&wl->mutex);
-
- return ret;
-}
-
static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
{
u32 val;
@@ -285,8 +245,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
return wl1271_tm_cmd_interrogate(wl, tb);
case WL1271_TM_CMD_CONFIGURE:
return wl1271_tm_cmd_configure(wl, tb);
- case WL1271_TM_CMD_NVS_PUSH:
- return wl1271_tm_cmd_nvs_push(wl, tb);
case WL1271_TM_CMD_SET_PLT_MODE:
return wl1271_tm_cmd_set_plt_mode(wl, tb);
case WL1271_TM_CMD_RECOVER:
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 0372315..c77e054 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -1596,7 +1596,7 @@ static void pn533_disconnect(struct usb_interface *interface)
usb_free_urb(dev->out_urb);
kfree(dev);
- nfc_dev_info(&dev->interface->dev, "NXP PN533 NFC device disconnected");
+ nfc_dev_info(&interface->dev, "NXP PN533 NFC device disconnected");
}
static struct usb_driver pn533_driver = {
diff --git a/drivers/of/address.c b/drivers/of/address.c
index da1f4b9..72c33fb 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -610,6 +610,6 @@ void __iomem *of_iomap(struct device_node *np, int index)
if (of_address_to_resource(np, index, &res))
return NULL;
- return ioremap(res.start, 1 + res.end - res.start);
+ return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);
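The substitution above is behaviour-preserving: resource_size() is the inclusive length of the range, roughly as defined in include/linux/ioport.h:

    /* resource_size(), roughly as defined in include/linux/ioport.h: */
    static inline resource_size_t resource_size(const struct resource *res)
    {
            return res->end - res->start + 1;
    }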
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 02ed367..3ff22e3 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -610,8 +610,9 @@ EXPORT_SYMBOL(of_find_node_by_phandle);
*
* The out_value is modified only if a valid u32 value can be decoded.
*/
-int of_property_read_u32_array(const struct device_node *np, char *propname,
- u32 *out_values, size_t sz)
+int of_property_read_u32_array(const struct device_node *np,
+ const char *propname, u32 *out_values,
+ size_t sz)
{
struct property *prop = of_find_property(np, propname, NULL);
const __be32 *val;
@@ -645,7 +646,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u32_array);
*
* The out_string pointer is modified only if a valid string can be decoded.
*/
-int of_property_read_string(struct device_node *np, char *propname,
+int of_property_read_string(struct device_node *np, const char *propname,
const char **out_string)
{
struct property *prop = of_find_property(np, propname, NULL);
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index 3007662..ef0105f 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -127,8 +127,8 @@ EXPORT_SYMBOL(of_gpio_count);
* gpio chips. This function performs only one sanity check: whether gpio
* is less than ngpios (that is specified in the gpio_chip).
*/
-static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
- const void *gpio_spec, u32 *flags)
+int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
+ const void *gpio_spec, u32 *flags)
{
const __be32 *gpio = gpio_spec;
const u32 n = be32_to_cpup(gpio);
@@ -152,6 +152,7 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
return n;
}
+EXPORT_SYMBOL(of_gpio_simple_xlate);
/**
* of_mm_gpiochip_add - Add memory mapped GPIO chip (bank)
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index 86f334a..bb18471 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -8,6 +8,51 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/of_net.h>
+#include <linux/phy.h>
+
+/**
+ * It maps 'enum phy_interface_t' found in include/linux/phy.h
+ * into the device tree binding of 'phy-mode', so that Ethernet
+ * device drivers can get the phy interface from the device tree.
+ */
+static const char *phy_modes[] = {
+ [PHY_INTERFACE_MODE_NA] = "",
+ [PHY_INTERFACE_MODE_MII] = "mii",
+ [PHY_INTERFACE_MODE_GMII] = "gmii",
+ [PHY_INTERFACE_MODE_SGMII] = "sgmii",
+ [PHY_INTERFACE_MODE_TBI] = "tbi",
+ [PHY_INTERFACE_MODE_RMII] = "rmii",
+ [PHY_INTERFACE_MODE_RGMII] = "rgmii",
+ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
+ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
+ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
+ [PHY_INTERFACE_MODE_RTBI] = "rtbi",
+ [PHY_INTERFACE_MODE_SMII] = "smii",
+};
+
+/**
+ * of_get_phy_mode - Get phy mode for given device_node
+ * @np: Pointer to the given device_node
+ *
+ * The function gets the phy interface string from the 'phy-mode' property
+ * and returns its index in the phy_modes table, or a negative errno on error.
+ */
+const int of_get_phy_mode(struct device_node *np)
+{
+ const char *pm;
+ int err, i;
+
+ err = of_property_read_string(np, "phy-mode", &pm);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
+ if (!strcasecmp(pm, phy_modes[i]))
+ return i;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(of_get_phy_mode);
/**
* Search the device tree for the best MAC address to use. 'mac-address' is
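The new of_get_phy_mode() above maps a node's 'phy-mode' string to a PHY_INTERFACE_MODE_* value, or returns a negative errno. A short usage sketch for a hypothetical platform driver, assuming the prototype is picked up via linux/of_net.h; my_probe and the printed message are illustrative:

    #include <linux/of_net.h>
    #include <linux/phy.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
            int phy_mode = of_get_phy_mode(pdev->dev.of_node);

            if (phy_mode < 0)
                    return phy_mode;     /* missing or unrecognised "phy-mode" */

            if (phy_mode == PHY_INTERFACE_MODE_RGMII)
                    dev_info(&pdev->dev, "PHY wired for RGMII\n");

            return 0;
    }

A matching device tree node would simply carry a property such as phy-mode = "rgmii";.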
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 0b54e46..38b6fc0 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -10,7 +10,7 @@
#ifndef OPROFILE_STATS_H
#define OPROFILE_STATS_H
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct oprofile_stat_struct {
atomic_t sample_lost_no_mm;
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 8f3faf3..095f29e 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -408,7 +408,7 @@ got_one:
}
EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
-static int is_ejectable(acpi_handle handle)
+static int pcihp_is_ejectable(acpi_handle handle)
{
acpi_status status;
acpi_handle tmp;
@@ -442,7 +442,7 @@ int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
return 0;
if (bridge_handle != parent_handle)
return 0;
- return is_ejectable(handle);
+ return pcihp_is_ejectable(handle);
}
EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable);
@@ -450,7 +450,7 @@ static acpi_status
check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int *found = (int *)context;
- if (is_ejectable(handle)) {
+ if (pcihp_is_ejectable(handle)) {
*found = 1;
return AE_CTRL_TERMINATE;
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a70fa89..2202857 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -110,7 +110,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
}
-static struct acpi_dock_ops acpiphp_dock_ops = {
+static const struct acpi_dock_ops acpiphp_dock_ops = {
.handler = handle_hotplug_event_func,
};
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index d703e73..3fadf2f 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -32,7 +32,7 @@
#include <linux/pci_hotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include "cpci_hotplug.h"
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4952c3b..f1ce99c 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -840,8 +840,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Need to read VID early b/c it's used to differentiate CPQ and INTC
* discovery
*/
- rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
- if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
+ vendor_id = pdev->vendor;
+ if ((vendor_id != PCI_VENDOR_ID_COMPAQ) &&
+ (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_non_compaq_or_intel);
rc = -ENODEV;
goto err_disable_device;
@@ -868,11 +869,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* TODO: This code can be made to support non-Compaq or Intel
* subsystem IDs
*/
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_disable_device;
- }
+ subsystem_vid = pdev->subsystem_vendor;
dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_non_compaq_or_intel);
@@ -887,11 +884,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_free_ctrl;
- }
+ subsystem_deviceid = pdev->subsystem_device;
info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 085dbb5..1e9c9aa 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -213,6 +213,9 @@ static int board_added(struct slot *p_slot)
goto err_exit;
}
+ /* Wait for 1 second after checking link training status */
+ msleep(1000);
+
/* Check for a power fault */
if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 50a23da..96dc473 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -275,16 +275,9 @@ int pciehp_check_link_status(struct controller *ctrl)
* hot-plug capable downstream port. But old controller might
* not implement it. In this case, we wait for 1000 ms.
*/
- if (ctrl->link_active_reporting){
- /* Wait for Data Link Layer Link Active bit to be set */
+ if (ctrl->link_active_reporting)
pcie_wait_link_active(ctrl);
- /*
- * We must wait for 100 ms after the Data Link Layer
- * Link Active bit reads 1b before initiating a
- * configuration access to the hot added device.
- */
- msleep(100);
- } else
+ else
msleep(1000);
retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 749fdf0..3ffd9c1 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -158,47 +158,6 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
*/
}
-/* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */
-static int pci_set_payload(struct pci_dev *dev)
-{
- int pos, ppos;
- u16 pctl, psz;
- u16 dctl, dsz, dcap, dmax;
- struct pci_dev *parent;
-
- parent = dev->bus->self;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (!pos)
- return 0;
-
- /* Read Device MaxPayload capability and setting */
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl);
- pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap);
- dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
- dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD);
-
- /* Read Parent MaxPayload setting */
- ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
- if (!ppos)
- return 0;
- pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
- psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
-
- /* If parent payload > device max payload -> error
- * If parent payload > device payload -> set speed
- * If parent payload <= device payload -> do nothing
- */
- if (psz > dmax)
- return -1;
- else if (psz > dsz) {
- dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz);
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
- (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) +
- (psz << 5));
- }
- return 0;
-}
-
void pci_configure_slot(struct pci_dev *dev)
{
struct pci_dev *cdev;
@@ -210,9 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
return;
- ret = pci_set_payload(dev);
- if (ret)
- dev_warn(&dev->dev, "could not set device max payload\n");
+ if (dev->bus && dev->bus->self)
+ pcie_bus_configure_settings(dev->bus,
+ dev->bus->self->pcie_mpss);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index c94d37e..f092993 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -55,7 +55,7 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
*/
if (bus->bridge->of_node)
return of_node_get(bus->bridge->of_node);
- if (bus->bridge->parent->of_node)
+ if (bus->bridge->parent && bus->bridge->parent->of_node)
return of_node_get(bus->bridge->parent->of_node);
return NULL;
}
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 77cb2a1..81525ae 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -55,7 +55,7 @@ enum smbios_attr_enum {
SMBIOS_ATTR_INSTANCE_SHOW,
};
-static mode_t
+static size_t
find_smbios_instance_string(struct pci_dev *pdev, char *buf,
enum smbios_attr_enum attribute)
{
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 692671b..4e84fd4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -77,6 +77,8 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
+
/*
* The default CLS is used if arch didn't set CLS explicitly and not
* all pci devices agree on the same value. Arch can override either
@@ -1905,7 +1907,7 @@ void pci_enable_ari(struct pci_dev *dev)
{
int pos;
u32 cap;
- u16 ctrl;
+ u16 flags, ctrl;
struct pci_dev *bridge;
if (!pci_is_pcie(dev) || dev->devfn)
@@ -1923,6 +1925,11 @@ void pci_enable_ari(struct pci_dev *dev)
if (!pos)
return;
+ /* ARI is a PCIe v2 feature */
+ pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
+ if ((flags & PCI_EXP_FLAGS_VERS) < 2)
+ return;
+
pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
if (!(cap & PCI_EXP_DEVCAP2_ARI))
return;
@@ -3186,7 +3193,7 @@ EXPORT_SYMBOL(pcie_get_readrq);
* @rq: maximum memory read count in bytes
* valid values are 128, 256, 512, 1024, 2048, 4096
*
- * If possible sets maximum read byte count
+ * If possible sets maximum memory read request in bytes
*/
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
@@ -3209,7 +3216,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
ctl &= ~PCI_EXP_DEVCTL_READRQ;
ctl |= v;
- err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl);
+ err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
}
out:
@@ -3218,6 +3225,67 @@ out:
EXPORT_SYMBOL(pcie_set_readrq);
/**
+ * pcie_get_mps - get PCI Express maximum payload size
+ * @dev: PCI device to query
+ *
+ * Returns maximum payload size in bytes
+ * or appropriate error value.
+ */
+int pcie_get_mps(struct pci_dev *dev)
+{
+ int ret, cap;
+ u16 ctl;
+
+ cap = pci_pcie_cap(dev);
+ if (!cap)
+ return -EINVAL;
+
+ ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (!ret)
+ ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+
+ return ret;
+}
+
+/**
+ * pcie_set_mps - set PCI Express maximum payload size
+ * @dev: PCI device to query
+ * @mps: maximum payload size in bytes
+ * valid values are 128, 256, 512, 1024, 2048, 4096
+ *
+ * If possible sets maximum payload size
+ */
+int pcie_set_mps(struct pci_dev *dev, int mps)
+{
+ int cap, err = -EINVAL;
+ u16 ctl, v;
+
+ if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
+ goto out;
+
+ v = ffs(mps) - 8;
+ if (v > dev->pcie_mpss)
+ goto out;
+ v <<= 5;
+
+ cap = pci_pcie_cap(dev);
+ if (!cap)
+ goto out;
+
+ err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (err)
+ goto out;
+
+ if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
+ ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ ctl |= v;
+ err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
+ }
+out:
+ return err;
+}
+
+/**
* pci_select_bars - Make BAR mask from the type of resource
* @dev: the PCI device for which BAR mask is made
* @flags: resource type mask to be selected
@@ -3500,6 +3568,10 @@ static int __init pci_setup(char *str)
pci_hotplug_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
pci_hotplug_mem_size = memparse(str + 10, &str);
+ } else if (!strncmp(str, "pcie_bus_safe", 13)) {
+ pcie_bus_config = PCIE_BUS_SAFE;
+ } else if (!strncmp(str, "pcie_bus_perf", 13)) {
+ pcie_bus_config = PCIE_BUS_PERFORMANCE;
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
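pcie_get_mps() and pcie_set_mps() above convert between the 3-bit payload code held in PCI_EXP_DEVCTL (bits 7:5) and a byte count: bytes = 128 << code and code = ffs(bytes) - 8. A standalone check of that arithmetic in plain C, not part of the patch:

    #include <stdio.h>
    #include <strings.h>            /* ffs() */

    int main(void)
    {
            int bytes;

            for (bytes = 128; bytes <= 4096; bytes <<= 1) {
                    int code = ffs(bytes) - 8;       /* encoding used by pcie_set_mps() */

                    printf("payload %4d bytes -> code %d -> decodes to %d bytes\n",
                           bytes, code, 128 << code);
            }
            return 0;
    }

pcie_set_mps() additionally refuses codes larger than the device's advertised pcie_mpss before shifting the code into the register field.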
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index c8cee76..b74084e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -283,6 +283,8 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
#endif /* CONFIG_PCI_IOV */
+extern unsigned long pci_cardbus_resource_alignment(struct resource *);
+
static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
struct resource *res)
{
@@ -292,6 +294,8 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
return pci_sriov_resource_alignment(dev, resno);
#endif
+ if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
+ return pci_cardbus_resource_alignment(res);
return resource_alignment(res);
}
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 43421fb..9674e9f 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -24,6 +24,7 @@
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/kfifo.h>
#include "aerdrv.h"
static int forceload;
@@ -445,8 +446,7 @@ static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
return drv;
}
-static pci_ers_result_t reset_link(struct pcie_device *aerdev,
- struct pci_dev *dev)
+static pci_ers_result_t reset_link(struct pci_dev *dev)
{
struct pci_dev *udev;
pci_ers_result_t status;
@@ -486,7 +486,6 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
/**
* do_recovery - handle nonfatal/fatal error recovery process
- * @aerdev: pointer to a pcie_device data structure of root port
* @dev: pointer to a pci_dev data structure of agent detecting an error
* @severity: error severity type
*
@@ -494,8 +493,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
* error detected message to all downstream drivers within a hierarchy in
* question and return the returned code.
*/
-static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
- int severity)
+static void do_recovery(struct pci_dev *dev, int severity)
{
pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
enum pci_channel_state state;
@@ -511,7 +509,7 @@ static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
report_error_detected);
if (severity == AER_FATAL) {
- result = reset_link(aerdev, dev);
+ result = reset_link(dev);
if (result != PCI_ERS_RESULT_RECOVERED)
goto failed;
}
@@ -576,9 +574,73 @@ static void handle_error_source(struct pcie_device *aerdev,
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
} else
- do_recovery(aerdev, dev, info->severity);
+ do_recovery(dev, info->severity);
}
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+static void aer_recover_work_func(struct work_struct *work);
+
+#define AER_RECOVER_RING_ORDER 4
+#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
+
+struct aer_recover_entry
+{
+ u8 bus;
+ u8 devfn;
+ u16 domain;
+ int severity;
+};
+
+static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
+ AER_RECOVER_RING_SIZE);
+/*
+ * Mutual exclusion for writers of aer_recover_ring; the reader side doesn't
+ * need a lock, because there is only one reader and no lock is needed
+ * between the reader and the writers.
+ */
+static DEFINE_SPINLOCK(aer_recover_ring_lock);
+static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
+
+void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
+ int severity)
+{
+ unsigned long flags;
+ struct aer_recover_entry entry = {
+ .bus = bus,
+ .devfn = devfn,
+ .domain = domain,
+ .severity = severity,
+ };
+
+ spin_lock_irqsave(&aer_recover_ring_lock, flags);
+ if (kfifo_put(&aer_recover_ring, &entry))
+ schedule_work(&aer_recover_work);
+ else
+ pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
+ domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
+}
+EXPORT_SYMBOL_GPL(aer_recover_queue);
+
+static void aer_recover_work_func(struct work_struct *work)
+{
+ struct aer_recover_entry entry;
+ struct pci_dev *pdev;
+
+ while (kfifo_get(&aer_recover_ring, &entry)) {
+ pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
+ entry.devfn);
+ if (!pdev) {
+ pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ continue;
+ }
+ do_recovery(pdev, entry.severity);
+ }
+}
+#endif
+
/**
* get_device_error_info - read error status from dev and store it to info
 * @dev: pointer to the device expected to have an error record
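aer_recover_queue() above lets firmware-first error sources defer PCIe error recovery to the AER core's workqueue, with writers serialised by the spinlock and the single workqueue reader draining the ring unlocked; cper_severity_to_aer(), exported in the next hunk, converts the CPER severity beforehand. A hedged sketch of a caller, assuming both prototypes are visible to it; the function name is illustrative:

    /* Hand a firmware-reported PCIe error to the AER core; recovery then
     * runs from aer_recover_work_func() in process context. */
    static void queue_firmware_first_error(int domain, unsigned int bus,
                                           unsigned int devfn, int cper_sev)
    {
            int aer_sev = cper_severity_to_aer(cper_sev);

            aer_recover_queue(domain, bus, devfn, aer_sev);
    }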
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index b07a42e..3ea5173 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -204,7 +204,7 @@ void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
-static int cper_severity_to_aer(int cper_severity)
+int cper_severity_to_aer(int cper_severity)
{
switch (cper_severity) {
case CPER_SEV_RECOVERABLE:
@@ -215,6 +215,7 @@ static int cper_severity_to_aer(int cper_severity)
return AER_CORRECTABLE;
}
}
+EXPORT_SYMBOL_GPL(cper_severity_to_aer);
void cper_print_aer(const char *prefix, int cper_severity,
struct aer_capability_regs *aer)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 9ab492f..f3f94a5 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -68,21 +68,6 @@ static int __init pcibus_class_init(void)
}
postcore_initcall(pcibus_class_init);
-/*
- * Translate the low bits of the PCI base
- * to the resource type
- */
-static inline unsigned int pci_calc_resource_flags(unsigned int flags)
-{
- if (flags & PCI_BASE_ADDRESS_SPACE_IO)
- return IORESOURCE_IO;
-
- if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
- return IORESOURCE_MEM | IORESOURCE_PREFETCH;
-
- return IORESOURCE_MEM;
-}
-
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
u64 size = mask & maxbase; /* Find the significant bits */
@@ -101,18 +86,39 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask)
return size;
}
-static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
+static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
+ u32 mem_type;
+ unsigned long flags;
+
if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
- res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
- return pci_bar_io;
+ flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
+ flags |= IORESOURCE_IO;
+ return flags;
}
- res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
+ flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
+ flags |= IORESOURCE_MEM;
+ if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
+ flags |= IORESOURCE_PREFETCH;
- if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
- return pci_bar_mem64;
- return pci_bar_mem32;
+ mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ flags |= IORESOURCE_MEM_64;
+ break;
+ default:
+ dev_warn(&dev->dev,
+ "mem unknown type %x treated as 32-bit BAR\n",
+ mem_type);
+ break;
+ }
+ return flags;
}
/**
@@ -165,9 +171,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
l = 0;
if (type == pci_bar_unknown) {
- type = decode_bar(res, l);
- res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
- if (type == pci_bar_io) {
+ res->flags = decode_bar(dev, l);
+ res->flags |= IORESOURCE_SIZEALIGN;
+ if (res->flags & IORESOURCE_IO) {
l &= PCI_BASE_ADDRESS_IO_MASK;
mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
} else {
@@ -180,7 +186,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
mask = (u32)PCI_ROM_ADDRESS_MASK;
}
- if (type == pci_bar_mem64) {
+ if (res->flags & IORESOURCE_MEM_64) {
u64 l64 = l;
u64 sz64 = sz;
u64 mask64 = mask | (u64)~0 << 32;
@@ -204,7 +210,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
goto fail;
}
- res->flags |= IORESOURCE_MEM_64;
if ((sizeof(resource_size_t) < 8) && l) {
/* Address above 32-bit boundary; disable the BAR */
pci_write_config_dword(dev, pos, 0);
@@ -230,7 +235,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
}
out:
- return (type == pci_bar_mem64) ? 1 : 0;
+ return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
fail:
res->flags = 0;
goto out;
@@ -284,10 +289,6 @@ static void __devinit pci_read_bridge_io(struct pci_bus *child)
if (!res->end)
res->end = limit + 0xfff;
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [io %#06lx-%#06lx] (disabled)\n",
- base, limit);
}
}
@@ -308,10 +309,6 @@ static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [mem %#010lx-%#010lx] (disabled)\n",
- base, limit + 0xfffff);
}
}
@@ -359,10 +356,6 @@ static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [mem %#010lx-%#010lx pref] (disabled)\n",
- base, limit + 0xfffff);
}
}
@@ -725,12 +718,14 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
pci_write_config_word(dev, PCI_STATUS, 0xffff);
/* Prevent assigning a bus number that already exists.
- * This can happen when a bridge is hot-plugged */
- if (pci_find_bus(pci_domain_nr(bus), max+1))
- goto out;
- child = pci_add_new_bus(bus, dev, ++max);
- if (!child)
- goto out;
+ * This can happen when a bridge is hot-plugged, so in
+ * this case we only re-scan this bus. */
+ child = pci_find_bus(pci_domain_nr(bus), max+1);
+ if (!child) {
+ child = pci_add_new_bus(bus, dev, ++max);
+ if (!child)
+ goto out;
+ }
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
| ((unsigned int)(child->secondary) << 8)
@@ -861,6 +856,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
+ pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
@@ -1331,6 +1328,151 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
return nr;
}
+static int pcie_find_smpss(struct pci_dev *dev, void *data)
+{
+ u8 *smpss = data;
+
+ if (!pci_is_pcie(dev))
+ return 0;
+
+ /* For PCIE hotplug enabled slots not connected directly to a
+ * PCI-E root port, there can be problems when hotplugging
+ * devices. This is due to the possibility of hotplugging a
+ * device into the fabric with a smaller MPS than the devices
+ * currently running have configured. Modifying the MPS on the
+ * running devices could cause a fatal bus error due to an
+ * incoming frame being larger than the newly configured MPS.
+ * To work around this, the MPS for the entire fabric must be
+ * set to the minimum size. Any devices hotplugged into this
+ * fabric will have the minimum MPS set. If the PCI hotplug
+ * slot is directly connected to the root port and there are no
+ * other devices on the fabric (which seems to be the most
+ * common case), then this is not an issue and MPS discovery
+ * will occur as normal.
+ */
+ if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
+ (dev->bus->self &&
+ dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))
+ *smpss = 0;
+
+ if (*smpss > dev->pcie_mpss)
+ *smpss = dev->pcie_mpss;
+
+ return 0;
+}
+
+static void pcie_write_mps(struct pci_dev *dev, int mps)
+{
+ int rc, dev_mpss;
+
+ dev_mpss = 128 << dev->pcie_mpss;
+
+ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
+ if (dev->bus->self) {
+ dev_dbg(&dev->bus->dev, "Bus MPSS %d\n",
+ 128 << dev->bus->self->pcie_mpss);
+
+ /* For "MPS Force Max", the assumption is made that
+ * downstream communication will never be larger than
+ * the MRRS. So, the MPS only needs to be configured
+ * for the upstream communication. This being the case,
+ * walk from the top down and set the MPS of the child
+ * to that of the parent bus.
+ */
+ mps = 128 << dev->bus->self->pcie_mpss;
+ if (mps > dev_mpss)
+ dev_warn(&dev->dev, "MPS configured higher than"
+ " maximum supported by the device. If"
+ " a bus issue occurs, try running with"
+ " pci=pcie_bus_safe.\n");
+ }
+
+ dev->pcie_mpss = ffs(mps) - 8;
+ }
+
+ rc = pcie_set_mps(dev, mps);
+ if (rc)
+ dev_err(&dev->dev, "Failed attempting to set the MPS\n");
+}
+
+static void pcie_write_mrrs(struct pci_dev *dev, int mps)
+{
+ int rc, mrrs, dev_mpss;
+
+ /* In the "safe" case, do not configure the MRRS. There appear to be
+ * issues with setting MRRS to 0 on a number of devices.
+ */
+
+ if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
+ return;
+
+ dev_mpss = 128 << dev->pcie_mpss;
+
+ /* For Max performance, the MRRS must be set to the largest supported
+ * value. However, it cannot be configured larger than the MPS the
+ * device or the bus can support. This assumes that the largest MRRS
+ * available on the device cannot be smaller than the device MPSS.
+ */
+ mrrs = min(mps, dev_mpss);
+
+ /* MRRS is a R/W register. Invalid values can be written, but a
+ * subsequent read will verify if the value is acceptable or not.
+ * If the MRRS value provided is not acceptable (e.g., too large),
+ * shrink the value until it is acceptable to the HW.
+ */
+ while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
+ dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
+ " to %d. If any issues are encountered, please try "
+ "running with pci=pcie_bus_safe\n", mrrs);
+ rc = pcie_set_readrq(dev, mrrs);
+ if (rc)
+ dev_err(&dev->dev,
+ "Failed attempting to set the MRRS\n");
+
+ mrrs /= 2;
+ }
+}
+
+static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
+{
+ int mps = 128 << *(u8 *)data;
+
+ if (!pci_is_pcie(dev))
+ return 0;
+
+ dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+ pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
+
+ pcie_write_mps(dev, mps);
+ pcie_write_mrrs(dev, mps);
+
+ dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+ pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
+
+ return 0;
+}
+
+/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
+ * parents then children fashion. If this changes, then this code will not
+ * work as designed.
+ */
+void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
+{
+ u8 smpss = mpss;
+
+ if (!pci_is_pcie(bus->self))
+ return;
+
+ if (pcie_bus_config == PCIE_BUS_SAFE) {
+ pcie_find_smpss(bus->self, &smpss);
+ pci_walk_bus(bus, pcie_find_smpss, &smpss);
+ }
+
+ pcie_bus_configure_set(bus->self, &smpss);
+ pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
+}
+EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
+
unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
{
unsigned int devfn, pass, max = bus->secondary;
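In the default PCIE_BUS_SAFE mode, pcie_bus_configure_settings() above first walks the fabric with pcie_find_smpss() to find the smallest supported payload and then programs every function with it through pcie_bus_configure_set(). Reduced to the arithmetic, with made-up DEVCAP payload codes:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical MPSS codes: root port 512 B (2), endpoints 256 B (1)
             * and 1024 B (3). The fabric must run at the smallest of these. */
            int mpss[] = { 2, 1, 3 };
            int i, smpss = mpss[0];

            for (i = 1; i < 3; i++)                 /* mirrors pcie_find_smpss() */
                    if (mpss[i] < smpss)
                            smpss = mpss[i];

            printf("fabric MPS = %d bytes\n", 128 << smpss);   /* prints 256 */
            return 0;
    }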
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 9995842..784da9d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -34,6 +34,7 @@ struct resource_list_x {
resource_size_t start;
resource_size_t end;
resource_size_t add_size;
+ resource_size_t min_align;
unsigned long flags;
};
@@ -65,7 +66,7 @@ void pci_realloc(void)
*/
static void add_to_list(struct resource_list_x *head,
struct pci_dev *dev, struct resource *res,
- resource_size_t add_size)
+ resource_size_t add_size, resource_size_t min_align)
{
struct resource_list_x *list = head;
struct resource_list_x *ln = list->next;
@@ -84,13 +85,16 @@ static void add_to_list(struct resource_list_x *head,
tmp->end = res->end;
tmp->flags = res->flags;
tmp->add_size = add_size;
+ tmp->min_align = min_align;
list->next = tmp;
}
static void add_to_failed_list(struct resource_list_x *head,
struct pci_dev *dev, struct resource *res)
{
- add_to_list(head, dev, res, 0);
+ add_to_list(head, dev, res,
+ 0 /* dont care */,
+ 0 /* dont care */);
}
static void __dev_sort_resources(struct pci_dev *dev,
@@ -121,18 +125,18 @@ static inline void reset_resource(struct resource *res)
}
/**
- * adjust_resources_sorted() - satisfy any additional resource requests
+ * reassign_resources_sorted() - satisfy any additional resource requests
*
- * @add_head : head of the list tracking requests requiring additional
+ * @realloc_head : head of the list tracking requests requiring additional
* resources
* @head : head of the list tracking requests with allocated
* resources
*
- * Walk through each element of the add_head and try to procure
+ * Walk through each element of the realloc_head and try to procure
* additional resources for the element, provided the element
* is in the head list.
*/
-static void adjust_resources_sorted(struct resource_list_x *add_head,
+static void reassign_resources_sorted(struct resource_list_x *realloc_head,
struct resource_list *head)
{
struct resource *res;
@@ -141,8 +145,8 @@ static void adjust_resources_sorted(struct resource_list_x *add_head,
resource_size_t add_size;
int idx;
- prev = add_head;
- for (list = add_head->next; list;) {
+ prev = realloc_head;
+ for (list = realloc_head->next; list;) {
res = list->res;
/* skip resource that has been reset */
if (!res->flags)
@@ -159,13 +163,17 @@ static void adjust_resources_sorted(struct resource_list_x *add_head,
idx = res - &list->dev->resource[0];
add_size=list->add_size;
- if (!resource_size(res) && add_size) {
- res->end = res->start + add_size - 1;
- if(pci_assign_resource(list->dev, idx))
+ if (!resource_size(res)) {
+ res->start = list->start;
+ res->end = res->start + add_size - 1;
+ if(pci_assign_resource(list->dev, idx))
reset_resource(res);
- } else if (add_size) {
- adjust_resource(res, res->start,
- resource_size(res) + add_size);
+ } else {
+ resource_size_t align = list->min_align;
+ res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
+ if (pci_reassign_resource(list->dev, idx, add_size, align))
+ dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n",
+ res);
}
out:
tmp = list;
@@ -210,16 +218,16 @@ static void assign_requested_resources_sorted(struct resource_list *head,
}
static void __assign_resources_sorted(struct resource_list *head,
- struct resource_list_x *add_head,
+ struct resource_list_x *realloc_head,
struct resource_list_x *fail_head)
{
/* Satisfy the must-have resource requests */
assign_requested_resources_sorted(head, fail_head);
- /* Try to satisfy any additional nice-to-have resource
+ /* Try to satisfy any additional optional resource
requests */
- if (add_head)
- adjust_resources_sorted(add_head, head);
+ if (realloc_head)
+ reassign_resources_sorted(realloc_head, head);
free_list(resource_list, head);
}
@@ -235,7 +243,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev,
}
static void pbus_assign_resources_sorted(const struct pci_bus *bus,
- struct resource_list_x *add_head,
+ struct resource_list_x *realloc_head,
struct resource_list_x *fail_head)
{
struct pci_dev *dev;
@@ -245,7 +253,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus,
list_for_each_entry(dev, &bus->devices, bus_list)
__dev_sort_resources(dev, &head);
- __assign_resources_sorted(&head, add_head, fail_head);
+ __assign_resources_sorted(&head, realloc_head, fail_head);
}
void pci_setup_cardbus(struct pci_bus *bus)
@@ -336,7 +344,6 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
/* Clear upper 16 bits of I/O base/limit. */
io_upper16 = 0;
l = 0x00f0;
- dev_info(&bridge->dev, " bridge window [io disabled]\n");
}
/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
@@ -362,7 +369,6 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
dev_info(&bridge->dev, " bridge window %pR\n", res);
} else {
l = 0x0000fff0;
- dev_info(&bridge->dev, " bridge window [mem disabled]\n");
}
pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
@@ -393,7 +399,6 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
dev_info(&bridge->dev, " bridge window %pR\n", res);
} else {
l = 0x0000fff0;
- dev_info(&bridge->dev, " bridge window [mem pref disabled]\n");
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
@@ -543,13 +548,27 @@ static resource_size_t calculate_memsize(resource_size_t size,
return size;
}
+static resource_size_t get_res_add_size(struct resource_list_x *realloc_head,
+ struct resource *res)
+{
+ struct resource_list_x *list;
+
+ /* check if it is in realloc_head list */
+ for (list = realloc_head->next; list && list->res != res;
+ list = list->next);
+ if (list)
+ return list->add_size;
+
+ return 0;
+}
+
/**
* pbus_size_io() - size the io window of a given bus
*
* @bus : the bus
 * @min_size : the minimum io window that must be allocated
* @add_size : additional optional io window
- * @add_head : track the additional io window on this list
+ * @realloc_head : track the additional io window on this list
*
* Sizing the IO windows of the PCI-PCI bridge is trivial,
* since these windows have 4K granularity and the IO ranges
@@ -557,11 +576,12 @@ static resource_size_t calculate_memsize(resource_size_t size,
* We must be careful with the ISA aliasing though.
*/
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
- resource_size_t add_size, struct resource_list_x *add_head)
+ resource_size_t add_size, struct resource_list_x *realloc_head)
{
struct pci_dev *dev;
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
unsigned long size = 0, size0 = 0, size1 = 0;
+ resource_size_t children_add_size = 0;
if (!b_res)
return;
@@ -582,11 +602,16 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
size += r_size;
else
size1 += r_size;
+
+ if (realloc_head)
+ children_add_size += get_res_add_size(realloc_head, r);
}
}
size0 = calculate_iosize(size, min_size, size1,
resource_size(b_res), 4096);
- size1 = (!add_head || (add_head && !add_size)) ? size0 :
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
calculate_iosize(size, min_size+add_size, size1,
resource_size(b_res), 4096);
if (!size0 && !size1) {
@@ -601,8 +626,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
b_res->start = 4096;
b_res->end = b_res->start + size0 - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
- if (size1 > size0 && add_head)
- add_to_list(add_head, bus->self, b_res, size1-size0);
+ if (size1 > size0 && realloc_head)
+ add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096);
}
/**
@@ -611,7 +636,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
* @bus : the bus
 * @min_size : the minimum memory window that must be allocated
* @add_size : additional optional memory window
- * @add_head : track the additional memory window on this list
+ * @realloc_head : track the additional memory window on this list
*
* Calculate the size of the bus and minimal alignment which
* guarantees that all child resources fit in this size.
@@ -619,7 +644,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
unsigned long type, resource_size_t min_size,
resource_size_t add_size,
- struct resource_list_x *add_head)
+ struct resource_list_x *realloc_head)
{
struct pci_dev *dev;
resource_size_t min_align, align, size, size0, size1;
@@ -627,6 +652,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus, type);
unsigned int mem64_mask = 0;
+ resource_size_t children_add_size = 0;
if (!b_res)
return 0;
@@ -648,6 +674,16 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (r->parent || (r->flags & mask) != type)
continue;
r_size = resource_size(r);
+#ifdef CONFIG_PCI_IOV
+ /* put SRIOV requested res to the optional list */
+ if (realloc_head && i >= PCI_IOV_RESOURCES &&
+ i <= PCI_IOV_RESOURCE_END) {
+ r->end = r->start - 1;
+ add_to_list(realloc_head, dev, r, r_size, 0/* don't care */);
+ children_add_size += r_size;
+ continue;
+ }
+#endif
/* For bridges size != alignment */
align = pci_resource_alignment(dev, r);
order = __ffs(align) - 20;
@@ -668,6 +704,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (order > max_order)
max_order = order;
mem64_mask &= r->flags & IORESOURCE_MEM_64;
+
+ if (realloc_head)
+ children_add_size += get_res_add_size(realloc_head, r);
}
}
align = 0;
@@ -684,7 +723,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
align += aligns[order];
}
size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
- size1 = (!add_head || (add_head && !add_size)) ? size0 :
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
calculate_memsize(size, min_size+add_size, 0,
resource_size(b_res), min_align);
if (!size0 && !size1) {
@@ -698,12 +739,22 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->start = min_align;
b_res->end = size0 + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
- if (size1 > size0 && add_head)
- add_to_list(add_head, bus->self, b_res, size1-size0);
+ if (size1 > size0 && realloc_head)
+ add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
return 1;
}
-static void pci_bus_size_cardbus(struct pci_bus *bus)
+unsigned long pci_cardbus_resource_alignment(struct resource *res)
+{
+ if (res->flags & IORESOURCE_IO)
+ return pci_cardbus_io_size;
+ if (res->flags & IORESOURCE_MEM)
+ return pci_cardbus_mem_size;
+ return 0;
+}
+
+static void pci_bus_size_cardbus(struct pci_bus *bus,
+ struct resource_list_x *realloc_head)
{
struct pci_dev *bridge = bus->self;
struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
@@ -714,12 +765,14 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
* a fixed amount of bus space for CardBus bridges.
*/
b_res[0].start = 0;
- b_res[0].end = pci_cardbus_io_size - 1;
b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
+ if (realloc_head)
+ add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* don't care */);
b_res[1].start = 0;
- b_res[1].end = pci_cardbus_io_size - 1;
b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
+ if (realloc_head)
+ add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* don't care */);
/*
* Check whether prefetchable memory is supported
@@ -739,21 +792,31 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
*/
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
b_res[2].start = 0;
- b_res[2].end = pci_cardbus_mem_size - 1;
b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
+ if (realloc_head)
+ add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* don't care */);
b_res[3].start = 0;
- b_res[3].end = pci_cardbus_mem_size - 1;
b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
+ if (realloc_head)
+ add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* don't care */);
} else {
b_res[3].start = 0;
- b_res[3].end = pci_cardbus_mem_size * 2 - 1;
b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
+ if (realloc_head)
+ add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* don't care */);
}
+
+ /* Set the size of the resource to zero, so that the resource does not
+ * get assigned during the required-resource allocation cycle but only
+ * during the optional-resource allocation cycle.
+ */
+ b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1;
+ b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0;
}
void __ref __pci_bus_size_bridges(struct pci_bus *bus,
- struct resource_list_x *add_head)
+ struct resource_list_x *realloc_head)
{
struct pci_dev *dev;
unsigned long mask, prefmask;
@@ -766,12 +829,12 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
switch (dev->class >> 8) {
case PCI_CLASS_BRIDGE_CARDBUS:
- pci_bus_size_cardbus(b);
+ pci_bus_size_cardbus(b, realloc_head);
break;
case PCI_CLASS_BRIDGE_PCI:
default:
- __pci_bus_size_bridges(b, add_head);
+ __pci_bus_size_bridges(b, realloc_head);
break;
}
}
@@ -795,7 +858,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
* Follow thru
*/
default:
- pbus_size_io(bus, 0, additional_io_size, add_head);
+ pbus_size_io(bus, 0, additional_io_size, realloc_head);
/* If the bridge supports prefetchable range, size it
separately. If it doesn't, or its prefetchable window
has already been allocated by arch code, try
@@ -803,11 +866,11 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
resources. */
mask = IORESOURCE_MEM;
prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head))
+ if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, realloc_head))
mask = prefmask; /* Success, size non-prefetch only. */
else
additional_mem_size += additional_mem_size;
- pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head);
+ pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, realloc_head);
break;
}
}
@@ -819,20 +882,20 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
EXPORT_SYMBOL(pci_bus_size_bridges);
static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
- struct resource_list_x *add_head,
+ struct resource_list_x *realloc_head,
struct resource_list_x *fail_head)
{
struct pci_bus *b;
struct pci_dev *dev;
- pbus_assign_resources_sorted(bus, add_head, fail_head);
+ pbus_assign_resources_sorted(bus, realloc_head, fail_head);
list_for_each_entry(dev, &bus->devices, bus_list) {
b = dev->subordinate;
if (!b)
continue;
- __pci_bus_assign_resources(b, add_head, fail_head);
+ __pci_bus_assign_resources(b, realloc_head, fail_head);
switch (dev->class >> 8) {
case PCI_CLASS_BRIDGE_PCI:
@@ -1042,7 +1105,7 @@ void __init
pci_assign_unassigned_resources(void)
{
struct pci_bus *bus;
- struct resource_list_x add_list; /* list of resources that
+ struct resource_list_x realloc_list; /* list of resources that
want additional resources */
int tried_times = 0;
enum release_type rel_type = leaf_only;
@@ -1055,7 +1118,7 @@ pci_assign_unassigned_resources(void)
head.next = NULL;
- add_list.next = NULL;
+ realloc_list.next = NULL;
pci_try_num = max_depth + 1;
printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
@@ -1065,12 +1128,12 @@ again:
/* Depth first, calculate sizes and alignments of all
subordinate buses. */
list_for_each_entry(bus, &pci_root_buses, node)
- __pci_bus_size_bridges(bus, &add_list);
+ __pci_bus_size_bridges(bus, &realloc_list);
/* Depth last, allocate resources and update the hardware. */
list_for_each_entry(bus, &pci_root_buses, node)
- __pci_bus_assign_resources(bus, &add_list, &head);
- BUG_ON(add_list.next);
+ __pci_bus_assign_resources(bus, &realloc_list, &head);
+ BUG_ON(realloc_list.next);
tried_times++;
/* any device complain? */
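The setup-bus.c hunks above add get_res_add_size() to look up a resource's optional extra size on the realloc list, and park the CardBus windows with start = 1, end = 0 so that their required size computes to zero and they are only sized in the optional pass. A minimal user-space sketch, assuming the usual end - start + 1 size rule and not part of the patch itself, shows why a parked window contributes nothing to the mandatory sizing:

/* Illustrative sketch: resource_size() here mirrors the kernel helper
 * (end - start + 1); a window parked with start = 1, end = 0 reports
 * size zero, so the mandatory pass skips it and only the realloc list
 * entry added by add_to_list() sizes it later.
 */
#include <stdio.h>

struct res { unsigned long long start, end; };

static unsigned long long resource_size(const struct res *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	struct res parked = { .start = 1, .end = 0 };     /* as in pci_bus_size_cardbus() */
	struct res normal = { .start = 0, .end = 0xfff };

	printf("parked size: %llu\n", resource_size(&parked));  /* 0 */
	printf("normal size: %llu\n", resource_size(&normal));  /* 4096 */
	return 0;
}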
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index eec9738..eb219a1 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -21,7 +21,7 @@
static void __init
pdev_fixup_irq(struct pci_dev *dev,
u8 (*swizzle)(struct pci_dev *, u8 *),
- int (*map_irq)(struct pci_dev *, u8, u8))
+ int (*map_irq)(const struct pci_dev *, u8, u8))
{
u8 pin, slot;
int irq = 0;
@@ -56,7 +56,7 @@ pdev_fixup_irq(struct pci_dev *dev,
void __init
pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
- int (*map_irq)(struct pci_dev *, u8, u8))
+ int (*map_irq)(const struct pci_dev *, u8, u8))
{
struct pci_dev *dev = NULL;
for_each_pci_dev(dev)
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index bc0e6ee..51a9095 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -74,8 +74,7 @@ void pci_update_resource(struct pci_dev *dev, int resno)
resno, new, check);
}
- if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
- (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+ if (res->flags & IORESOURCE_MEM_64) {
new = region.start >> 16 >> 16;
pci_write_config_dword(dev, reg + 4, new);
pci_read_config_dword(dev, reg + 4, &check);
@@ -129,16 +128,16 @@ void pci_disable_bridge_window(struct pci_dev *dev)
}
#endif /* CONFIG_PCI_QUIRKS */
+
+
static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
- int resno)
+ int resno, resource_size_t size, resource_size_t align)
{
struct resource *res = dev->resource + resno;
- resource_size_t size, min, align;
+ resource_size_t min;
int ret;
- size = resource_size(res);
min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
- align = pci_resource_alignment(dev, res);
/* First, try exact prefetching match.. */
ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -155,56 +154,101 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
pcibios_align_resource, dev);
}
+ return ret;
+}
- if (ret < 0 && dev->fw_addr[resno]) {
- struct resource *root, *conflict;
- resource_size_t start, end;
+static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ int resno, resource_size_t size)
+{
+ struct resource *root, *conflict;
+ resource_size_t start, end;
+ int ret = 0;
- /*
- * If we failed to assign anything, let's try the address
- * where firmware left it. That at least has a chance of
- * working, which is better than just leaving it disabled.
- */
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ else
+ root = &iomem_resource;
+
+ start = res->start;
+ end = res->end;
+ res->start = dev->fw_addr[resno];
+ res->end = res->start + size - 1;
+ dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
+ resno, res);
+ conflict = request_resource_conflict(root, res);
+ if (conflict) {
+ dev_info(&dev->dev,
+ "BAR %d: %pR conflicts with %s %pR\n", resno,
+ res, conflict->name, conflict);
+ res->start = start;
+ res->end = end;
+ ret = 1;
+ }
+ return ret;
+}
+
+static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align)
+{
+ struct resource *res = dev->resource + resno;
+ struct pci_bus *bus;
+ int ret;
+ char *type;
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
+ bus = dev->bus;
+ while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) {
+ if (!bus->parent || !bus->self->transparent)
+ break;
+ bus = bus->parent;
+ }
+
+ if (ret) {
+ if (res->flags & IORESOURCE_MEM)
+ if (res->flags & IORESOURCE_PREFETCH)
+ type = "mem pref";
+ else
+ type = "mem";
+ else if (res->flags & IORESOURCE_IO)
+ type = "io";
else
- root = &iomem_resource;
-
- start = res->start;
- end = res->end;
- res->start = dev->fw_addr[resno];
- res->end = res->start + size - 1;
- dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
- resno, res);
- conflict = request_resource_conflict(root, res);
- if (conflict) {
- dev_info(&dev->dev,
- "BAR %d: %pR conflicts with %s %pR\n", resno,
- res, conflict->name, conflict);
- res->start = start;
- res->end = end;
- } else
- ret = 0;
+ type = "unknown";
+ dev_info(&dev->dev,
+ "BAR %d: can't assign %s (size %#llx)\n",
+ resno, type, (unsigned long long) resource_size(res));
}
+ return ret;
+}
+
+int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
+ resource_size_t min_align)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t new_size;
+ int ret;
+
+ if (!res->parent) {
+ dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR\n",
+ resno, res);
+ return -EINVAL;
+ }
+
+ new_size = resource_size(res) + addsize + min_align;
+ ret = _pci_assign_resource(dev, resno, new_size, min_align);
if (!ret) {
res->flags &= ~IORESOURCE_STARTALIGN;
dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
}
-
return ret;
}
int pci_assign_resource(struct pci_dev *dev, int resno)
{
struct resource *res = dev->resource + resno;
- resource_size_t align;
+ resource_size_t align, size;
struct pci_bus *bus;
int ret;
- char *type;
align = pci_resource_alignment(dev, res);
if (!align) {
@@ -214,34 +258,27 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
}
bus = dev->bus;
- while ((ret = __pci_assign_resource(bus, dev, resno))) {
- if (bus->parent && bus->self->transparent)
- bus = bus->parent;
- else
- bus = NULL;
- if (bus)
- continue;
- break;
- }
+ size = resource_size(res);
+ ret = _pci_assign_resource(dev, resno, size, align);
- if (ret) {
- if (res->flags & IORESOURCE_MEM)
- if (res->flags & IORESOURCE_PREFETCH)
- type = "mem pref";
- else
- type = "mem";
- else if (res->flags & IORESOURCE_IO)
- type = "io";
- else
- type = "unknown";
- dev_info(&dev->dev,
- "BAR %d: can't assign %s (size %#llx)\n",
- resno, type, (unsigned long long) resource_size(res));
- }
+ /*
+ * If we failed to assign anything, let's try the address
+ * where firmware left it. That at least has a chance of
+ * working, which is better than just leaving it disabled.
+ */
+ if (ret < 0 && dev->fw_addr[resno])
+ ret = pci_revert_fw_address(res, dev, resno, size);
+ if (!ret) {
+ res->flags &= ~IORESOURCE_STARTALIGN;
+ dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+ }
return ret;
}
+
/* Sort resources by alignment */
void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
{
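The setup-res.c rework above factors the bus walk out of pci_assign_resource() into _pci_assign_resource(), which retries the allocation on each upstream bus as long as the bridge in between is transparent. A small stand-alone sketch of that retry loop follows; the bus and allocator types are made up for illustration (the real code uses struct pci_bus and pci_bus_alloc_resource()):

/* Sketch of the walk-up-through-transparent-bridges retry loop. */
#include <stdio.h>

struct bus { struct bus *parent; int transparent; };

/* Pretend allocation that only succeeds on the root bus. */
static int try_alloc_on(struct bus *b)
{
	return b->parent ? -1 : 0;
}

static int assign_walking_up(struct bus *bus)
{
	int ret;

	while ((ret = try_alloc_on(bus))) {
		if (!bus->parent || !bus->transparent)
			break;		/* stop at a non-transparent bridge or the root */
		bus = bus->parent;	/* otherwise retry on the upstream bus */
	}
	return ret;
}

int main(void)
{
	struct bus root = { NULL, 0 };
	struct bus leaf = { &root, 1 };

	printf("result: %d\n", assign_walking_up(&leaf));	/* 0: succeeded on root */
	return 0;
}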
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 492b7d8..6fa215a 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -16,7 +16,7 @@
#include <xen/interface/io/pciif.h>
#include <asm/xen/pci.h>
#include <linux/interrupt.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/time.h>
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c
index 4c3e94c..f56d7de 100644
--- a/drivers/pcmcia/pxa2xx_balloon3.c
+++ b/drivers/pcmcia/pxa2xx_balloon3.c
@@ -103,22 +103,12 @@ static int balloon3_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void balloon3_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void balloon3_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level balloon3_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = balloon3_pcmcia_hw_init,
.hw_shutdown = balloon3_pcmcia_hw_shutdown,
.socket_state = balloon3_pcmcia_socket_state,
.configure_socket = balloon3_pcmcia_configure_socket,
- .socket_init = balloon3_pcmcia_socket_init,
- .socket_suspend = balloon3_pcmcia_socket_suspend,
.first = 0,
.nr = 1,
};
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
index 05913d0..63f4d52 100644
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ b/drivers/pcmcia/pxa2xx_cm_x255.c
@@ -102,23 +102,12 @@ static int cmx255_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void cmx255_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void cmx255_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
-
static struct pcmcia_low_level cmx255_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = cmx255_pcmcia_hw_init,
.hw_shutdown = cmx255_pcmcia_shutdown,
.socket_state = cmx255_pcmcia_socket_state,
.configure_socket = cmx255_pcmcia_configure_socket,
- .socket_init = cmx255_pcmcia_socket_init,
- .socket_suspend = cmx255_pcmcia_socket_suspend,
.nr = 1,
};
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index 5662646..6ee42b4 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -82,23 +82,12 @@ static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void cmx270_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void cmx270_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
-
static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = cmx270_pcmcia_hw_init,
.hw_shutdown = cmx270_pcmcia_shutdown,
.socket_state = cmx270_pcmcia_socket_state,
.configure_socket = cmx270_pcmcia_configure_socket,
- .socket_init = cmx270_pcmcia_socket_init,
- .socket_suspend = cmx270_pcmcia_socket_suspend,
.nr = 1,
};
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c
index 443cb7f..c6dec57 100644
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ b/drivers/pcmcia/pxa2xx_colibri.c
@@ -116,14 +116,6 @@ colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void colibri_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void colibri_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level colibri_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -135,9 +127,6 @@ static struct pcmcia_low_level colibri_pcmcia_ops = {
.socket_state = colibri_pcmcia_socket_state,
.configure_socket = colibri_pcmcia_configure_socket,
-
- .socket_init = colibri_pcmcia_socket_init,
- .socket_suspend = colibri_pcmcia_socket_suspend,
};
static struct platform_device *colibri_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 92016fe..aded706c 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -128,22 +128,12 @@ static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return ret;
}
-static void mst_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void mst_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level mst_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = mst_pcmcia_hw_init,
.hw_shutdown = mst_pcmcia_hw_shutdown,
.socket_state = mst_pcmcia_socket_state,
.configure_socket = mst_pcmcia_configure_socket,
- .socket_init = mst_pcmcia_socket_init,
- .socket_suspend = mst_pcmcia_socket_suspend,
.nr = 2,
};
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c
index 69f7367..d589ad1 100644
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ b/drivers/pcmcia/pxa2xx_palmld.c
@@ -65,14 +65,6 @@ static int palmld_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void palmld_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void palmld_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level palmld_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -84,9 +76,6 @@ static struct pcmcia_low_level palmld_pcmcia_ops = {
.socket_state = palmld_pcmcia_socket_state,
.configure_socket = palmld_pcmcia_configure_socket,
-
- .socket_init = palmld_pcmcia_socket_init,
- .socket_suspend = palmld_pcmcia_socket_suspend,
};
static struct platform_device *palmld_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
index d0ad6a7..9c6a04b 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -117,14 +117,6 @@ static int palmtc_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return ret;
}
-static void palmtc_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void palmtc_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level palmtc_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -136,9 +128,6 @@ static struct pcmcia_low_level palmtc_pcmcia_ops = {
.socket_state = palmtc_pcmcia_socket_state,
.configure_socket = palmtc_pcmcia_configure_socket,
-
- .socket_init = palmtc_pcmcia_socket_init,
- .socket_suspend = palmtc_pcmcia_socket_suspend,
};
static struct platform_device *palmtc_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
index 1a25804..80645a6 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -67,14 +67,6 @@ palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void palmtx_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void palmtx_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level palmtx_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -86,9 +78,6 @@ static struct pcmcia_low_level palmtx_pcmcia_ops = {
.socket_state = palmtx_pcmcia_socket_state,
.configure_socket = palmtx_pcmcia_configure_socket,
-
- .socket_init = palmtx_pcmcia_socket_init,
- .socket_suspend = palmtx_pcmcia_socket_suspend,
};
static struct platform_device *palmtx_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
index d08802f..9396222 100644
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -28,7 +28,6 @@
#include "soc_common.h"
-#define SG2_S0_BUFF_CTL 120
#define SG2_S0_POWER_CTL 108
#define SG2_S0_GPIO_RESET 82
#define SG2_S0_GPIO_DETECT 53
@@ -38,6 +37,11 @@ static struct pcmcia_irqs irqs[] = {
{ 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" },
};
+static struct gpio sg2_pcmcia_gpios[] = {
+ { SG2_S0_GPIO_RESET, GPIOF_OUT_INIT_HIGH, "PCMCIA Reset" },
+ { SG2_S0_POWER_CTL, GPIOF_OUT_INIT_HIGH, "PCMCIA Power Ctrl" },
+};
+
static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY);
@@ -122,37 +126,23 @@ static int __init sg2_pcmcia_init(void)
if (!sg2_pcmcia_device)
return -ENOMEM;
- ret = gpio_request(SG2_S0_BUFF_CTL, "SG2 CF buff ctl");
+ ret = gpio_request_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
if (ret)
goto error_put_platform_device;
- ret = gpio_request(SG2_S0_POWER_CTL, "SG2 CF power ctl");
- if (ret)
- goto error_free_gpio_buff_ctl;
- ret = gpio_request(SG2_S0_GPIO_RESET, "SG2 CF reset");
- if (ret)
- goto error_free_gpio_power_ctl;
- /* Set gpio directions */
- gpio_direction_output(SG2_S0_BUFF_CTL, 0);
- gpio_direction_output(SG2_S0_POWER_CTL, 1);
- gpio_direction_output(SG2_S0_GPIO_RESET, 1);
ret = platform_device_add_data(sg2_pcmcia_device,
&sg2_pcmcia_ops,
sizeof(sg2_pcmcia_ops));
if (ret)
- goto error_free_gpio_reset;
+ goto error_free_gpios;
ret = platform_device_add(sg2_pcmcia_device);
if (ret)
- goto error_free_gpio_reset;
+ goto error_free_gpios;
return 0;
-error_free_gpio_reset:
- gpio_free(SG2_S0_GPIO_RESET);
-error_free_gpio_power_ctl:
- gpio_free(SG2_S0_POWER_CTL);
-error_free_gpio_buff_ctl:
- gpio_free(SG2_S0_BUFF_CTL);
+error_free_gpios:
+ gpio_free_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
error_put_platform_device:
platform_device_put(sg2_pcmcia_device);
@@ -162,9 +152,7 @@ error_put_platform_device:
static void __exit sg2_pcmcia_exit(void)
{
platform_device_unregister(sg2_pcmcia_device);
- gpio_free(SG2_S0_BUFF_CTL);
- gpio_free(SG2_S0_POWER_CTL);
- gpio_free(SG2_S0_GPIO_RESET);
+ gpio_free_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
}
fs_initcall(sg2_pcmcia_init);
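The stargate2 hunk above replaces three individual gpio_request()/gpio_direction_output()/gpio_free() sequences with a single struct gpio table handled by gpio_request_array() and gpio_free_array(). A minimal sketch of the same pattern, with hypothetical pin numbers and labels, might look like this:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/gpio.h>

/* Hypothetical pin numbers and labels, for illustration only. */
static struct gpio demo_gpios[] = {
	{ 82,  GPIOF_OUT_INIT_HIGH, "demo reset"     },
	{ 108, GPIOF_OUT_INIT_HIGH, "demo power ctl" },
};

static int __init demo_init(void)
{
	/* One call requests and labels every entry and programs its
	 * initial direction and level. */
	return gpio_request_array(demo_gpios, ARRAY_SIZE(demo_gpios));
}

static void __exit demo_exit(void)
{
	gpio_free_array(demo_gpios, ARRAY_SIZE(demo_gpios));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");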
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index a51f207..1064b1c 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -136,22 +136,12 @@ static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void viper_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void viper_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level viper_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = viper_pcmcia_hw_init,
.hw_shutdown = viper_pcmcia_hw_shutdown,
.socket_state = viper_pcmcia_socket_state,
.configure_socket = viper_pcmcia_configure_socket,
- .socket_init = viper_pcmcia_socket_init,
- .socket_suspend = viper_pcmcia_socket_suspend,
.nr = 1,
};
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 768f957..a0a9c2a 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -186,8 +186,8 @@ static int soc_common_pcmcia_sock_init(struct pcmcia_socket *sock)
struct soc_pcmcia_socket *skt = to_soc_pcmcia_socket(sock);
debug(skt, 2, "initializing socket\n");
-
- skt->ops->socket_init(skt);
+ if (skt->ops->socket_init)
+ skt->ops->socket_init(skt);
return 0;
}
@@ -207,7 +207,8 @@ static int soc_common_pcmcia_suspend(struct pcmcia_socket *sock)
debug(skt, 2, "suspending socket\n");
- skt->ops->socket_suspend(skt);
+ if (skt->ops->socket_suspend)
+ skt->ops->socket_suspend(skt);
return 0;
}
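The soc_common.c hunks above make the socket_init and socket_suspend hooks optional by checking for NULL before calling them, which is what lets all the empty stubs be dropped from the pxa2xx board files earlier in this series. A tiny stand-alone sketch of that optional-callback pattern, with illustrative names rather than the kernel structs:

#include <stdio.h>

struct ops {
	void (*socket_init)(void *skt);
	void (*socket_suspend)(void *skt);
};

static void sock_init(const struct ops *ops, void *skt)
{
	if (ops->socket_init)		/* optional: skip when not provided */
		ops->socket_init(skt);
}

static void my_init(void *skt) { (void)skt; puts("board init hook"); }

int main(void)
{
	struct ops with_hook = { .socket_init = my_init };
	struct ops without   = { 0 };

	sock_init(&with_hook, NULL);	/* calls the hook */
	sock_init(&without, NULL);	/* silently does nothing */
	return 0;
}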
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 45e0191..1e88d47 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -769,4 +769,12 @@ config INTEL_OAKTRAIL
enable/disable the Camera, WiFi, BT etc. devices. If in doubt, say Y
here; it will only load on supported platforms.
+config SAMSUNG_Q10
+ tristate "Samsung Q10 Extras"
+ depends on SERIO_I8042
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+ This driver provides support for backlight control on Samsung Q10
+ and related laptops, including Dell Latitude X200.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index afc1f83..293a320 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
+obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index e1c4938..af2bb20 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -99,6 +99,7 @@ enum acer_wmi_event_ids {
static const struct key_entry acer_wmi_keymap[] = {
{KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
{KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */
+ {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */
{KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */
{KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
@@ -304,6 +305,10 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = {
.wireless = 2,
};
+static struct quirk_entry quirk_lenovo_ideapad_s205 = {
+ .wireless = 3,
+};
+
/* The Aspire One has a dummy ACPI-WMI interface - disable it */
static struct dmi_system_id __devinitdata acer_blacklist[] = {
{
@@ -450,6 +455,15 @@ static struct dmi_system_id acer_quirks[] = {
},
.driver_data = &quirk_medion_md_98300,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
{}
};
@@ -542,6 +556,12 @@ struct wmi_interface *iface)
return AE_ERROR;
*value = result & 0x1;
return AE_OK;
+ case 3:
+ err = ec_read(0x78, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result & 0x1;
+ return AE_OK;
default:
err = ec_read(0xA, &result);
if (err)
@@ -1266,8 +1286,13 @@ static void acer_rfkill_update(struct work_struct *ignored)
acpi_status status;
status = get_u32(&state, ACER_CAP_WIRELESS);
- if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(wireless_rfkill, !state);
+ if (ACPI_SUCCESS(status)) {
+ if (quirks->wireless == 3) {
+ rfkill_set_hw_state(wireless_rfkill, !state);
+ } else {
+ rfkill_set_sw_state(wireless_rfkill, !state);
+ }
+ }
if (has_cap(ACER_CAP_BLUETOOTH)) {
status = get_u32(&state, ACER_CAP_BLUETOOTH);
@@ -1400,6 +1425,9 @@ static ssize_t show_bool_threeg(struct device *dev,
{
u32 result; \
acpi_status status;
+
+ pr_info("This threeg sysfs will be removed in 2012"
+ " - used by: %s\n", current->comm);
if (wmi_has_guid(WMID_GUID3))
status = wmid3_get_device_status(&result,
ACER_WMID3_GDS_THREEG);
@@ -1415,8 +1443,10 @@ static ssize_t set_bool_threeg(struct device *dev,
{
u32 tmp = simple_strtoul(buf, NULL, 10);
acpi_status status = set_u32(tmp, ACER_CAP_THREEG);
- if (ACPI_FAILURE(status))
- return -EINVAL;
+ pr_info("This threeg sysfs will be removed in 2012"
+ " - used by: %s\n", current->comm);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
return count;
}
static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
@@ -1425,6 +1455,8 @@ static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
char *buf)
{
+ pr_info("This interface sysfs will be removed in 2012"
+ " - used by: %s\n", current->comm);
switch (interface->type) {
case ACER_AMW0:
return sprintf(buf, "AMW0\n");
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index fca3489..760c6d7 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -182,6 +182,7 @@ static const struct bios_settings_t bios_tbl[] = {
{"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
/* Acer 531 */
{"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
/* Gateway */
@@ -703,15 +704,15 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Feuerer");
MODULE_DESCRIPTION("Aspire One temperature and fan driver");
MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
-MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1410*:");
-MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1810*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMU*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
module_init(acerhdf_init);
module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d65df92..fa6d7ec 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -70,11 +70,10 @@ MODULE_LICENSE("GPL");
* WAPF defines the behavior of the Fn+Fx wlan key
* The significance of values is yet to be found, but
* most of the time:
- * 0x0 will do nothing
- * 0x1 will allow to control the device with Fn+Fx key.
- * 0x4 will send an ACPI event (0x88) while pressing the Fn+Fx key
- * 0x5 like 0x1 or 0x4
- * So, if something doesn't work as you want, just try other values =)
+ * Bit | Bluetooth | WLAN
+ * 0 | Hardware | Hardware
+ * 1 | Hardware | Software
+ * 4 | Software | Software
*/
static uint wapf = 1;
module_param(wapf, uint, 0444);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 0580d99..b0859d4 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -38,6 +38,24 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID);
+/*
+ * WAPF defines the behavior of the Fn+Fx wlan key
+ * The significance of values is yet to be found, but
+ * most of the time:
+ * Bit | Bluetooth | WLAN
+ * 0 | Hardware | Hardware
+ * 1 | Hardware | Software
+ * 4 | Software | Software
+ */
+static uint wapf;
+module_param(wapf, uint, 0444);
+MODULE_PARM_DESC(wapf, "WAPF value");
+
+static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+{
+ driver->wapf = wapf;
+}
+
static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
@@ -53,16 +71,16 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x51, { KEY_WWW } },
{ KE_KEY, 0x55, { KEY_CALC } },
{ KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */
- { KE_KEY, 0x5D, { KEY_WLAN } },
- { KE_KEY, 0x5E, { KEY_WLAN } },
- { KE_KEY, 0x5F, { KEY_WLAN } },
+ { KE_KEY, 0x5D, { KEY_WLAN } }, /* Wireless console Toggle */
+ { KE_KEY, 0x5E, { KEY_WLAN } }, /* Wireless console Enable */
+ { KE_KEY, 0x5F, { KEY_WLAN } }, /* Wireless console Disable */
{ KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
- { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } },
+ { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
{ KE_KEY, 0x82, { KEY_CAMERA } },
{ KE_KEY, 0x88, { KEY_RFKILL } },
{ KE_KEY, 0x8A, { KEY_PROG1 } },
@@ -81,6 +99,7 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
.keymap = asus_nb_wmi_keymap,
.input_name = "Asus WMI hotkeys",
.input_phys = ASUS_NB_WMI_FILE "/input0",
+ .quirks = asus_nb_wmi_quirks,
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 65b66aa..95cba9e 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -44,6 +44,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
+#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -66,6 +67,8 @@ MODULE_LICENSE("GPL");
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
+#define NOTIFY_KBD_BRTUP 0xc4
+#define NOTIFY_KBD_BRTDWN 0xc5
/* WMI Methods */
#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
@@ -93,6 +96,7 @@ MODULE_LICENSE("GPL");
/* Wireless */
#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001
#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
+#define ASUS_WMI_DEVID_CWAP 0x00010003
#define ASUS_WMI_DEVID_WLAN 0x00010011
#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
#define ASUS_WMI_DEVID_GPS 0x00010015
@@ -102,6 +106,12 @@ MODULE_LICENSE("GPL");
/* Leds */
/* 0x000200XX and 0x000400XX */
+#define ASUS_WMI_DEVID_LED1 0x00020011
+#define ASUS_WMI_DEVID_LED2 0x00020012
+#define ASUS_WMI_DEVID_LED3 0x00020013
+#define ASUS_WMI_DEVID_LED4 0x00020014
+#define ASUS_WMI_DEVID_LED5 0x00020015
+#define ASUS_WMI_DEVID_LED6 0x00020016
/* Backlight and Brightness */
#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
@@ -174,13 +184,18 @@ struct asus_wmi {
struct led_classdev tpd_led;
int tpd_led_wk;
+ struct led_classdev kbd_led;
+ int kbd_led_wk;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
+ struct work_struct kbd_led_work;
struct asus_rfkill wlan;
struct asus_rfkill bluetooth;
struct asus_rfkill wimax;
struct asus_rfkill wwan3g;
+ struct asus_rfkill gps;
+ struct asus_rfkill uwb;
struct hotplug_slot *hotplug_slot;
struct mutex hotplug_lock;
@@ -205,6 +220,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
asus->inputdev->phys = asus->driver->input_phys;
asus->inputdev->id.bustype = BUS_HOST;
asus->inputdev->dev.parent = &asus->platform_device->dev;
+ set_bit(EV_REP, asus->inputdev->evbit);
err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
if (err)
@@ -359,30 +375,80 @@ static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
return read_tpd_led_state(asus);
}
-static int asus_wmi_led_init(struct asus_wmi *asus)
+static void kbd_led_update(struct work_struct *work)
{
- int rv;
+ int ctrl_param = 0;
+ struct asus_wmi *asus;
- if (read_tpd_led_state(asus) < 0)
- return 0;
+ asus = container_of(work, struct asus_wmi, kbd_led_work);
- asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
- if (!asus->led_workqueue)
- return -ENOMEM;
- INIT_WORK(&asus->tpd_led_work, tpd_led_update);
+ /*
+ * bits 0-2: level
+ * bit 7: light on/off
+ */
+ if (asus->kbd_led_wk > 0)
+ ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
- asus->tpd_led.name = "asus::touchpad";
- asus->tpd_led.brightness_set = tpd_led_set;
- asus->tpd_led.brightness_get = tpd_led_get;
- asus->tpd_led.max_brightness = 1;
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
+}
- rv = led_classdev_register(&asus->platform_device->dev, &asus->tpd_led);
- if (rv) {
- destroy_workqueue(asus->led_workqueue);
- return rv;
+static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
+{
+ int retval;
+
+ /*
+ * bits 0-2: level
+ * bit 7: light on/off
+ * bits 8-10: environment (0: dark, 1: normal, 2: light)
+ * bit 17: status unknown
+ */
+ retval = asus_wmi_get_devstate_bits(asus, ASUS_WMI_DEVID_KBD_BACKLIGHT,
+ 0xFFFF);
+
+ /* Unknown status is considered as off */
+ if (retval == 0x8000)
+ retval = 0;
+
+ if (retval >= 0) {
+ if (level)
+ *level = retval & 0x80 ? retval & 0x7F : 0;
+ if (env)
+ *env = (retval >> 8) & 0x7F;
+ retval = 0;
}
- return 0;
+ return retval;
+}
+
+static void kbd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, kbd_led);
+
+ if (value > asus->kbd_led.max_brightness)
+ value = asus->kbd_led.max_brightness;
+ else if (value < 0)
+ value = 0;
+
+ asus->kbd_led_wk = value;
+ queue_work(asus->led_workqueue, &asus->kbd_led_work);
+}
+
+static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ int retval, value;
+
+ asus = container_of(led_cdev, struct asus_wmi, kbd_led);
+
+ retval = kbd_led_read(asus, &value, NULL);
+
+ if (retval < 0)
+ return retval;
+
+ return value;
}
static void asus_wmi_led_exit(struct asus_wmi *asus)
@@ -393,6 +459,48 @@ static void asus_wmi_led_exit(struct asus_wmi *asus)
destroy_workqueue(asus->led_workqueue);
}
+static int asus_wmi_led_init(struct asus_wmi *asus)
+{
+ int rv = 0;
+
+ asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!asus->led_workqueue)
+ return -ENOMEM;
+
+ if (read_tpd_led_state(asus) >= 0) {
+ INIT_WORK(&asus->tpd_led_work, tpd_led_update);
+
+ asus->tpd_led.name = "asus::touchpad";
+ asus->tpd_led.brightness_set = tpd_led_set;
+ asus->tpd_led.brightness_get = tpd_led_get;
+ asus->tpd_led.max_brightness = 1;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->tpd_led);
+ if (rv)
+ goto error;
+ }
+
+ if (kbd_led_read(asus, NULL, NULL) >= 0) {
+ INIT_WORK(&asus->kbd_led_work, kbd_led_update);
+
+ asus->kbd_led.name = "asus::kbd_backlight";
+ asus->kbd_led.brightness_set = kbd_led_set;
+ asus->kbd_led.brightness_get = kbd_led_get;
+ asus->kbd_led.max_brightness = 3;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->kbd_led);
+ }
+
+error:
+ if (rv)
+ asus_wmi_led_exit(asus);
+
+ return rv;
+}
+
+
/*
* PCI hotplug (for wlan rfkill)
*/
@@ -729,6 +837,16 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
rfkill_destroy(asus->wwan3g.rfkill);
asus->wwan3g.rfkill = NULL;
}
+ if (asus->gps.rfkill) {
+ rfkill_unregister(asus->gps.rfkill);
+ rfkill_destroy(asus->gps.rfkill);
+ asus->gps.rfkill = NULL;
+ }
+ if (asus->uwb.rfkill) {
+ rfkill_unregister(asus->uwb.rfkill);
+ rfkill_destroy(asus->uwb.rfkill);
+ asus->uwb.rfkill = NULL;
+ }
}
static int asus_wmi_rfkill_init(struct asus_wmi *asus)
@@ -763,6 +881,18 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus)
if (result && result != -ENODEV)
goto exit;
+ result = asus_new_rfkill(asus, &asus->gps, "asus-gps",
+ RFKILL_TYPE_GPS, ASUS_WMI_DEVID_GPS);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = asus_new_rfkill(asus, &asus->uwb, "asus-uwb",
+ RFKILL_TYPE_UWB, ASUS_WMI_DEVID_UWB);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
if (!asus->driver->hotplug_wireless)
goto exit;
@@ -797,8 +927,8 @@ exit:
* Hwmon device
*/
static ssize_t asus_hwmon_pwm1(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
u32 value;
@@ -809,7 +939,7 @@ static ssize_t asus_hwmon_pwm1(struct device *dev,
if (err < 0)
return err;
- value |= 0xFF;
+ value &= 0xFF;
if (value == 1) /* Low Speed */
value = 85;
@@ -825,7 +955,26 @@ static ssize_t asus_hwmon_pwm1(struct device *dev,
return sprintf(buf, "%d\n", value);
}
+static ssize_t asus_hwmon_temp1(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_THERMAL_CTRL, &value);
+
+ if (err < 0)
+ return err;
+
+ value = KELVIN_TO_CELSIUS((value & 0xFFFF)) * 1000;
+
+ return sprintf(buf, "%d\n", value);
+}
+
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0);
static ssize_t
show_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -836,12 +985,13 @@ static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
NULL
};
static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
- struct attribute *attr, int idx)
+ struct attribute *attr, int idx)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct platform_device *pdev = to_platform_device(dev->parent);
@@ -852,6 +1002,8 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
if (attr == &sensor_dev_attr_pwm1.dev_attr.attr)
dev_id = ASUS_WMI_DEVID_FAN_CTRL;
+ else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr)
+ dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;
if (dev_id != -1) {
int err = asus_wmi_get_devstate(asus, dev_id, &value);
@@ -869,9 +1021,13 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
* - reserved bits are non-zero
* - sfun and presence bit are not set
*/
- if (value != ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
+ if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
|| (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)))
ok = false;
+ } else if (dev_id == ASUS_WMI_DEVID_THERMAL_CTRL) {
+ /* If value is zero, something is clearly wrong */
+ if (value == 0)
+ ok = false;
}
return ok ? attr->mode : 0;
@@ -904,6 +1060,7 @@ static int asus_wmi_hwmon_init(struct asus_wmi *asus)
pr_err("Could not register asus hwmon device\n");
return PTR_ERR(hwmon);
}
+ dev_set_drvdata(hwmon, asus);
asus->hwmon_device = hwmon;
result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group);
if (result)
@@ -1060,6 +1217,8 @@ static void asus_wmi_notify(u32 value, void *context)
acpi_status status;
int code;
int orig_code;
+ unsigned int key_value = 1;
+ bool autorelease = 1;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
@@ -1075,6 +1234,13 @@ static void asus_wmi_notify(u32 value, void *context)
code = obj->integer.value;
orig_code = code;
+ if (asus->driver->key_filter) {
+ asus->driver->key_filter(asus->driver, &code, &key_value,
+ &autorelease);
+ if (code == ASUS_WMI_KEY_IGNORE)
+ goto exit;
+ }
+
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
code = NOTIFY_BRNUP_MIN;
else if (code >= NOTIFY_BRNDOWN_MIN &&
@@ -1084,7 +1250,8 @@ static void asus_wmi_notify(u32 value, void *context)
if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
if (!acpi_video_backlight_support())
asus_wmi_backlight_notify(asus, orig_code);
- } else if (!sparse_keymap_report_event(asus->inputdev, code, 1, true))
+ } else if (!sparse_keymap_report_event(asus->inputdev, code,
+ key_value, autorelease))
pr_info("Unknown key %x pressed\n", code);
exit:
@@ -1164,14 +1331,18 @@ ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int value;
+ int value, rv;
if (!count || sscanf(buf, "%i", &value) != 1)
return -EINVAL;
if (value < 0 || value > 2)
return -EINVAL;
- return asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
+ rv = asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
+ if (rv < 0)
+ return rv;
+
+ return count;
}
static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);
@@ -1234,7 +1405,7 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
/* We don't know yet what to do with this version... */
if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) {
- pr_info("BIOS WMI version: %d.%d", rv >> 8, rv & 0xFF);
+ pr_info("BIOS WMI version: %d.%d", rv >> 16, rv & 0xFF);
asus->spec = rv;
}
@@ -1266,6 +1437,12 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
return -ENODEV;
}
+ /* CWAP allows defining the behavior of the Fn+F2 key;
+ * this method doesn't seem to be present on Eee PCs */
+ if (asus->driver->wapf >= 0)
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_CWAP,
+ asus->driver->wapf, NULL);
+
return asus_wmi_sysfs_init(asus->platform_device);
}
@@ -1568,6 +1745,14 @@ static int asus_hotk_restore(struct device *device)
bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G);
rfkill_set_sw_state(asus->wwan3g.rfkill, bl);
}
+ if (asus->gps.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPS);
+ rfkill_set_sw_state(asus->gps.rfkill, bl);
+ }
+ if (asus->uwb.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_UWB);
+ rfkill_set_sw_state(asus->uwb.rfkill, bl);
+ }
return 0;
}
@@ -1604,7 +1789,7 @@ static int asus_wmi_probe(struct platform_device *pdev)
static bool used;
-int asus_wmi_register_driver(struct asus_wmi_driver *driver)
+int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver)
{
struct platform_driver *platform_driver;
struct platform_device *platform_device;
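The asus-wmi.c keyboard-backlight additions above encode the LED state in a single WMI control word: bits 0-2 carry the level, bit 7 the on/off flag, and bits 8-10 the ambient-light environment, per the comments in kbd_led_update() and kbd_led_read(). A short user-space sketch of packing and unpacking that word (values chosen arbitrarily, mirroring the driver's bit handling):

#include <stdio.h>

static unsigned int kbd_pack(int level)
{
	return level > 0 ? 0x80u | (level & 0x7F) : 0;	/* as in kbd_led_update() */
}

static void kbd_unpack(unsigned int word, int *level, int *env)
{
	*level = (word & 0x80) ? (word & 0x7F) : 0;	/* off unless bit 7 is set */
	*env   = (word >> 8) & 0x7F;
}

int main(void)
{
	int level, env;
	unsigned int raw = kbd_pack(2) | (1 << 8);	/* level 2, "normal" env */

	kbd_unpack(raw, &level, &env);
	printf("level=%d env=%d raw=%#x\n", level, env, raw);	/* level=2 env=1 raw=0x182 */
	return 0;
}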
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index c044522..8147c10 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -29,12 +29,15 @@
#include <linux/platform_device.h>
+#define ASUS_WMI_KEY_IGNORE (-1)
+
struct module;
struct key_entry;
struct asus_wmi;
struct asus_wmi_driver {
bool hotplug_wireless;
+ int wapf;
const char *name;
struct module *owner;
@@ -44,6 +47,10 @@ struct asus_wmi_driver {
const struct key_entry *keymap;
const char *input_name;
const char *input_phys;
+ /* Returns new code, value, and autorelease values in arguments.
+ * Return ASUS_WMI_KEY_IGNORE in code if event should be ignored. */
+ void (*key_filter) (struct asus_wmi_driver *driver, int *code,
+ unsigned int *value, bool *autorelease);
int (*probe) (struct platform_device *device);
void (*quirks) (struct asus_wmi_driver *driver);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index e39ab1d..f31fa4e 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -612,7 +612,6 @@ static int __init dell_init(void)
if (!bufferpage)
goto fail_buffer;
buffer = page_address(bufferpage);
- mutex_init(&buffer_mutex);
ret = dell_setup_rfkill();
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index ce79082..fa9a217 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -54,6 +54,8 @@ MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
*/
static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
+ { KE_IGNORE, 0x003a, { KEY_CAPSLOCK } },
+
{ KE_KEY, 0xe045, { KEY_PROG1 } },
{ KE_KEY, 0xe009, { KEY_EJECTCD } },
@@ -85,6 +87,11 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
{ KE_IGNORE, 0xe013, { KEY_RESERVED } },
{ KE_IGNORE, 0xe020, { KEY_MUTE } },
+
+ /* Shortcut and audio panel keys */
+ { KE_IGNORE, 0xe025, { KEY_RESERVED } },
+ { KE_IGNORE, 0xe026, { KEY_RESERVED } },
+
{ KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } },
{ KE_IGNORE, 0xe030, { KEY_VOLUMEUP } },
{ KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } },
@@ -92,6 +99,9 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
{ KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } },
{ KE_IGNORE, 0xe045, { KEY_NUMLOCK } },
{ KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } },
+ { KE_IGNORE, 0xe0f7, { KEY_MUTE } },
+ { KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } },
+ { KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } },
{ KE_END, 0 }
};
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 4aa867a..9f6e643 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -56,6 +56,11 @@ MODULE_PARM_DESC(hotplug_wireless,
"If your laptop needs that, please report to "
"acpi4asus-user@lists.sourceforge.net.");
+/* Values for T101MT "Home" key */
+#define HOME_PRESS 0xe4
+#define HOME_HOLD 0xea
+#define HOME_RELEASE 0xe5
+
static const struct key_entry eeepc_wmi_keymap[] = {
/* Sleep already handled via generic ACPI code */
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
@@ -71,6 +76,7 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
{ KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
+ { KE_KEY, HOME_PRESS, { KEY_CONFIG } }, /* Home/Express gate key */
{ KE_KEY, 0xe8, { KEY_SCREENLOCK } },
{ KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
{ KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
@@ -81,6 +87,25 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_END, 0},
};
+static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
+ unsigned int *value, bool *autorelease)
+{
+ switch (*code) {
+ case HOME_PRESS:
+ *value = 1;
+ *autorelease = 0;
+ break;
+ case HOME_HOLD:
+ *code = ASUS_WMI_KEY_IGNORE;
+ break;
+ case HOME_RELEASE:
+ *code = HOME_PRESS;
+ *value = 0;
+ *autorelease = 0;
+ break;
+ }
+}
+
static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
void *context, void **retval)
{
@@ -141,6 +166,7 @@ static void eeepc_dmi_check(struct asus_wmi_driver *driver)
static void eeepc_wmi_quirks(struct asus_wmi_driver *driver)
{
driver->hotplug_wireless = hotplug_wireless;
+ driver->wapf = -1;
eeepc_dmi_check(driver);
}
@@ -151,6 +177,7 @@ static struct asus_wmi_driver asus_wmi_driver = {
.keymap = eeepc_wmi_keymap,
.input_name = "Eee PC WMI hotkeys",
.input_phys = EEEPC_WMI_FILE "/input0",
+ .key_filter = eeepc_wmi_key_filter,
.probe = eeepc_wmi_probe,
.quirks = eeepc_wmi_quirks,
};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index bfdda33..0c59541 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -32,13 +32,22 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
#define IDEAPAD_RFKILL_DEV_NUM (3)
+#define CFG_BT_BIT (16)
+#define CFG_3G_BIT (17)
+#define CFG_WIFI_BIT (18)
+#define CFG_CAMERA_BIT (19)
+
struct ideapad_private {
struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
struct platform_device *platform_device;
struct input_dev *inputdev;
+ struct backlight_device *blightdev;
+ unsigned long cfg;
};
static acpi_handle ideapad_handle;
@@ -155,7 +164,7 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
}
/*
- * camera power
+ * sysfs
*/
static ssize_t show_ideapad_cam(struct device *dev,
struct device_attribute *attr,
@@ -186,6 +195,44 @@ static ssize_t store_ideapad_cam(struct device *dev,
static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
+static ssize_t show_ideapad_cfg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%.8lX\n", priv->cfg);
+}
+
+static DEVICE_ATTR(cfg, 0444, show_ideapad_cfg, NULL);
+
+static struct attribute *ideapad_attributes[] = {
+ &dev_attr_camera_power.attr,
+ &dev_attr_cfg.attr,
+ NULL
+};
+
+static mode_t ideapad_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int idx)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool supported;
+
+ if (attr == &dev_attr_camera_power.attr)
+ supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
+ else
+ supported = true;
+
+ return supported ? attr->mode : 0;
+}
+
+static struct attribute_group ideapad_attribute_group = {
+ .is_visible = ideapad_is_visible,
+ .attrs = ideapad_attributes
+};
+
/*
* Rfkill
*/
@@ -197,9 +244,9 @@ struct ideapad_rfk_data {
};
const struct ideapad_rfk_data ideapad_rfk_data[] = {
- { "ideapad_wlan", 18, 0x15, RFKILL_TYPE_WLAN },
- { "ideapad_bluetooth", 16, 0x17, RFKILL_TYPE_BLUETOOTH },
- { "ideapad_3g", 17, 0x20, RFKILL_TYPE_WWAN },
+ { "ideapad_wlan", CFG_WIFI_BIT, 0x15, RFKILL_TYPE_WLAN },
+ { "ideapad_bluetooth", CFG_BT_BIT, 0x17, RFKILL_TYPE_BLUETOOTH },
+ { "ideapad_3g", CFG_3G_BIT, 0x20, RFKILL_TYPE_WWAN },
};
static int ideapad_rfk_set(void *data, bool blocked)
@@ -265,8 +312,7 @@ static int __devinit ideapad_register_rfkill(struct acpi_device *adevice,
return 0;
}
-static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice,
- int dev)
+static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
@@ -280,15 +326,6 @@ static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice,
/*
* Platform device
*/
-static struct attribute *ideapad_attributes[] = {
- &dev_attr_camera_power.attr,
- NULL
-};
-
-static struct attribute_group ideapad_attribute_group = {
- .attrs = ideapad_attributes
-};
-
static int __devinit ideapad_platform_init(struct ideapad_private *priv)
{
int result;
@@ -369,7 +406,7 @@ err_free_dev:
return error;
}
-static void __devexit ideapad_input_exit(struct ideapad_private *priv)
+static void ideapad_input_exit(struct ideapad_private *priv)
{
sparse_keymap_free(priv->inputdev);
input_unregister_device(priv->inputdev);
@@ -383,6 +420,98 @@ static void ideapad_input_report(struct ideapad_private *priv,
}
/*
+ * backlight
+ */
+static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
+{
+ unsigned long now;
+
+ if (read_ec_data(ideapad_handle, 0x12, &now))
+ return -EIO;
+ return now;
+}
+
+static int ideapad_backlight_update_status(struct backlight_device *blightdev)
+{
+ if (write_ec_cmd(ideapad_handle, 0x13, blightdev->props.brightness))
+ return -EIO;
+ if (write_ec_cmd(ideapad_handle, 0x33,
+ blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
+ return -EIO;
+
+ return 0;
+}
+
+static const struct backlight_ops ideapad_backlight_ops = {
+ .get_brightness = ideapad_backlight_get_brightness,
+ .update_status = ideapad_backlight_update_status,
+};
+
+static int ideapad_backlight_init(struct ideapad_private *priv)
+{
+ struct backlight_device *blightdev;
+ struct backlight_properties props;
+ unsigned long max, now, power;
+
+ if (read_ec_data(ideapad_handle, 0x11, &max))
+ return -EIO;
+ if (read_ec_data(ideapad_handle, 0x12, &now))
+ return -EIO;
+ if (read_ec_data(ideapad_handle, 0x18, &power))
+ return -EIO;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = max;
+ props.type = BACKLIGHT_PLATFORM;
+ blightdev = backlight_device_register("ideapad",
+ &priv->platform_device->dev,
+ priv,
+ &ideapad_backlight_ops,
+ &props);
+ if (IS_ERR(blightdev)) {
+ pr_err("Could not register backlight device\n");
+ return PTR_ERR(blightdev);
+ }
+
+ priv->blightdev = blightdev;
+ blightdev->props.brightness = now;
+ blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ backlight_update_status(blightdev);
+
+ return 0;
+}
+
+static void ideapad_backlight_exit(struct ideapad_private *priv)
+{
+ if (priv->blightdev)
+ backlight_device_unregister(priv->blightdev);
+ priv->blightdev = NULL;
+}
+
+static void ideapad_backlight_notify_power(struct ideapad_private *priv)
+{
+ unsigned long power;
+ struct backlight_device *blightdev = priv->blightdev;
+
+ if (read_ec_data(ideapad_handle, 0x18, &power))
+ return;
+ blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+}
+
+static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
+{
+ unsigned long now;
+
+ /* if we control brightness via acpi video driver */
+ if (priv->blightdev == NULL) {
+ read_ec_data(ideapad_handle, 0x12, &now);
+ return;
+ }
+
+ backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY);
+}
+
+/*
* module init/exit
*/
static const struct acpi_device_id ideapad_device_ids[] = {
@@ -393,10 +522,11 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
{
- int ret, i, cfg;
+ int ret, i;
+ unsigned long cfg;
struct ideapad_private *priv;
- if (read_method_int(adevice->handle, "_CFG", &cfg))
+ if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
return -ENODEV;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -404,6 +534,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
return -ENOMEM;
dev_set_drvdata(&adevice->dev, priv);
ideapad_handle = adevice->handle;
+ priv->cfg = cfg;
ret = ideapad_platform_init(priv);
if (ret)
@@ -414,15 +545,25 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
goto input_failed;
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
- if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg))
+ if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
ideapad_register_rfkill(adevice, i);
else
priv->rfk[i] = NULL;
}
ideapad_sync_rfk_state(adevice);
+ if (!acpi_video_backlight_support()) {
+ ret = ideapad_backlight_init(priv);
+ if (ret && ret != -ENODEV)
+ goto backlight_failed;
+ }
+
return 0;
+backlight_failed:
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+ ideapad_unregister_rfkill(adevice, i);
+ ideapad_input_exit(priv);
input_failed:
ideapad_platform_exit(priv);
platform_failed:
@@ -435,6 +576,7 @@ static int __devexit ideapad_acpi_remove(struct acpi_device *adevice, int type)
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int i;
+ ideapad_backlight_exit(priv);
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
ideapad_unregister_rfkill(adevice, i);
ideapad_input_exit(priv);
@@ -459,12 +601,19 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
vpc1 = (vpc2 << 8) | vpc1;
for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
if (test_bit(vpc_bit, &vpc1)) {
- if (vpc_bit == 9)
+ switch (vpc_bit) {
+ case 9:
ideapad_sync_rfk_state(adevice);
- else if (vpc_bit == 4)
- read_ec_data(handle, 0x12, &vpc2);
- else
+ break;
+ case 4:
+ ideapad_backlight_notify_brightness(priv);
+ break;
+ case 2:
+ ideapad_backlight_notify_power(priv);
+ break;
+ default:
ideapad_input_report(priv, vpc_bit);
+ }
}
}
}
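The ideapad hunks above add an EC-backed backlight and fan the 16-bit VPC event word out to per-bit handlers. As a rough illustration of that dispatch (a sketch, not the committed code: the wrapper name is invented, the handlers and types are the ones from the hunks, and for_each_set_bit() from <linux/bitops.h> stands in for the open-coded loop):

static void ideapad_dispatch_vpc(struct acpi_device *adevice,
                                 struct ideapad_private *priv,
                                 unsigned long vpc)
{
	unsigned long bit;

	/* visit only the bits that are set in the combined VPC word */
	for_each_set_bit(bit, &vpc, 16) {
		switch (bit) {
		case 9:		/* radio state changed */
			ideapad_sync_rfk_state(adevice);
			break;
		case 4:		/* brightness hotkey */
			ideapad_backlight_notify_brightness(priv);
			break;
		case 2:		/* backlight power toggled */
			ideapad_backlight_notify_power(priv);
			break;
		default:	/* everything else is a plain hotkey */
			ideapad_input_report(priv, bit);
		}
	}
}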
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 5ffe7c3..809a3ae 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -403,7 +403,7 @@ static void ips_cpu_raise(struct ips_driver *ips)
thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);
- turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN;
+ turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~TURBO_TDP_MASK;
@@ -438,7 +438,7 @@ static void ips_cpu_lower(struct ips_driver *ips)
thm_writew(THM_MPCPC, (new_limit * 10) / 8);
- turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN;
+ turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~TURBO_TDP_MASK;
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 809adea..abddc83 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -477,6 +477,8 @@ static acpi_status intel_menlow_register_sensor(acpi_handle handle, u32 lvl,
return AE_ERROR;
}
+ return AE_OK;
+
aux1_not_found:
if (status == AE_NOT_FOUND)
return AE_OK;
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 3a57832..ccd7b1f 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -493,20 +493,30 @@ static int mid_thermal_probe(struct platform_device *pdev)
/* Register each sensor with the generic thermal framework*/
for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
+ struct thermal_device_info *td_info = initialize_sensor(i);
+
+ if (!td_info) {
+ ret = -ENOMEM;
+ goto err;
+ }
pinfo->tzd[i] = thermal_zone_device_register(name[i],
- 0, initialize_sensor(i), &tzd_ops, 0, 0, 0, 0);
- if (IS_ERR(pinfo->tzd[i]))
- goto reg_fail;
+ 0, td_info, &tzd_ops, 0, 0, 0, 0);
+ if (IS_ERR(pinfo->tzd[i])) {
+ kfree(td_info);
+ ret = PTR_ERR(pinfo->tzd[i]);
+ goto err;
+ }
}
pinfo->pdev = pdev;
platform_set_drvdata(pdev, pinfo);
return 0;
-reg_fail:
- ret = PTR_ERR(pinfo->tzd[i]);
- while (--i >= 0)
+err:
+ while (--i >= 0) {
+ kfree(pinfo->tzd[i]->devdata);
thermal_zone_device_unregister(pinfo->tzd[i]);
+ }
configure_adc(0);
kfree(pinfo);
return ret;
@@ -524,8 +534,10 @@ static int mid_thermal_remove(struct platform_device *pdev)
int i;
struct platform_info *pinfo = platform_get_drvdata(pdev);
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
+ for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
+ kfree(pinfo->tzd[i]->devdata);
thermal_zone_device_unregister(pinfo->tzd[i]);
+ }
kfree(pinfo);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c
index bde47e9..c8a6aed 100644
--- a/drivers/platform/x86/intel_rar_register.c
+++ b/drivers/platform/x86/intel_rar_register.c
@@ -637,15 +637,13 @@ end_function:
return error;
}
-const struct pci_device_id rar_pci_id_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rar_pci_id_tbl) = {
{ PCI_VDEVICE(INTEL, 0x4110) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl);
-const struct pci_device_id *my_id_table = rar_pci_id_tbl;
-
/* field for registering driver to PCI device */
static struct pci_driver rar_pci_driver = {
.name = "rar_register_driver",
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 940accb..c866653 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -725,7 +725,7 @@ static void ipc_remove(struct pci_dev *pdev)
intel_scu_devices_destroy();
}
-static const struct pci_device_id pci_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
{ 0,}
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 3ff629d..f204643 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -538,6 +538,15 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
},
.callback = dmi_check_cb
},
+ {
+ .ident = "MSI U270",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Micro-Star International Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"),
+ },
+ .callback = dmi_check_cb
+ },
{ }
};
@@ -996,3 +1005,4 @@ MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N034:*");
MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*");
MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*");
MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*");
+MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*");
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index c832e33..6f40bf2 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -272,6 +272,7 @@ static int __init msi_wmi_init(void)
err_free_backlight:
backlight_device_unregister(backlight);
err_free_input:
+ sparse_keymap_free(msi_wmi_input_dev);
input_unregister_device(msi_wmi_input_dev);
err_uninstall_notifier:
wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index d347116..3591630 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -521,6 +521,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
+ .ident = "N510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N510"),
+ DMI_MATCH(DMI_BOARD_NAME, "N510"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
.ident = "X125",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
@@ -601,6 +611,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
+ .ident = "N150/N210/N220",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
+ DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
.ident = "N150/N210/N220/N230",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
new file mode 100644
index 0000000..1e54ae7
--- /dev/null
+++ b/drivers/platform/x86/samsung-q10.c
@@ -0,0 +1,196 @@
+/*
+ * Driver for Samsung Q10 and related laptops: controls the backlight
+ *
+ * Copyright (c) 2011 Frederick van der Wyck <fvanderwyck@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/i8042.h>
+#include <linux/dmi.h>
+
+#define SAMSUNGQ10_BL_MAX_INTENSITY 255
+#define SAMSUNGQ10_BL_DEFAULT_INTENSITY 185
+
+#define SAMSUNGQ10_BL_8042_CMD 0xbe
+#define SAMSUNGQ10_BL_8042_DATA { 0x89, 0x91 }
+
+static int samsungq10_bl_brightness;
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force,
+ "Disable the DMI check and force the driver to be loaded");
+
+static int samsungq10_bl_set_intensity(struct backlight_device *bd)
+{
+
+ int brightness = bd->props.brightness;
+ unsigned char c[3] = SAMSUNGQ10_BL_8042_DATA;
+
+ c[2] = (unsigned char)brightness;
+ i8042_lock_chip();
+ i8042_command(c, (0x30 << 8) | SAMSUNGQ10_BL_8042_CMD);
+ i8042_unlock_chip();
+ samsungq10_bl_brightness = brightness;
+
+ return 0;
+}
+
+static int samsungq10_bl_get_intensity(struct backlight_device *bd)
+{
+ return samsungq10_bl_brightness;
+}
+
+static const struct backlight_ops samsungq10_bl_ops = {
+ .get_brightness = samsungq10_bl_get_intensity,
+ .update_status = samsungq10_bl_set_intensity,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int samsungq10_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int samsungq10_resume(struct device *dev)
+{
+
+ struct backlight_device *bd = dev_get_drvdata(dev);
+
+ samsungq10_bl_set_intensity(bd);
+ return 0;
+}
+#else
+#define samsungq10_suspend NULL
+#define samsungq10_resume NULL
+#endif
+
+static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops,
+ samsungq10_suspend, samsungq10_resume);
+
+static int __devinit samsungq10_probe(struct platform_device *pdev)
+{
+
+ struct backlight_properties props;
+ struct backlight_device *bd;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = SAMSUNGQ10_BL_MAX_INTENSITY;
+ bd = backlight_device_register("samsung", &pdev->dev, NULL,
+ &samsungq10_bl_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ platform_set_drvdata(pdev, bd);
+
+ bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
+ samsungq10_bl_set_intensity(bd);
+
+ return 0;
+}
+
+static int __devexit samsungq10_remove(struct platform_device *pdev)
+{
+
+ struct backlight_device *bd = platform_get_drvdata(pdev);
+
+ bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
+ samsungq10_bl_set_intensity(bd);
+
+ backlight_device_unregister(bd);
+
+ return 0;
+}
+
+static struct platform_driver samsungq10_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .pm = &samsungq10_pm_ops,
+ },
+ .probe = samsungq10_probe,
+ .remove = __devexit_p(samsungq10_remove),
+};
+
+static struct platform_device *samsungq10_device;
+
+static int __init dmi_check_callback(const struct dmi_system_id *id)
+{
+ printk(KERN_INFO KBUILD_MODNAME ": found model '%s'\n", id->ident);
+ return 1;
+}
+
+static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
+ {
+ .ident = "Samsung Q10",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Samsung"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SQ10"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Samsung Q20",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SENS Q20"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Samsung Q25",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NQ25"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Dell Latitude X200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X200"),
+ },
+ .callback = dmi_check_callback,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(dmi, samsungq10_dmi_table);
+
+static int __init samsungq10_init(void)
+{
+ if (!force && !dmi_check_system(samsungq10_dmi_table))
+ return -ENODEV;
+
+ samsungq10_device = platform_create_bundle(&samsungq10_driver,
+ samsungq10_probe,
+ NULL, 0, NULL, 0);
+
+ if (IS_ERR(samsungq10_device))
+ return PTR_ERR(samsungq10_device);
+
+ return 0;
+}
+
+static void __exit samsungq10_exit(void)
+{
+ platform_device_unregister(samsungq10_device);
+ platform_driver_unregister(&samsungq10_driver);
+}
+
+module_init(samsungq10_init);
+module_exit(samsungq10_exit);
+
+MODULE_AUTHOR("Frederick van der Wyck <fvanderwyck@gmail.com>");
+MODULE_DESCRIPTION("Samsung Q10 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 26c5b11..7bd829f 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3186,8 +3186,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
/* (assignments unknown, please report if found) */
+ KEY_UNKNOWN, KEY_UNKNOWN,
+
+ /*
+ * The mic mute button only sends 0x1a. It does not
+ * automatically mute the mic or change the mute light.
+ */
+ KEY_MICMUTE, /* 0x1a: Mic mute (since ?400 or so) */
+
+ /* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN,
},
};
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index e57b50b..57de051 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -235,4 +235,18 @@ config CHARGER_GPIO
This driver can be build as a module. If so, the module will be
called gpio-charger.
+config CHARGER_MAX8997
+ tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
+ depends on MFD_MAX8997 && REGULATOR_MAX8997
+ help
+ Say Y to enable support for the battery charger control sysfs and
+ platform data of MAX8997/LP3974 PMICs.
+
+config CHARGER_MAX8998
+ tristate "Maxim MAX8998/LP3974 PMIC battery charger driver"
+ depends on MFD_MAX8998 && REGULATOR_MAX8998
+ help
+ Say Y to enable support for the battery charger control sysfs and
+ platform data of MAX8998/LP3974 PMICs.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 009a90f..b4af13d 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -36,3 +36,5 @@ obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
+obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
+obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index dc628cb..8a612de 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -14,11 +14,11 @@
#include <linux/apm-emulation.h>
-#define PSY_PROP(psy, prop, val) psy->get_property(psy, \
- POWER_SUPPLY_PROP_##prop, val)
+#define PSY_PROP(psy, prop, val) (psy->get_property(psy, \
+ POWER_SUPPLY_PROP_##prop, val))
-#define _MPSY_PROP(prop, val) main_battery->get_property(main_battery, \
- prop, val)
+#define _MPSY_PROP(prop, val) (main_battery->get_property(main_battery, \
+ prop, val))
#define MPSY_PROP(prop, val) _MPSY_PROP(POWER_SUPPLY_PROP_##prop, val)
diff --git a/drivers/power/bq20z75.c b/drivers/power/bq20z75.c
index 506585e..9c5e5be 100644
--- a/drivers/power/bq20z75.c
+++ b/drivers/power/bq20z75.c
@@ -152,6 +152,10 @@ struct bq20z75_info {
bool gpio_detect;
bool enable_detection;
int irq;
+ int last_state;
+ int poll_time;
+ struct delayed_work work;
+ int ignore_changes;
};
static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
@@ -279,6 +283,7 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
int reg_offset, enum power_supply_property psp,
union power_supply_propval *val)
{
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
s32 ret;
ret = bq20z75_read_word_data(client,
@@ -293,15 +298,24 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
if (ret >= bq20z75_data[reg_offset].min_value &&
ret <= bq20z75_data[reg_offset].max_value) {
val->intval = ret;
- if (psp == POWER_SUPPLY_PROP_STATUS) {
- if (ret & BATTERY_FULL_CHARGED)
- val->intval = POWER_SUPPLY_STATUS_FULL;
- else if (ret & BATTERY_FULL_DISCHARGED)
- val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
- else if (ret & BATTERY_DISCHARGING)
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- else
- val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ if (psp != POWER_SUPPLY_PROP_STATUS)
+ return 0;
+
+ if (ret & BATTERY_FULL_CHARGED)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (ret & BATTERY_FULL_DISCHARGED)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (ret & BATTERY_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+
+ if (bq20z75_device->poll_time == 0)
+ bq20z75_device->last_state = val->intval;
+ else if (bq20z75_device->last_state != val->intval) {
+ cancel_delayed_work_sync(&bq20z75_device->work);
+ power_supply_changed(&bq20z75_device->power_supply);
+ bq20z75_device->poll_time = 0;
}
} else {
if (psp == POWER_SUPPLY_PROP_STATUS)
@@ -545,6 +559,60 @@ static irqreturn_t bq20z75_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static void bq20z75_external_power_changed(struct power_supply *psy)
+{
+ struct bq20z75_info *bq20z75_device;
+
+ bq20z75_device = container_of(psy, struct bq20z75_info, power_supply);
+
+ if (bq20z75_device->ignore_changes > 0) {
+ bq20z75_device->ignore_changes--;
+ return;
+ }
+
+ /* cancel outstanding work */
+ cancel_delayed_work_sync(&bq20z75_device->work);
+
+ schedule_delayed_work(&bq20z75_device->work, HZ);
+ bq20z75_device->poll_time = bq20z75_device->pdata->poll_retry_count;
+}
+
+static void bq20z75_delayed_work(struct work_struct *work)
+{
+ struct bq20z75_info *bq20z75_device;
+ s32 ret;
+
+ bq20z75_device = container_of(work, struct bq20z75_info, work.work);
+
+ ret = bq20z75_read_word_data(bq20z75_device->client,
+ bq20z75_data[REG_STATUS].addr);
+ /* if the read failed, give up on this work */
+ if (ret < 0) {
+ bq20z75_device->poll_time = 0;
+ return;
+ }
+
+ if (ret & BATTERY_FULL_CHARGED)
+ ret = POWER_SUPPLY_STATUS_FULL;
+ else if (ret & BATTERY_FULL_DISCHARGED)
+ ret = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (ret & BATTERY_DISCHARGING)
+ ret = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ ret = POWER_SUPPLY_STATUS_CHARGING;
+
+ if (bq20z75_device->last_state != ret) {
+ bq20z75_device->poll_time = 0;
+ power_supply_changed(&bq20z75_device->power_supply);
+ return;
+ }
+ if (bq20z75_device->poll_time > 0) {
+ schedule_delayed_work(&bq20z75_device->work, HZ);
+ bq20z75_device->poll_time--;
+ return;
+ }
+}
+
static int __devinit bq20z75_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -566,6 +634,13 @@ static int __devinit bq20z75_probe(struct i2c_client *client,
bq20z75_device->power_supply.num_properties =
ARRAY_SIZE(bq20z75_properties);
bq20z75_device->power_supply.get_property = bq20z75_get_property;
+ /* ignore first notification of external change, it is generated
+ * from the power_supply_register call back
+ */
+ bq20z75_device->ignore_changes = 1;
+ bq20z75_device->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
+ bq20z75_device->power_supply.external_power_changed =
+ bq20z75_external_power_changed;
if (pdata) {
bq20z75_device->gpio_detect =
@@ -625,6 +700,10 @@ skip_gpio:
dev_info(&client->dev,
"%s: battery gas gauge device registered\n", client->name);
+ INIT_DELAYED_WORK(&bq20z75_device->work, bq20z75_delayed_work);
+
+ bq20z75_device->enable_detection = true;
+
return 0;
exit_psupply:
@@ -648,6 +727,9 @@ static int __devexit bq20z75_remove(struct i2c_client *client)
gpio_free(bq20z75_device->pdata->battery_detect);
power_supply_unregister(&bq20z75_device->power_supply);
+
+ cancel_delayed_work_sync(&bq20z75_device->work);
+
kfree(bq20z75_device);
bq20z75_device = NULL;
@@ -661,6 +743,9 @@ static int bq20z75_suspend(struct i2c_client *client,
struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
s32 ret;
+ if (bq20z75_device->poll_time > 0)
+ cancel_delayed_work_sync(&bq20z75_device->work);
+
/* write to manufacturer access with sleep command */
ret = bq20z75_write_word_data(client,
bq20z75_data[REG_MANUFACTURER_DATA].addr,
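The new bq20z75 polling only runs when a board opts in through platform data: poll_retry_count is picked up in bq20z75_external_power_changed() above, and each retry is scheduled HZ jiffies apart. A hypothetical board fragment (the platform-data type name is assumed from the driver's header; the value is an example only):

/* Assumed platform-data type; only the field consumed by the new polling
 * code is shown. Each retry re-reads the status register about a second
 * after the previous one. */
static struct bq20z75_platform_data board_gas_gauge_pdata = {
	.poll_retry_count = 6,	/* keep polling for roughly six seconds */
};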
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 718f2c5..a64b885 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -127,7 +127,7 @@ static int __devinit gpio_charger_probe(struct platform_device *pdev)
ret = request_any_context_irq(irq, gpio_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
dev_name(&pdev->dev), charger);
- if (ret)
+ if (ret < 0)
dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret);
else
gpio_charger->irq = irq;
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index c5c8805..98bfab3 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -29,74 +29,6 @@
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
-enum max17042_register {
- MAX17042_STATUS = 0x00,
- MAX17042_VALRT_Th = 0x01,
- MAX17042_TALRT_Th = 0x02,
- MAX17042_SALRT_Th = 0x03,
- MAX17042_AtRate = 0x04,
- MAX17042_RepCap = 0x05,
- MAX17042_RepSOC = 0x06,
- MAX17042_Age = 0x07,
- MAX17042_TEMP = 0x08,
- MAX17042_VCELL = 0x09,
- MAX17042_Current = 0x0A,
- MAX17042_AvgCurrent = 0x0B,
- MAX17042_Qresidual = 0x0C,
- MAX17042_SOC = 0x0D,
- MAX17042_AvSOC = 0x0E,
- MAX17042_RemCap = 0x0F,
- MAX17402_FullCAP = 0x10,
- MAX17042_TTE = 0x11,
- MAX17042_V_empty = 0x12,
-
- MAX17042_RSLOW = 0x14,
-
- MAX17042_AvgTA = 0x16,
- MAX17042_Cycles = 0x17,
- MAX17042_DesignCap = 0x18,
- MAX17042_AvgVCELL = 0x19,
- MAX17042_MinMaxTemp = 0x1A,
- MAX17042_MinMaxVolt = 0x1B,
- MAX17042_MinMaxCurr = 0x1C,
- MAX17042_CONFIG = 0x1D,
- MAX17042_ICHGTerm = 0x1E,
- MAX17042_AvCap = 0x1F,
- MAX17042_ManName = 0x20,
- MAX17042_DevName = 0x21,
- MAX17042_DevChem = 0x22,
-
- MAX17042_TempNom = 0x24,
- MAX17042_TempCold = 0x25,
- MAX17042_TempHot = 0x26,
- MAX17042_AIN = 0x27,
- MAX17042_LearnCFG = 0x28,
- MAX17042_SHFTCFG = 0x29,
- MAX17042_RelaxCFG = 0x2A,
- MAX17042_MiscCFG = 0x2B,
- MAX17042_TGAIN = 0x2C,
- MAx17042_TOFF = 0x2D,
- MAX17042_CGAIN = 0x2E,
- MAX17042_COFF = 0x2F,
-
- MAX17042_Q_empty = 0x33,
- MAX17042_T_empty = 0x34,
-
- MAX17042_RCOMP0 = 0x38,
- MAX17042_TempCo = 0x39,
- MAX17042_Rx = 0x3A,
- MAX17042_T_empty0 = 0x3B,
- MAX17042_TaskPeriod = 0x3C,
- MAX17042_FSTAT = 0x3D,
-
- MAX17042_SHDNTIMER = 0x3F,
-
- MAX17042_VFRemCap = 0x4A,
-
- MAX17042_QH = 0x4D,
- MAX17042_QL = 0x4E,
-};
-
struct max17042_chip {
struct i2c_client *client;
struct power_supply battery;
@@ -123,10 +55,27 @@ static int max17042_read_reg(struct i2c_client *client, u8 reg)
return ret;
}
+static void max17042_set_reg(struct i2c_client *client,
+ struct max17042_reg_data *data, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ max17042_write_reg(client, data[i].addr, data[i].data);
+}
+
static enum power_supply_property max17042_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
};
static int max17042_get_property(struct power_supply *psy,
@@ -137,6 +86,30 @@ static int max17042_get_property(struct power_supply *psy,
struct max17042_chip, battery);
switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_STATUS);
+ if (val->intval & MAX17042_STATUS_BattAbsent)
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_Cycles);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_MinMaxVolt);
+ val->intval >>= 8;
+ val->intval *= 20000; /* Units of LSB = 20mV */
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_V_empty);
+ val->intval >>= 7;
+ val->intval *= 10000; /* Units of LSB = 10mV */
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = max17042_read_reg(chip->client,
MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */
@@ -149,6 +122,57 @@ static int max17042_get_property(struct power_supply *psy,
val->intval = max17042_read_reg(chip->client,
MAX17042_SOC) / 256;
break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_RepSOC);
+ if ((val->intval / 256) >= MAX17042_BATTERY_FULL)
+ val->intval = 1;
+ else if (val->intval >= 0)
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_TEMP);
+ /* The value is signed. */
+ if (val->intval & 0x8000) {
+ val->intval = (0x7fff & ~val->intval) + 1;
+ val->intval *= -1;
+ }
+ /* The value is converted into deci-centigrade scale */
+ /* Units of LSB = 1 / 256 degree Celsius */
+ val->intval = val->intval * 10 / 256;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ if (chip->pdata->enable_current_sense) {
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_Current);
+ if (val->intval & 0x8000) {
+ /* Negative */
+ val->intval = ~val->intval & 0x7fff;
+ val->intval++;
+ val->intval *= -1;
+ }
+ val->intval >>= 4;
+ val->intval *= 1000000 * 25 / chip->pdata->r_sns;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ if (chip->pdata->enable_current_sense) {
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_AvgCurrent);
+ if (val->intval & 0x8000) {
+ /* Negative */
+ val->intval = ~val->intval & 0x7fff;
+ val->intval++;
+ val->intval *= -1;
+ }
+ val->intval *= 1562500 / chip->pdata->r_sns;
+ } else {
+ return -EINVAL;
+ }
+ break;
default:
return -EINVAL;
}
@@ -180,18 +204,30 @@ static int __devinit max17042_probe(struct i2c_client *client,
chip->battery.properties = max17042_battery_props;
chip->battery.num_properties = ARRAY_SIZE(max17042_battery_props);
+ /* When current is not measured,
+ * CURRENT_NOW and CURRENT_AVG properties should be invisible. */
+ if (!chip->pdata->enable_current_sense)
+ chip->battery.num_properties -= 2;
+
ret = power_supply_register(&client->dev, &chip->battery);
if (ret) {
dev_err(&client->dev, "failed: power supply register\n");
- i2c_set_clientdata(client, NULL);
kfree(chip);
return ret;
}
+ /* Initialize registers according to values from the platform data */
+ if (chip->pdata->init_data)
+ max17042_set_reg(client, chip->pdata->init_data,
+ chip->pdata->num_init_data);
+
if (!chip->pdata->enable_current_sense) {
max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
+ } else {
+ if (chip->pdata->r_sns == 0)
+ chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
}
return 0;
@@ -202,7 +238,6 @@ static int __devexit max17042_remove(struct i2c_client *client)
struct max17042_chip *chip = i2c_get_clientdata(client);
power_supply_unregister(&chip->battery);
- i2c_set_clientdata(client, NULL);
kfree(chip);
return 0;
}
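The new CURRENT_NOW case above decodes a signed 16-bit register by hand and then scales it by the sense resistor. The same arithmetic, pulled out into a standalone helper purely for readability (the helper name and form are illustrative, not part of the patch; the constants mirror the CURRENT_NOW branch):

/*
 * Sketch of the conversion done in the CURRENT_NOW case: undo the
 * two's-complement encoding, drop the 4 low bits, then apply the
 * driver's 1000000 * 25 / r_sns scale factor.
 */
static int max17042_decode_current(int raw, int r_sns)
{
	if (raw & 0x8000) {		/* bit 15 set: negative value */
		raw = ~raw & 0x7fff;
		raw++;
		raw = -raw;
	}
	raw >>= 4;
	return raw * (1000000 * 25 / r_sns);
}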
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index 33ff0e3..a9b0209 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -28,7 +28,7 @@
#include <linux/power/max8903_charger.h>
struct max8903_data {
- struct max8903_pdata *pdata;
+ struct max8903_pdata pdata;
struct device *dev;
struct power_supply psy;
bool fault;
@@ -52,8 +52,8 @@ static int max8903_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
- if (data->pdata->chg) {
- if (gpio_get_value(data->pdata->chg) == 0)
+ if (data->pdata.chg) {
+ if (gpio_get_value(data->pdata.chg) == 0)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (data->usb_in || data->ta_in)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -80,7 +80,7 @@ static int max8903_get_property(struct power_supply *psy,
static irqreturn_t max8903_dcin(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
bool ta_in;
enum power_supply_type old_type;
@@ -121,7 +121,7 @@ static irqreturn_t max8903_dcin(int irq, void *_data)
static irqreturn_t max8903_usbin(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
bool usb_in;
enum power_supply_type old_type;
@@ -160,7 +160,7 @@ static irqreturn_t max8903_usbin(int irq, void *_data)
static irqreturn_t max8903_fault(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
bool fault;
fault = gpio_get_value(pdata->flt) ? false : true;
@@ -193,7 +193,7 @@ static __devinit int max8903_probe(struct platform_device *pdev)
dev_err(dev, "Cannot allocate memory.\n");
return -ENOMEM;
}
- data->pdata = pdata;
+ memcpy(&data->pdata, pdata, sizeof(struct max8903_pdata));
data->dev = dev;
platform_set_drvdata(pdev, data);
@@ -349,7 +349,7 @@ static __devexit int max8903_remove(struct platform_device *pdev)
struct max8903_data *data = platform_get_drvdata(pdev);
if (data) {
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
if (pdata->flt)
free_irq(gpio_to_irq(pdata->flt), data);
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
new file mode 100644
index 0000000..ffc5033
--- /dev/null
+++ b/drivers/power/max8997_charger.c
@@ -0,0 +1,207 @@
+/*
+ * max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+struct charger_data {
+ struct device *dev;
+ struct max8997_dev *iodev;
+ struct power_supply battery;
+};
+
+static enum power_supply_property max8997_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS, /* "FULL" or "NOT FULL" only. */
+ POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */
+ POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */
+};
+
+/* Note that the charger control is done by a current regulator "CHARGER" */
+static int max8997_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct charger_data *charger = container_of(psy,
+ struct charger_data, battery);
+ struct i2c_client *i2c = charger->iodev->i2c;
+ int ret;
+ u8 reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ if ((reg & (1 << 0)) == 0x1)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ if ((reg & (1 << 2)) == 0x0)
+ val->intval = 1;
+
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ /* DCINOK */
+ if (reg & (1 << 1))
+ val->intval = 1;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static __devinit int max8997_battery_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct charger_data *charger;
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (pdata->eoc_mA) {
+ u8 val = (pdata->eoc_mA - 50) / 10;
+ if (val < 0)
+ val = 0;
+ if (val > 0xf)
+ val = 0xf;
+
+ ret = max8997_update_reg(iodev->i2c,
+ MAX8997_REG_MBCCTRL5, val, 0xf);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot use i2c bus.\n");
+ return ret;
+ }
+ }
+
+ switch (pdata->timeout) {
+ case 5:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x2 << 4, 0x7 << 4);
+ break;
+ case 6:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x3 << 4, 0x7 << 4);
+ break;
+ case 7:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x4 << 4, 0x7 << 4);
+ break;
+ case 0:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x7 << 4, 0x7 << 4);
+ break;
+ default:
+ dev_err(&pdev->dev, "incorrect timeout value (%d)\n",
+ pdata->timeout);
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot use i2c bus.\n");
+ return ret;
+ }
+
+ charger = kzalloc(sizeof(struct charger_data), GFP_KERNEL);
+ if (charger == NULL) {
+ dev_err(&pdev->dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, charger);
+
+ charger->battery.name = "max8997_pmic";
+ charger->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ charger->battery.get_property = max8997_battery_get_property;
+ charger->battery.properties = max8997_battery_props;
+ charger->battery.num_properties = ARRAY_SIZE(max8997_battery_props);
+
+ charger->dev = &pdev->dev;
+ charger->iodev = iodev;
+
+ ret = power_supply_register(&pdev->dev, &charger->battery);
+ if (ret) {
+ dev_err(&pdev->dev, "failed: power supply register\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ kfree(charger);
+ return ret;
+}
+
+static int __devexit max8997_battery_remove(struct platform_device *pdev)
+{
+ struct charger_data *charger = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&charger->battery);
+ kfree(charger);
+ return 0;
+}
+
+static const struct platform_device_id max8997_battery_id[] = {
+ { "max8997-battery", 0 },
+};
+
+static struct platform_driver max8997_battery_driver = {
+ .driver = {
+ .name = "max8997-battery",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_battery_probe,
+ .remove = __devexit_p(max8997_battery_remove),
+ .id_table = max8997_battery_id,
+};
+
+static int __init max8997_battery_init(void)
+{
+ return platform_driver_register(&max8997_battery_driver);
+}
+subsys_initcall(max8997_battery_init);
+
+static void __exit max8997_battery_cleanup(void)
+{
+ platform_driver_unregister(&max8997_battery_driver);
+}
+module_exit(max8997_battery_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8997/8966 battery control driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
new file mode 100644
index 0000000..ef8efad
--- /dev/null
+++ b/drivers/power/max8998_charger.c
@@ -0,0 +1,219 @@
+/*
+ * max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
+ *
+ * Copyright (C) 2009-2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/max8998.h>
+#include <linux/mfd/max8998-private.h>
+
+struct max8998_battery_data {
+ struct device *dev;
+ struct max8998_dev *iodev;
+ struct power_supply battery;
+};
+
+static enum power_supply_property max8998_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */
+ POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */
+};
+
+/* Note that the charger control is done by a current regulator "CHARGER" */
+static int max8998_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max8998_battery_data *max8998 = container_of(psy,
+ struct max8998_battery_data, battery);
+ struct i2c_client *i2c = max8998->iodev->i2c;
+ int ret;
+ u8 reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, &reg);
+ if (ret)
+ return ret;
+ if (reg & (1 << 4))
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, &reg);
+ if (ret)
+ return ret;
+ if (reg & (1 << 3))
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static __devinit int max8998_battery_probe(struct platform_device *pdev)
+{
+ struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8998_battery_data *max8998;
+ struct i2c_client *i2c;
+ int ret = 0;
+
+ if (!pdata) {
+ dev_err(pdev->dev.parent, "No platform init data supplied\n");
+ return -ENODEV;
+ }
+
+ max8998 = kzalloc(sizeof(struct max8998_battery_data), GFP_KERNEL);
+ if (!max8998)
+ return -ENOMEM;
+
+ max8998->dev = &pdev->dev;
+ max8998->iodev = iodev;
+ platform_set_drvdata(pdev, max8998);
+ i2c = max8998->iodev->i2c;
+
+ /* Setup "End of Charge" */
+ /* If EOC value equals 0,
+ * remain value set from bootloader or default value */
+ if (pdata->eoc >= 10 && pdata->eoc <= 45) {
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1,
+ (pdata->eoc / 5 - 2) << 5, 0x7 << 5);
+ } else if (pdata->eoc == 0) {
+ dev_dbg(max8998->dev,
+ "EOC value not set: leave it unchanged.\n");
+ } else {
+ dev_err(max8998->dev, "Invalid EOC value\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Setup Charge Restart Level */
+ switch (pdata->restart) {
+ case 100:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x1 << 3, 0x3 << 3);
+ break;
+ case 150:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x0 << 3, 0x3 << 3);
+ break;
+ case 200:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x2 << 3, 0x3 << 3);
+ break;
+ case -1:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x3 << 3, 0x3 << 3);
+ break;
+ case 0:
+ dev_dbg(max8998->dev,
+ "Restart Level not set: leave it unchanged.\n");
+ break;
+ default:
+ dev_err(max8998->dev, "Invalid Restart Level\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Setup Charge Full Timeout */
+ switch (pdata->timeout) {
+ case 5:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x0 << 4, 0x3 << 4);
+ break;
+ case 6:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x1 << 4, 0x3 << 4);
+ break;
+ case 7:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x2 << 4, 0x3 << 4);
+ break;
+ case -1:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x3 << 4, 0x3 << 4);
+ break;
+ case 0:
+ dev_dbg(max8998->dev,
+ "Full Timeout not set: leave it unchanged.\n");
+ default:
+ dev_err(max8998->dev, "Invalid Full Timeout value\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ max8998->battery.name = "max8998_pmic";
+ max8998->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ max8998->battery.get_property = max8998_battery_get_property;
+ max8998->battery.properties = max8998_battery_props;
+ max8998->battery.num_properties = ARRAY_SIZE(max8998_battery_props);
+
+ ret = power_supply_register(max8998->dev, &max8998->battery);
+ if (ret) {
+ dev_err(max8998->dev, "failed: power supply register\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ kfree(max8998);
+ return ret;
+}
+
+static int __devexit max8998_battery_remove(struct platform_device *pdev)
+{
+ struct max8998_battery_data *max8998 = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&max8998->battery);
+ kfree(max8998);
+
+ return 0;
+}
+
+static const struct platform_device_id max8998_battery_id[] = {
+ { "max8998-battery", TYPE_MAX8998 },
+};
+
+static struct platform_driver max8998_battery_driver = {
+ .driver = {
+ .name = "max8998-battery",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8998_battery_probe,
+ .remove = __devexit_p(max8998_battery_remove),
+ .id_table = max8998_battery_id,
+};
+
+static int __init max8998_battery_init(void)
+{
+ return platform_driver_register(&max8998_battery_driver);
+}
+module_init(max8998_battery_init);
+
+static void __exit max8998_battery_cleanup(void)
+{
+ platform_driver_unregister(&max8998_battery_driver);
+}
+module_exit(max8998_battery_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8998 battery control driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:max8998-battery");
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index d36c289..d32d0d7 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -20,6 +20,7 @@
#include <linux/s3c_adc_battery.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <plat/adc.h>
@@ -266,7 +267,7 @@ static irqreturn_t s3c_adc_bat_charged(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init s3c_adc_bat_probe(struct platform_device *pdev)
+static int __devinit s3c_adc_bat_probe(struct platform_device *pdev)
{
struct s3c_adc_client *client;
struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index 92c16e1..54b9198 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -62,7 +62,7 @@
#define TWL4030_MSTATEC_COMPLETE4 0x0e
static bool allow_usb;
-module_param(allow_usb, bool, 1);
+module_param(allow_usb, bool, 0644);
MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current");
struct twl4030_bci {
@@ -425,7 +425,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
{
struct twl4030_bci *bci;
int ret;
- int reg;
+ u32 reg;
bci = kzalloc(sizeof(*bci), GFP_KERNEL);
if (bci == NULL)
@@ -486,7 +486,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
}
/* Enable interrupts now. */
- reg = ~(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
TWL4030_TBATOR1 | TWL4030_BATSTS);
ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
TWL4030_INTERRUPTS_BCIIMR1A);
@@ -495,7 +495,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
goto fail_unmask_interrupts;
}
- reg = ~(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
+ reg = ~(u32)(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
TWL4030_INTERRUPTS_BCIIMR2A);
if (ret < 0)
@@ -572,7 +572,7 @@ static void __exit twl4030_bci_exit(void)
}
module_exit(twl4030_bci_exit);
-MODULE_AUTHOR("Gražydas Ignotas");
+MODULE_AUTHOR("Gražvydas Ignotas");
MODULE_DESCRIPTION("TWL4030 Battery Charger Interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:twl4030_bci");
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
index 0fd130d..e648cbe 100644
--- a/drivers/power/wm831x_backup.c
+++ b/drivers/power/wm831x_backup.c
@@ -22,6 +22,7 @@
struct wm831x_backup {
struct wm831x *wm831x;
struct power_supply backup;
+ char name[20];
};
static int wm831x_backup_read_voltage(struct wm831x *wm831x,
@@ -163,6 +164,7 @@ static enum power_supply_property wm831x_backup_props[] = {
static __devinit int wm831x_backup_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
struct wm831x_backup *devdata;
struct power_supply *backup;
int ret;
@@ -182,7 +184,14 @@ static __devinit int wm831x_backup_probe(struct platform_device *pdev)
*/
wm831x_config_backup(wm831x);
- backup->name = "wm831x-backup";
+ if (wm831x_pdata && wm831x_pdata->wm831x_num)
+ snprintf(devdata->name, sizeof(devdata->name),
+ "wm831x-backup.%d", wm831x_pdata->wm831x_num);
+ else
+ snprintf(devdata->name, sizeof(devdata->name),
+ "wm831x-backup");
+
+ backup->name = devdata->name;
backup->type = POWER_SUPPLY_TYPE_BATTERY;
backup->properties = wm831x_backup_props;
backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
@@ -203,6 +212,7 @@ static __devexit int wm831x_backup_remove(struct platform_device *pdev)
struct wm831x_backup *devdata = platform_get_drvdata(pdev);
power_supply_unregister(&devdata->backup);
+ kfree(devdata->backup.name);
kfree(devdata);
return 0;
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index ddf8cf5..6cc2ca6 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -24,6 +24,9 @@ struct wm831x_power {
struct power_supply wall;
struct power_supply usb;
struct power_supply battery;
+ char wall_name[20];
+ char usb_name[20];
+ char battery_name[20];
};
static int wm831x_power_check_online(struct wm831x *wm831x, int supply,
@@ -486,6 +489,7 @@ static irqreturn_t wm831x_pwr_src_irq(int irq, void *data)
static __devinit int wm831x_power_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
struct wm831x_power *power;
struct power_supply *usb;
struct power_supply *battery;
@@ -503,12 +507,28 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
battery = &power->battery;
wall = &power->wall;
+ if (wm831x_pdata && wm831x_pdata->wm831x_num) {
+ snprintf(power->wall_name, sizeof(power->wall_name),
+ "wm831x-wall.%d", wm831x_pdata->wm831x_num);
+ snprintf(power->battery_name, sizeof(power->battery_name),
+ "wm831x-battery.%d", wm831x_pdata->wm831x_num);
+ snprintf(power->usb_name, sizeof(power->usb_name),
+ "wm831x-usb.%d", wm831x_pdata->wm831x_num);
+ } else {
+ snprintf(power->wall_name, sizeof(power->wall_name),
+ "wm831x-wall");
+ snprintf(power->battery_name, sizeof(power->battery_name),
+ "wm831x-battery");
+ snprintf(power->usb_name, sizeof(power->usb_name),
+ "wm831x-usb");
+ }
+
/* We ignore configuration failures since we can still read back
* the status without enabling the charger.
*/
wm831x_config_battery(wm831x);
- wall->name = "wm831x-wall";
+ wall->name = power->wall_name;
wall->type = POWER_SUPPLY_TYPE_MAINS;
wall->properties = wm831x_wall_props;
wall->num_properties = ARRAY_SIZE(wm831x_wall_props);
@@ -517,7 +537,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret)
goto err_kmalloc;
- battery->name = "wm831x-battery";
+ battery->name = power->battery_name;
battery->properties = wm831x_bat_props;
battery->num_properties = ARRAY_SIZE(wm831x_bat_props);
battery->get_property = wm831x_bat_get_prop;
@@ -526,7 +546,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret)
goto err_wall;
- usb->name = "wm831x-usb",
+ usb->name = power->usb_name,
usb->type = POWER_SUPPLY_TYPE_USB;
usb->properties = wm831x_usb_props;
usb->num_properties = ARRAY_SIZE(wm831x_usb_props);
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index ee89358..ebe77dd 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -505,8 +505,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
rdev->dev.dma_mask = &rdev->dma_mask;
rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- if ((rdev->pef & RIO_PEF_INB_DOORBELL) &&
- (rdev->dst_ops & RIO_DST_OPS_DOORBELL))
+ if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
0, 0xffff);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 118eb21..c7fd2c0 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -249,6 +249,12 @@ config REGULATOR_TPS6507X
three step-down converters and two general-purpose LDO voltage regulators.
It supports TI's software based Class-2 SmartReflex implementation.
+config REGULATOR_TPS65912
+ tristate "TI TPS65912 Power regulator"
+ depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
+ help
+ This driver supports TPS65912 voltage regulator chip.
+
config REGULATOR_88PM8607
bool "Marvell 88PM8607 Power regulators"
depends on MFD_88PM860X=y
@@ -304,5 +310,12 @@ config REGULATOR_TPS65910
help
This driver supports TPS65910 voltage regulator chips.
+config REGULATOR_AAT2870
+ tristate "AnalogicTech AAT2870 Regulators"
+ depends on MFD_AAT2870_CORE
+ help
+ If you have a AnalogicTech AAT2870 say Y to enable the
+ regulator driver.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 3932d2e..040d5aa 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -38,10 +38,12 @@ obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
+obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
new file mode 100644
index 0000000..cd41045
--- /dev/null
+++ b/drivers/regulator/aat2870-regulator.c
@@ -0,0 +1,232 @@
+/*
+ * linux/drivers/regulator/aat2870-regulator.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/aat2870.h>
+
+struct aat2870_regulator {
+ struct platform_device *pdev;
+ struct regulator_desc desc;
+
+ const int *voltages; /* uV */
+
+ int min_uV;
+ int max_uV;
+
+ u8 enable_addr;
+ u8 enable_shift;
+ u8 enable_mask;
+
+ u8 voltage_addr;
+ u8 voltage_shift;
+ u8 voltage_mask;
+};
+
+static int aat2870_ldo_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+
+ return ri->voltages[selector];
+}
+
+static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->voltage_addr, ri->voltage_mask,
+ (selector << ri->voltage_shift) & ri->voltage_mask);
+}
+
+static int aat2870_ldo_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+ u8 val;
+ int ret;
+
+ ret = aat2870->read(aat2870, ri->voltage_addr, &val);
+ if (ret)
+ return ret;
+
+ return (val & ri->voltage_mask) >> ri->voltage_shift;
+}
+
+static int aat2870_ldo_enable(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask,
+ ri->enable_mask);
+}
+
+static int aat2870_ldo_disable(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask, 0);
+}
+
+static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+ u8 val;
+ int ret;
+
+ ret = aat2870->read(aat2870, ri->enable_addr, &val);
+ if (ret)
+ return ret;
+
+ return val & ri->enable_mask ? 1 : 0;
+}
+
+static struct regulator_ops aat2870_ldo_ops = {
+ .list_voltage = aat2870_ldo_list_voltage,
+ .set_voltage_sel = aat2870_ldo_set_voltage_sel,
+ .get_voltage_sel = aat2870_ldo_get_voltage_sel,
+ .enable = aat2870_ldo_enable,
+ .disable = aat2870_ldo_disable,
+ .is_enabled = aat2870_ldo_is_enabled,
+};
+
+static const int aat2870_ldo_voltages[] = {
+ 1200000, 1300000, 1500000, 1600000,
+ 1800000, 2000000, 2200000, 2500000,
+ 2600000, 2700000, 2800000, 2900000,
+ 3000000, 3100000, 3200000, 3300000,
+};
+
+#define AAT2870_LDO(ids) \
+ { \
+ .desc = { \
+ .name = #ids, \
+ .id = AAT2870_ID_##ids, \
+ .n_voltages = ARRAY_SIZE(aat2870_ldo_voltages), \
+ .ops = &aat2870_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ .voltages = aat2870_ldo_voltages, \
+ .min_uV = 1200000, \
+ .max_uV = 3300000, \
+ }
+
+static struct aat2870_regulator aat2870_regulators[] = {
+ AAT2870_LDO(LDOA),
+ AAT2870_LDO(LDOB),
+ AAT2870_LDO(LDOC),
+ AAT2870_LDO(LDOD),
+};
+
+static struct aat2870_regulator *aat2870_get_regulator(int id)
+{
+ struct aat2870_regulator *ri = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aat2870_regulators); i++) {
+ ri = &aat2870_regulators[i];
+ if (ri->desc.id == id)
+ break;
+ }
+
+ if (!ri)
+ return NULL;
+
+ ri->enable_addr = AAT2870_LDO_EN;
+ ri->enable_shift = id - AAT2870_ID_LDOA;
+ ri->enable_mask = 0x1 << ri->enable_shift;
+
+ ri->voltage_addr = (id - AAT2870_ID_LDOA) / 2 ?
+ AAT2870_LDO_CD : AAT2870_LDO_AB;
+ ri->voltage_shift = (id - AAT2870_ID_LDOA) % 2 ? 0 : 4;
+ ri->voltage_mask = 0xF << ri->voltage_shift;
+
+ return ri;
+}
+
+static int aat2870_regulator_probe(struct platform_device *pdev)
+{
+ struct aat2870_regulator *ri;
+ struct regulator_dev *rdev;
+
+ ri = aat2870_get_regulator(pdev->id);
+ if (!ri) {
+ dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id);
+ return -EINVAL;
+ }
+ ri->pdev = pdev;
+
+ rdev = regulator_register(&ri->desc, &pdev->dev,
+ pdev->dev.platform_data, ri);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "Failed to register regulator %s\n",
+ ri->desc.name);
+ return PTR_ERR(rdev);
+ }
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static int __devexit aat2870_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static struct platform_driver aat2870_regulator_driver = {
+ .driver = {
+ .name = "aat2870-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = aat2870_regulator_probe,
+ .remove = __devexit_p(aat2870_regulator_remove),
+};
+
+static int __init aat2870_regulator_init(void)
+{
+ return platform_driver_register(&aat2870_regulator_driver);
+}
+subsys_initcall(aat2870_regulator_init);
+
+static void __exit aat2870_regulator_exit(void)
+{
+ platform_driver_unregister(&aat2870_regulator_driver);
+}
+module_exit(aat2870_regulator_exit);
+
+MODULE_DESCRIPTION("AnalogicTech AAT2870 Regulator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d3e3879..d8e6a42 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -20,6 +20,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/async.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
@@ -33,6 +34,8 @@
#include "dummy.h"
+#define rdev_crit(rdev, fmt, ...) \
+ pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
#define rdev_err(rdev, fmt, ...) \
pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
#define rdev_warn(rdev, fmt, ...) \
@@ -78,11 +81,13 @@ struct regulator {
char *supply_name;
struct device_attribute dev_attr;
struct regulator_dev *rdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
};
static int _regulator_is_enabled(struct regulator_dev *rdev);
-static int _regulator_disable(struct regulator_dev *rdev,
- struct regulator_dev **supply_rdev_ptr);
+static int _regulator_disable(struct regulator_dev *rdev);
static int _regulator_get_voltage(struct regulator_dev *rdev);
static int _regulator_get_current_limit(struct regulator_dev *rdev);
static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
@@ -90,6 +95,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV);
+static struct regulator *create_regulator(struct regulator_dev *rdev,
+ struct device *dev,
+ const char *supply_name);
static const char *rdev_get_name(struct regulator_dev *rdev)
{
@@ -143,8 +151,11 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
if (*min_uV < rdev->constraints->min_uV)
*min_uV = rdev->constraints->min_uV;
- if (*min_uV > *max_uV)
+ if (*min_uV > *max_uV) {
+ rdev_err(rdev, "unsupportable voltage range: %d-%duV\n",
+ *min_uV, *max_uV);
return -EINVAL;
+ }
return 0;
}
@@ -197,8 +208,11 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
if (*min_uA < rdev->constraints->min_uA)
*min_uA = rdev->constraints->min_uA;
- if (*min_uA > *max_uA)
+ if (*min_uA > *max_uA) {
+ rdev_err(rdev, "unsupportable current range: %d-%duA\n",
+ *min_uA, *max_uA);
return -EINVAL;
+ }
return 0;
}
@@ -213,6 +227,7 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
case REGULATOR_MODE_STANDBY:
break;
default:
+ rdev_err(rdev, "invalid mode %x specified\n", *mode);
return -EINVAL;
}
@@ -779,7 +794,6 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
if (ret < 0) {
rdev_err(rdev, "failed to apply %duV constraint\n",
rdev->constraints->min_uV);
- rdev->constraints = NULL;
return ret;
}
}
@@ -882,7 +896,6 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = suspend_prepare(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
- rdev->constraints = NULL;
goto out;
}
}
@@ -909,13 +922,15 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = ops->enable(rdev);
if (ret < 0) {
rdev_err(rdev, "failed to enable\n");
- rdev->constraints = NULL;
goto out;
}
}
print_constraints(rdev);
+ return 0;
out:
+ kfree(rdev->constraints);
+ rdev->constraints = NULL;
return ret;
}
@@ -929,21 +944,20 @@ out:
 * core if its child is enabled.
*/
static int set_supply(struct regulator_dev *rdev,
- struct regulator_dev *supply_rdev)
+ struct regulator_dev *supply_rdev)
{
int err;
- err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj,
- "supply");
- if (err) {
- rdev_err(rdev, "could not add device link %s err %d\n",
- supply_rdev->dev.kobj.name, err);
- goto out;
+ rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
+
+ rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
+ if (IS_ERR(rdev->supply)) {
+ err = PTR_ERR(rdev->supply);
+ rdev->supply = NULL;
+ return err;
}
- rdev->supply = supply_rdev;
- list_add(&rdev->slist, &supply_rdev->supply_list);
-out:
- return err;
+
+ return 0;
}
/**
@@ -1032,7 +1046,7 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
}
}
-#define REG_STR_SIZE 32
+#define REG_STR_SIZE 64
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
@@ -1052,8 +1066,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
if (dev) {
/* create a 'requested_microamps_name' sysfs entry */
- size = scnprintf(buf, REG_STR_SIZE, "microamps_requested_%s",
- supply_name);
+ size = scnprintf(buf, REG_STR_SIZE,
+ "microamps_requested_%s-%s",
+ dev_name(dev), supply_name);
if (size >= REG_STR_SIZE)
goto overflow_err;
@@ -1088,7 +1103,28 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
dev->kobj.name, err);
goto link_name_err;
}
+ } else {
+ regulator->supply_name = kstrdup(supply_name, GFP_KERNEL);
+ if (regulator->supply_name == NULL)
+ goto attr_err;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ regulator->debugfs = debugfs_create_dir(regulator->supply_name,
+ rdev->debugfs);
+ if (IS_ERR_OR_NULL(regulator->debugfs)) {
+ rdev_warn(rdev, "Failed to create debugfs directory\n");
+ regulator->debugfs = NULL;
+ } else {
+ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+ &regulator->uA_load);
+ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+ &regulator->min_uV);
+ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+ &regulator->max_uV);
}
+#endif
+
mutex_unlock(&rdev->mutex);
return regulator;
link_name_err:
@@ -1267,13 +1303,17 @@ void regulator_put(struct regulator *regulator)
mutex_lock(&regulator_list_mutex);
rdev = regulator->rdev;
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(regulator->debugfs);
+#endif
+
/* remove any sysfs entries */
if (regulator->dev) {
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
- kfree(regulator->supply_name);
device_remove_file(regulator->dev, &regulator->dev_attr);
kfree(regulator->dev_attr.attr.name);
}
+ kfree(regulator->supply_name);
list_del(&regulator->list);
kfree(regulator);
@@ -1301,19 +1341,6 @@ static int _regulator_enable(struct regulator_dev *rdev)
{
int ret, delay;
- if (rdev->use_count == 0) {
- /* do we need to enable the supply regulator first */
- if (rdev->supply) {
- mutex_lock(&rdev->supply->mutex);
- ret = _regulator_enable(rdev->supply);
- mutex_unlock(&rdev->supply->mutex);
- if (ret < 0) {
- rdev_err(rdev, "failed to enable: %d\n", ret);
- return ret;
- }
- }
- }
-
/* check voltage and requested load before enabling */
if (rdev->constraints &&
(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
@@ -1388,19 +1415,27 @@ int regulator_enable(struct regulator *regulator)
struct regulator_dev *rdev = regulator->rdev;
int ret = 0;
+ if (rdev->supply) {
+ ret = regulator_enable(rdev->supply);
+ if (ret != 0)
+ return ret;
+ }
+
mutex_lock(&rdev->mutex);
ret = _regulator_enable(rdev);
mutex_unlock(&rdev->mutex);
+
+ if (ret != 0 && rdev->supply)
+ regulator_disable(rdev->supply);
+
return ret;
}
EXPORT_SYMBOL_GPL(regulator_enable);
/* locks held by regulator_disable() */
-static int _regulator_disable(struct regulator_dev *rdev,
- struct regulator_dev **supply_rdev_ptr)
+static int _regulator_disable(struct regulator_dev *rdev)
{
int ret = 0;
- *supply_rdev_ptr = NULL;
if (WARN(rdev->use_count <= 0,
"unbalanced disables for %s\n", rdev_get_name(rdev)))
@@ -1427,9 +1462,6 @@ static int _regulator_disable(struct regulator_dev *rdev,
NULL);
}
- /* decrease our supplies ref count and disable if required */
- *supply_rdev_ptr = rdev->supply;
-
rdev->use_count = 0;
} else if (rdev->use_count > 1) {
@@ -1440,6 +1472,7 @@ static int _regulator_disable(struct regulator_dev *rdev,
rdev->use_count--;
}
+
return ret;
}
@@ -1458,29 +1491,21 @@ static int _regulator_disable(struct regulator_dev *rdev,
int regulator_disable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
- struct regulator_dev *supply_rdev = NULL;
int ret = 0;
mutex_lock(&rdev->mutex);
- ret = _regulator_disable(rdev, &supply_rdev);
+ ret = _regulator_disable(rdev);
mutex_unlock(&rdev->mutex);
- /* decrease our supplies ref count and disable if required */
- while (supply_rdev != NULL) {
- rdev = supply_rdev;
-
- mutex_lock(&rdev->mutex);
- _regulator_disable(rdev, &supply_rdev);
- mutex_unlock(&rdev->mutex);
- }
+ if (ret == 0 && rdev->supply)
+ regulator_disable(rdev->supply);
return ret;
}
EXPORT_SYMBOL_GPL(regulator_disable);
/* locks held by regulator_force_disable() */
-static int _regulator_force_disable(struct regulator_dev *rdev,
- struct regulator_dev **supply_rdev_ptr)
+static int _regulator_force_disable(struct regulator_dev *rdev)
{
int ret = 0;
@@ -1497,10 +1522,6 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
REGULATOR_EVENT_DISABLE, NULL);
}
- /* decrease our supplies ref count and disable if required */
- *supply_rdev_ptr = rdev->supply;
-
- rdev->use_count = 0;
return ret;
}
@@ -1516,16 +1537,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
int regulator_force_disable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
- struct regulator_dev *supply_rdev = NULL;
int ret;
mutex_lock(&rdev->mutex);
regulator->uA_load = 0;
- ret = _regulator_force_disable(rdev, &supply_rdev);
+ ret = _regulator_force_disable(regulator->rdev);
mutex_unlock(&rdev->mutex);
- if (supply_rdev)
- regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev)));
+ if (rdev->supply)
+ while (rdev->open_count--)
+ regulator_disable(rdev->supply);
return ret;
}
@@ -2136,7 +2157,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
/* get input voltage */
input_uV = 0;
if (rdev->supply)
- input_uV = _regulator_get_voltage(rdev->supply);
+ input_uV = regulator_get_voltage(rdev->supply);
if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0) {
@@ -2206,17 +2227,8 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier);
static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
- struct regulator_dev *_rdev;
-
/* call rdev chain first */
blocking_notifier_call_chain(&rdev->notifier, event, NULL);
-
- /* now notify regulator we supply */
- list_for_each_entry(_rdev, &rdev->supply_list, slist) {
- mutex_lock(&_rdev->mutex);
- _notifier_call_chain(_rdev, event, data);
- mutex_unlock(&_rdev->mutex);
- }
}
/**
@@ -2264,6 +2276,13 @@ err:
}
EXPORT_SYMBOL_GPL(regulator_bulk_get);
+static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
+{
+ struct regulator_bulk_data *bulk = data;
+
+ bulk->ret = regulator_enable(bulk->consumer);
+}
+
/**
* regulator_bulk_enable - enable multiple regulator consumers
*
@@ -2279,21 +2298,33 @@ EXPORT_SYMBOL_GPL(regulator_bulk_get);
int regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers)
{
+ LIST_HEAD(async_domain);
int i;
- int ret;
+ int ret = 0;
+
+ for (i = 0; i < num_consumers; i++)
+ async_schedule_domain(regulator_bulk_enable_async,
+ &consumers[i], &async_domain);
+
+ async_synchronize_full_domain(&async_domain);
+ /* If any consumer failed we need to unwind any that succeeded */
for (i = 0; i < num_consumers; i++) {
- ret = regulator_enable(consumers[i].consumer);
- if (ret != 0)
+ if (consumers[i].ret != 0) {
+ ret = consumers[i].ret;
goto err;
+ }
}
return 0;
err:
- pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret);
- for (--i; i >= 0; --i)
- regulator_disable(consumers[i].consumer);
+ for (i = 0; i < num_consumers; i++)
+ if (consumers[i].ret == 0)
+ regulator_disable(consumers[i].consumer);
+ else
+ pr_err("Failed to enable %s: %d\n",
+ consumers[i].supply, consumers[i].ret);
return ret;
}
@@ -2589,9 +2620,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
rdev->owner = regulator_desc->owner;
rdev->desc = regulator_desc;
INIT_LIST_HEAD(&rdev->consumer_list);
- INIT_LIST_HEAD(&rdev->supply_list);
INIT_LIST_HEAD(&rdev->list);
- INIT_LIST_HEAD(&rdev->slist);
BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
/* perform any regulator specific init */
@@ -2672,6 +2701,7 @@ unset_supplies:
unset_regulator_supplies(rdev);
scrub:
+ kfree(rdev->constraints);
device_unregister(&rdev->dev);
/* device core frees rdev */
rdev = ERR_PTR(ret);
@@ -2703,7 +2733,7 @@ void regulator_unregister(struct regulator_dev *rdev)
unset_regulator_supplies(rdev);
list_del(&rdev->list);
if (rdev->supply)
- sysfs_remove_link(&rdev->dev.kobj, "supply");
+ regulator_put(rdev->supply);
device_unregister(&rdev->dev);
kfree(rdev->constraints);
mutex_unlock(&regulator_list_mutex);
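
The regulator_bulk_enable() rework above schedules every enable through the async machinery, waits for the whole domain, and unwinds any supply that did come up if another one failed. From a consumer's point of view the API is unchanged; a minimal usage sketch (device and supply names are illustrative only):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "DBVDD" },	/* illustrative supply names */
	{ .supply = "AVDD" },
};

static int example_power_up(struct device *dev)
{
	int ret;

	ret = regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				 example_supplies);
	if (ret)
		return ret;

	/*
	 * Enables now run in parallel; if any of them fails, every supply
	 * that was successfully enabled is disabled again before the
	 * first error code is returned.
	 */
	ret = regulator_bulk_enable(ARRAY_SIZE(example_supplies),
				    example_supplies);
	if (ret)
		regulator_bulk_free(ARRAY_SIZE(example_supplies),
				    example_supplies);
	return ret;
}
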
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index c7410bd..f6ef669 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -36,6 +36,29 @@ static struct regulator_desc dummy_desc = {
.ops = &dummy_ops,
};
+static int __devinit dummy_regulator_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
+ &dummy_initdata, NULL);
+ if (IS_ERR(dummy_regulator_rdev)) {
+ ret = PTR_ERR(dummy_regulator_rdev);
+ pr_err("Failed to register regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver dummy_regulator_driver = {
+ .probe = dummy_regulator_probe,
+ .driver = {
+ .name = "reg-dummy",
+ .owner = THIS_MODULE,
+ },
+};
+
static struct platform_device *dummy_pdev;
void __init regulator_dummy_init(void)
@@ -55,12 +78,9 @@ void __init regulator_dummy_init(void)
return;
}
- dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
- &dummy_initdata, NULL);
- if (IS_ERR(dummy_regulator_rdev)) {
- ret = PTR_ERR(dummy_regulator_rdev);
- pr_err("Failed to register regulator: %d\n", ret);
+ ret = platform_driver_register(&dummy_regulator_driver);
+ if (ret != 0) {
+ pr_err("Failed to register dummy regulator driver: %d\n", ret);
platform_device_unregister(dummy_pdev);
- return;
}
}
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 55dd4e6..66d2d60 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -49,7 +49,6 @@
#define TPS65911_REG_LDO7 11
#define TPS65911_REG_LDO8 12
-#define TPS65910_NUM_REGULATOR 13
#define TPS65910_SUPPLY_STATE_ENABLED 0x1
/* supported VIO voltages in millivolts */
@@ -264,11 +263,12 @@ static struct tps_info tps65911_regs[] = {
};
struct tps65910_reg {
- struct regulator_desc desc[TPS65910_NUM_REGULATOR];
+ struct regulator_desc *desc;
struct tps65910 *mfd;
- struct regulator_dev *rdev[TPS65910_NUM_REGULATOR];
- struct tps_info *info[TPS65910_NUM_REGULATOR];
+ struct regulator_dev **rdev;
+ struct tps_info **info;
struct mutex mutex;
+ int num_regulators;
int mode;
int (*get_ctrl_reg)(int);
};
@@ -759,8 +759,13 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
mult = (selector / VDD1_2_NUM_VOLTS) + 1;
volt = VDD1_2_MIN_VOLT +
(selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
+ break;
case TPS65911_REG_VDDCTRL:
volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
+ break;
+ default:
+ BUG();
+ return -EINVAL;
}
return volt * 100 * mult;
@@ -897,16 +902,42 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
switch(tps65910_chip_id(tps65910)) {
case TPS65910:
pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
+ pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
info = tps65910_regs;
+ break;
case TPS65911:
pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
+ pmic->num_regulators = ARRAY_SIZE(tps65911_regs);
info = tps65911_regs;
+ break;
default:
pr_err("Invalid tps chip version\n");
+ kfree(pmic);
return -ENODEV;
}
- for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) {
+ pmic->desc = kcalloc(pmic->num_regulators,
+ sizeof(struct regulator_desc), GFP_KERNEL);
+ if (!pmic->desc) {
+ err = -ENOMEM;
+ goto err_free_pmic;
+ }
+
+ pmic->info = kcalloc(pmic->num_regulators,
+ sizeof(struct tps_info *), GFP_KERNEL);
+ if (!pmic->info) {
+ err = -ENOMEM;
+ goto err_free_desc;
+ }
+
+ pmic->rdev = kcalloc(pmic->num_regulators,
+ sizeof(struct regulator_dev *), GFP_KERNEL);
+ if (!pmic->rdev) {
+ err = -ENOMEM;
+ goto err_free_info;
+ }
+
+ for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) {
/* Register the regulators */
pmic->info[i] = info;
@@ -938,7 +969,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
"failed to register %s regulator\n",
pdev->name);
err = PTR_ERR(rdev);
- goto err;
+ goto err_unregister_regulator;
}
/* Save regulator for cleanup */
@@ -946,23 +977,31 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
}
return 0;
-err:
+err_unregister_regulator:
while (--i >= 0)
regulator_unregister(pmic->rdev[i]);
-
+ kfree(pmic->rdev);
+err_free_info:
+ kfree(pmic->info);
+err_free_desc:
+ kfree(pmic->desc);
+err_free_pmic:
kfree(pmic);
return err;
}
static int __devexit tps65910_remove(struct platform_device *pdev)
{
- struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev);
+ struct tps65910_reg *pmic = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < TPS65910_NUM_REGULATOR; i++)
- regulator_unregister(tps65910_reg->rdev[i]);
+ for (i = 0; i < pmic->num_regulators; i++)
+ regulator_unregister(pmic->rdev[i]);
- kfree(tps65910_reg);
+ kfree(pmic->rdev);
+ kfree(pmic->info);
+ kfree(pmic->desc);
+ kfree(pmic);
return 0;
}
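
The break statements added to the chip-id switch above matter because the original code fell straight through: a TPS65910 first picked its own table, then overwrote it with the TPS65911 one, and finally hit the default branch and returned -ENODEV. A small standalone illustration of that failure mode (ordinary user-space C, names are only placeholders):

#include <stdio.h>

enum chip { TPS65910, TPS65911 };

/* Mirrors the original (break-less) switch in tps65910_probe(). */
static const char *pick_table(enum chip id, int *err)
{
	const char *table = "none";

	*err = 0;
	switch (id) {
	case TPS65910:
		table = "tps65910_regs";
		/* no break: falls through ... */
	case TPS65911:
		table = "tps65911_regs";
		/* ... and keeps falling ... */
	default:
		*err = -1;	/* ... into the "invalid chip" error path */
	}
	return table;
}

int main(void)
{
	int err;
	const char *table = pick_table(TPS65910, &err);

	/* Even a valid TPS65910 picks the TPS65911 table and then hits the
	 * error path - exactly what the added break statements fix. */
	printf("err=%d table=%s\n", err, table);
	return 0;
}
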
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
new file mode 100644
index 0000000..3a9313e
--- /dev/null
+++ b/drivers/regulator/tps65912-regulator.c
@@ -0,0 +1,800 @@
+/*
+ * tps65912-regulator.c -- TI tps65912
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65912.h>
+
+/* DCDCs */
+#define TPS65912_REG_DCDC1 0
+#define TPS65912_REG_DCDC2 1
+#define TPS65912_REG_DCDC3 2
+#define TPS65912_REG_DCDC4 3
+
+/* LDOs */
+#define TPS65912_REG_LDO1 4
+#define TPS65912_REG_LDO2 5
+#define TPS65912_REG_LDO3 6
+#define TPS65912_REG_LDO4 7
+#define TPS65912_REG_LDO5 8
+#define TPS65912_REG_LDO6 9
+#define TPS65912_REG_LDO7 10
+#define TPS65912_REG_LDO8 11
+#define TPS65912_REG_LDO9 12
+#define TPS65912_REG_LDO10 13
+
+#define TPS65912_MAX_REG_ID TPS65912_REG_LDO10
+
+/* Number of step-down converters available */
+#define TPS65912_NUM_DCDC 4
+
+/* Number of LDO voltage regulators available */
+#define TPS65912_NUM_LDO 10
+
+/* Number of total regulators available */
+#define TPS65912_NUM_REGULATOR (TPS65912_NUM_DCDC + TPS65912_NUM_LDO)
+
+#define TPS65912_REG_ENABLED 0x80
+#define OP_SELREG_MASK 0x40
+#define OP_SELREG_SHIFT 6
+
+struct tps_info {
+ const char *name;
+};
+
+static struct tps_info tps65912_regs[] = {
+ {
+ .name = "DCDC1",
+ },
+ {
+ .name = "DCDC2",
+ },
+ {
+ .name = "DCDC3",
+ },
+ {
+ .name = "DCDC4",
+ },
+ {
+ .name = "LDO1",
+ },
+ {
+ .name = "LDO2",
+ },
+ {
+ .name = "LDO3",
+ },
+ {
+ .name = "LDO4",
+ },
+ {
+ .name = "LDO5",
+ },
+ {
+ .name = "LDO6",
+ },
+ {
+ .name = "LDO7",
+ },
+ {
+ .name = "LDO8",
+ },
+ {
+ .name = "LDO9",
+ },
+ {
+ .name = "LDO10",
+ },
+};
+
+struct tps65912_reg {
+ struct regulator_desc desc[TPS65912_NUM_REGULATOR];
+ struct tps65912 *mfd;
+ struct regulator_dev *rdev[TPS65912_NUM_REGULATOR];
+ struct tps_info *info[TPS65912_NUM_REGULATOR];
+ /* for read/write access */
+ struct mutex io_lock;
+ int mode;
+ int (*get_ctrl_reg)(int);
+ int dcdc1_range;
+ int dcdc2_range;
+ int dcdc3_range;
+ int dcdc4_range;
+ int pwm_mode_reg;
+ int eco_reg;
+};
+
+static int tps65912_get_range(struct tps65912_reg *pmic, int id)
+{
+ struct tps65912 *mfd = pmic->mfd;
+
+ if (id > TPS65912_REG_DCDC4)
+ return 0;
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ pmic->dcdc1_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC1_LIMIT);
+ if (pmic->dcdc1_range < 0)
+ return pmic->dcdc1_range;
+ pmic->dcdc1_range = (pmic->dcdc1_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc1_range;
+ case TPS65912_REG_DCDC2:
+ pmic->dcdc2_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC2_LIMIT);
+ if (pmic->dcdc2_range < 0)
+ return pmic->dcdc2_range;
+ pmic->dcdc2_range = (pmic->dcdc2_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc2_range;
+ case TPS65912_REG_DCDC3:
+ pmic->dcdc3_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC3_LIMIT);
+ if (pmic->dcdc3_range < 0)
+ return pmic->dcdc3_range;
+ pmic->dcdc3_range = (pmic->dcdc3_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc3_range;
+ case TPS65912_REG_DCDC4:
+ pmic->dcdc4_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC4_LIMIT);
+ if (pmic->dcdc4_range < 0)
+ return pmic->dcdc4_range;
+ pmic->dcdc4_range = (pmic->dcdc4_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc4_range;
+ default:
+ return 0;
+ }
+}
+
+static unsigned long tps65912_vsel_to_uv_range0(u8 vsel)
+{
+ unsigned long uv;
+
+ uv = ((vsel * 12500) + 500000);
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_range1(u8 vsel)
+{
+ unsigned long uv;
+
+ uv = ((vsel * 12500) + 700000);
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_range2(u8 vsel)
+{
+ unsigned long uv;
+
+ uv = ((vsel * 25000) + 500000);
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_range3(u8 vsel)
+{
+ unsigned long uv;
+
+ if (vsel == 0x3f)
+ uv = 3800000;
+ else
+ uv = ((vsel * 50000) + 500000);
+
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_ldo(u8 vsel)
+{
+ unsigned long uv = 0;
+
+ if (vsel <= 32)
+ uv = ((vsel * 25000) + 800000);
+ else if (vsel > 32 && vsel <= 60)
+ uv = (((vsel - 32) * 50000) + 1600000);
+ else if (vsel > 60)
+ uv = (((vsel - 60) * 100000) + 3000000);
+
+ return uv;
+}
+
+static int tps65912_get_ctrl_register(int id)
+{
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ return TPS65912_DCDC1_AVS;
+ case TPS65912_REG_DCDC2:
+ return TPS65912_DCDC2_AVS;
+ case TPS65912_REG_DCDC3:
+ return TPS65912_DCDC3_AVS;
+ case TPS65912_REG_DCDC4:
+ return TPS65912_DCDC4_AVS;
+ case TPS65912_REG_LDO1:
+ return TPS65912_LDO1_AVS;
+ case TPS65912_REG_LDO2:
+ return TPS65912_LDO2_AVS;
+ case TPS65912_REG_LDO3:
+ return TPS65912_LDO3_AVS;
+ case TPS65912_REG_LDO4:
+ return TPS65912_LDO4_AVS;
+ case TPS65912_REG_LDO5:
+ return TPS65912_LDO5;
+ case TPS65912_REG_LDO6:
+ return TPS65912_LDO6;
+ case TPS65912_REG_LDO7:
+ return TPS65912_LDO7;
+ case TPS65912_REG_LDO8:
+ return TPS65912_LDO8;
+ case TPS65912_REG_LDO9:
+ return TPS65912_LDO9;
+ case TPS65912_REG_LDO10:
+ return TPS65912_LDO10;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tps65912_get_dcdc_sel_register(struct tps65912_reg *pmic, int id)
+{
+ struct tps65912 *mfd = pmic->mfd;
+ int opvsel = 0, sr = 0;
+ u8 reg = 0;
+
+ if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_DCDC4)
+ return -EINVAL;
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC1_OP);
+ sr = ((opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT);
+ if (sr)
+ reg = TPS65912_DCDC1_AVS;
+ else
+ reg = TPS65912_DCDC1_OP;
+ break;
+ case TPS65912_REG_DCDC2:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC2_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_DCDC2_AVS;
+ else
+ reg = TPS65912_DCDC2_OP;
+ break;
+ case TPS65912_REG_DCDC3:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC3_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_DCDC3_AVS;
+ else
+ reg = TPS65912_DCDC3_OP;
+ break;
+ case TPS65912_REG_DCDC4:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC4_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_DCDC4_AVS;
+ else
+ reg = TPS65912_DCDC4_OP;
+ break;
+ }
+ return reg;
+}
+
+static int tps65912_get_ldo_sel_register(struct tps65912_reg *pmic, int id)
+{
+ struct tps65912 *mfd = pmic->mfd;
+ int opvsel = 0, sr = 0;
+ u8 reg = 0;
+
+ if (id < TPS65912_REG_LDO1 || id > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ switch (id) {
+ case TPS65912_REG_LDO1:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO1_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO1_AVS;
+ else
+ reg = TPS65912_LDO1_OP;
+ break;
+ case TPS65912_REG_LDO2:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO2_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO2_AVS;
+ else
+ reg = TPS65912_LDO2_OP;
+ break;
+ case TPS65912_REG_LDO3:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO3_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO3_AVS;
+ else
+ reg = TPS65912_LDO3_OP;
+ break;
+ case TPS65912_REG_LDO4:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO4_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO4_AVS;
+ else
+ reg = TPS65912_LDO4_OP;
+ break;
+ case TPS65912_REG_LDO5:
+ reg = TPS65912_LDO5;
+ break;
+ case TPS65912_REG_LDO6:
+ reg = TPS65912_LDO6;
+ break;
+ case TPS65912_REG_LDO7:
+ reg = TPS65912_LDO7;
+ break;
+ case TPS65912_REG_LDO8:
+ reg = TPS65912_LDO8;
+ break;
+ case TPS65912_REG_LDO9:
+ reg = TPS65912_LDO9;
+ break;
+ case TPS65912_REG_LDO10:
+ reg = TPS65912_LDO10;
+ break;
+ }
+
+ return reg;
+}
+
+static int tps65912_get_mode_regiters(struct tps65912_reg *pmic, int id)
+{
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ pmic->pwm_mode_reg = TPS65912_DCDC1_CTRL;
+ pmic->eco_reg = TPS65912_DCDC1_AVS;
+ break;
+ case TPS65912_REG_DCDC2:
+ pmic->pwm_mode_reg = TPS65912_DCDC2_CTRL;
+ pmic->eco_reg = TPS65912_DCDC2_AVS;
+ break;
+ case TPS65912_REG_DCDC3:
+ pmic->pwm_mode_reg = TPS65912_DCDC3_CTRL;
+ pmic->eco_reg = TPS65912_DCDC3_AVS;
+ break;
+ case TPS65912_REG_DCDC4:
+ pmic->pwm_mode_reg = TPS65912_DCDC4_CTRL;
+ pmic->eco_reg = TPS65912_DCDC4_AVS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tps65912_reg_is_enabled(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int reg, value, id = rdev_get_id(dev);
+
+ if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ value = tps65912_reg_read(mfd, reg);
+ if (value < 0)
+ return value;
+
+ return value & TPS65912_REG_ENABLED;
+}
+
+static int tps65912_reg_enable(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev);
+ int reg;
+
+ if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ return tps65912_set_bits(mfd, reg, TPS65912_REG_ENABLED);
+}
+
+static int tps65912_reg_disable(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev), reg;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ return tps65912_clear_bits(mfd, reg, TPS65912_REG_ENABLED);
+}
+
+static int tps65912_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int pwm_mode, eco, id = rdev_get_id(dev);
+
+ tps65912_get_mode_regiters(pmic, id);
+
+ pwm_mode = tps65912_reg_read(mfd, pmic->pwm_mode_reg);
+ eco = tps65912_reg_read(mfd, pmic->eco_reg);
+
+ pwm_mode &= DCDCCTRL_DCDC_MODE_MASK;
+ eco &= DCDC_AVS_ECO_MASK;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ /* Verify if the mode is already set */
+ if (pwm_mode && !eco)
+ break;
+ tps65912_set_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
+ tps65912_clear_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ case REGULATOR_MODE_IDLE:
+ if (!pwm_mode && !eco)
+ break;
+ tps65912_clear_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
+ tps65912_clear_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
+ break;
+ case REGULATOR_MODE_STANDBY:
+ if (!pwm_mode && eco)
+ break;
+ tps65912_clear_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
+ tps65912_set_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned int tps65912_get_mode(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int pwm_mode, eco, mode = 0, id = rdev_get_id(dev);
+
+ tps65912_get_mode_regiters(pmic, id);
+
+ pwm_mode = tps65912_reg_read(mfd, pmic->pwm_mode_reg);
+ eco = tps65912_reg_read(mfd, pmic->eco_reg);
+
+ pwm_mode &= DCDCCTRL_DCDC_MODE_MASK;
+ eco &= DCDC_AVS_ECO_MASK;
+
+ if (pwm_mode && !eco)
+ mode = REGULATOR_MODE_FAST;
+ else if (!pwm_mode && !eco)
+ mode = REGULATOR_MODE_NORMAL;
+ else if (!pwm_mode && eco)
+ mode = REGULATOR_MODE_STANDBY;
+
+ return mode;
+}
+
+static int tps65912_get_voltage_dcdc(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev), voltage = 0, range;
+ int opvsel = 0, avsel = 0, sr, vsel;
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC1_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC1_AVS);
+ range = pmic->dcdc1_range;
+ break;
+ case TPS65912_REG_DCDC2:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC2_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC2_AVS);
+ range = pmic->dcdc2_range;
+ break;
+ case TPS65912_REG_DCDC3:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC3_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC3_AVS);
+ range = pmic->dcdc3_range;
+ break;
+ case TPS65912_REG_DCDC4:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC4_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC4_AVS);
+ range = pmic->dcdc4_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ vsel = avsel;
+ else
+ vsel = opvsel;
+ vsel &= 0x3F;
+
+ switch (range) {
+ case 0:
+ /* 0.5 - 1.2875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range0(vsel);
+ break;
+ case 1:
+ /* 0.7 - 1.4875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range1(vsel);
+ break;
+ case 2:
+ /* 0.5 - 2.075V in 25mV steps */
+ voltage = tps65912_vsel_to_uv_range2(vsel);
+ break;
+ case 3:
+ /* 0.5 - 3.8V in 50mV steps */
+ voltage = tps65912_vsel_to_uv_range3(vsel);
+ break;
+ }
+ return voltage;
+}
+
+static int tps65912_set_voltage_dcdc(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev);
+ int value;
+ u8 reg;
+
+ reg = tps65912_get_dcdc_sel_register(pmic, id);
+ value = tps65912_reg_read(mfd, reg);
+ value &= 0xC0;
+ return tps65912_reg_write(mfd, reg, selector | value);
+}
+
+static int tps65912_get_voltage_ldo(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev);
+ int vsel = 0;
+ u8 reg;
+
+ reg = tps65912_get_ldo_sel_register(pmic, id);
+ vsel = tps65912_reg_read(mfd, reg);
+ vsel &= 0x3F;
+
+ return tps65912_vsel_to_uv_ldo(vsel);
+}
+
+static int tps65912_set_voltage_ldo(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev), reg, value;
+
+ reg = tps65912_get_ldo_sel_register(pmic, id);
+ value = tps65912_reg_read(mfd, reg);
+ value &= 0xC0;
+ return tps65912_reg_write(mfd, reg, selector | value);
+}
+
+static int tps65912_list_voltage_dcdc(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ int range, voltage = 0, id = rdev_get_id(dev);
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ range = pmic->dcdc1_range;
+ break;
+ case TPS65912_REG_DCDC2:
+ range = pmic->dcdc2_range;
+ break;
+ case TPS65912_REG_DCDC3:
+ range = pmic->dcdc3_range;
+ break;
+ case TPS65912_REG_DCDC4:
+ range = pmic->dcdc4_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (range) {
+ case 0:
+ /* 0.5 - 1.2875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range0(selector);
+ break;
+ case 1:
+ /* 0.7 - 1.4875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range1(selector);
+ break;
+ case 2:
+ /* 0.5 - 2.075V in 25mV steps */
+ voltage = tps65912_vsel_to_uv_range2(selector);
+ break;
+ case 3:
+ /* 0.5 - 3.8V in 50mV steps */
+ voltage = tps65912_vsel_to_uv_range3(selector);
+ break;
+ }
+ return voltage;
+}
+
+static int tps65912_list_voltage_ldo(struct regulator_dev *dev,
+ unsigned selector)
+{
+ int ldo = rdev_get_id(dev);
+
+ if (ldo < TPS65912_REG_LDO1 || ldo > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ return tps65912_vsel_to_uv_ldo(selector);
+}
+
+/* Operations permitted on DCDCx */
+static struct regulator_ops tps65912_ops_dcdc = {
+ .is_enabled = tps65912_reg_is_enabled,
+ .enable = tps65912_reg_enable,
+ .disable = tps65912_reg_disable,
+ .set_mode = tps65912_set_mode,
+ .get_mode = tps65912_get_mode,
+ .get_voltage = tps65912_get_voltage_dcdc,
+ .set_voltage_sel = tps65912_set_voltage_dcdc,
+ .list_voltage = tps65912_list_voltage_dcdc,
+};
+
+/* Operations permitted on LDOx */
+static struct regulator_ops tps65912_ops_ldo = {
+ .is_enabled = tps65912_reg_is_enabled,
+ .enable = tps65912_reg_enable,
+ .disable = tps65912_reg_disable,
+ .get_voltage = tps65912_get_voltage_ldo,
+ .set_voltage_sel = tps65912_set_voltage_ldo,
+ .list_voltage = tps65912_list_voltage_ldo,
+};
+
+static __devinit int tps65912_probe(struct platform_device *pdev)
+{
+ struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
+ struct tps_info *info;
+ struct regulator_init_data *reg_data;
+ struct regulator_dev *rdev;
+ struct tps65912_reg *pmic;
+ struct tps65912_board *pmic_plat_data;
+ int i, err;
+
+ pmic_plat_data = dev_get_platdata(tps65912->dev);
+ if (!pmic_plat_data)
+ return -EINVAL;
+
+ reg_data = pmic_plat_data->tps65912_pmic_init_data;
+
+ pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ mutex_init(&pmic->io_lock);
+ pmic->mfd = tps65912;
+ platform_set_drvdata(pdev, pmic);
+
+ pmic->get_ctrl_reg = &tps65912_get_ctrl_register;
+ info = tps65912_regs;
+
+ for (i = 0; i < TPS65912_NUM_REGULATOR; i++, info++, reg_data++) {
+ int range = 0;
+ /* Register the regulators */
+ pmic->info[i] = info;
+
+ pmic->desc[i].name = info->name;
+ pmic->desc[i].id = i;
+ pmic->desc[i].n_voltages = 64;
+ pmic->desc[i].ops = (i > TPS65912_REG_DCDC4 ?
+ &tps65912_ops_ldo : &tps65912_ops_dcdc);
+ pmic->desc[i].type = REGULATOR_VOLTAGE;
+ pmic->desc[i].owner = THIS_MODULE;
+ range = tps65912_get_range(pmic, i);
+ rdev = regulator_register(&pmic->desc[i],
+ tps65912->dev, reg_data, pmic);
+ if (IS_ERR(rdev)) {
+ dev_err(tps65912->dev,
+ "failed to register %s regulator\n",
+ pdev->name);
+ err = PTR_ERR(rdev);
+ goto err;
+ }
+
+ /* Save regulator for cleanup */
+ pmic->rdev[i] = rdev;
+ }
+ return 0;
+
+err:
+ while (--i >= 0)
+ regulator_unregister(pmic->rdev[i]);
+
+ kfree(pmic);
+ return err;
+}
+
+static int __devexit tps65912_remove(struct platform_device *pdev)
+{
+ struct tps65912_reg *tps65912_reg = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < TPS65912_NUM_REGULATOR; i++)
+ regulator_unregister(tps65912_reg->rdev[i]);
+
+ kfree(tps65912_reg);
+ return 0;
+}
+
+static struct platform_driver tps65912_driver = {
+ .driver = {
+ .name = "tps65912-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_probe,
+ .remove = __devexit_p(tps65912_remove),
+};
+
+/**
+ * tps65912_init
+ *
+ * Module init function
+ */
+static int __init tps65912_init(void)
+{
+ return platform_driver_register(&tps65912_driver);
+}
+subsys_initcall(tps65912_init);
+
+/**
+ * tps65912_cleanup
+ *
+ * Module exit function
+ */
+static void __exit tps65912_cleanup(void)
+{
+ platform_driver_unregister(&tps65912_driver);
+}
+module_exit(tps65912_cleanup);
+
+MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65912 voltage regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65912-pmic");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 87fe0f7..ee8747f 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -835,8 +835,8 @@ static struct regulator_ops twlsmps_ops = {
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf, TWL4030, twl4030fixed_ops)
-#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \
- TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
+ TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
0x0, TWL6030, twl6030fixed_ops)
#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
@@ -856,24 +856,22 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
- .id = num, \
.min_mV = min_mVolts, \
.max_mV = max_mVolts, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
- .n_voltages = (max_mVolts - min_mVolts)/100, \
+ .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
-#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
- .id = num, \
.min_mV = min_mVolts, \
.max_mV = max_mVolts, \
.desc = { \
@@ -903,9 +901,8 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
-#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \
+#define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) { \
.base = offset, \
- .id = num, \
.delay = turnon_delay, \
.desc = { \
.name = #label, \
@@ -916,9 +913,8 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
-#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \
+#define TWL6025_ADJUSTABLE_SMPS(label, offset) { \
.base = offset, \
- .id = num, \
.min_mV = 600, \
.max_mV = 2100, \
.desc = { \
@@ -961,32 +957,32 @@ static struct twlreg_info twl_regs[] = {
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
verified since the specification is not public */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7),
- TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0),
- TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0),
- TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0),
- TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0),
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0),
+ TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0),
/* 6025 are renamed compared to 6030 versions */
- TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1),
- TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2),
- TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3),
- TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4),
- TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5),
- TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7),
- TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16),
- TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17),
- TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18),
-
- TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1),
- TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2),
- TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3),
+ TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300),
+
+ TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34),
+ TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10),
+ TWL6025_ADJUSTABLE_SMPS(VIO, 0x16),
};
static u8 twl_get_smps_offset(void)
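
The '+ 1' added to n_voltages in the TWL6030/6025 adjustable-LDO macro fixes an off-by-one: with selectors counted from 0, a 1000-3300 mV LDO in 100 mV steps has 24 usable settings, and without the extra entry the top of the range could never be selected. A quick standalone check:

#include <stdio.h>

int main(void)
{
	int min_mV = 1000, max_mV = 3300, step_mV = 100;
	int n_old = (max_mV - min_mV) / step_mV;	/* 23: top step unreachable */
	int n_new = (max_mV - min_mV) / step_mV + 1;	/* 24: covers 1000..3300 mV */

	printf("old n_voltages=%d, new n_voltages=%d\n", n_old, n_new);
	printf("highest selectable with new count: %d mV\n",
	       min_mV + (n_new - 1) * step_mV);	/* 3300 */
	return 0;
}
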
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index a0982e8..bd3531d 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -267,23 +267,6 @@ static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev,
return vsel;
}
-static int wm831x_buckv_select_max_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- u16 vsel;
-
- if (max_uV < 600000 || max_uV > 1800000)
- return -EINVAL;
-
- vsel = ((max_uV - 600000) / 12500) + 8;
-
- if (wm831x_buckv_list_voltage(rdev, vsel) < min_uV ||
- wm831x_buckv_list_voltage(rdev, vsel) < max_uV)
- return -EINVAL;
-
- return vsel;
-}
-
static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
@@ -338,28 +321,23 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
if (ret < 0)
return ret;
- /* Set the high voltage as the DVS voltage. This is optimised
- * for CPUfreq usage, most processors will keep the maximum
- * voltage constant and lower the minimum with the frequency. */
- vsel = wm831x_buckv_select_max_voltage(rdev, min_uV, max_uV);
- if (vsel < 0) {
- /* This should never happen - at worst the same vsel
- * should be chosen */
- WARN_ON(vsel < 0);
- return 0;
+ /*
+ * If this VSEL is higher than the last one we've seen then
+ * remember it as the DVS VSEL. This is optimised for CPUfreq
+ * usage where we want to get to the highest voltage very
+ * quickly.
+ */
+ if (vsel > dcdc->dvs_vsel) {
+ ret = wm831x_set_bits(wm831x, dvs_reg,
+ WM831X_DC1_DVS_VSEL_MASK,
+ dcdc->dvs_vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = vsel;
+ else
+ dev_warn(wm831x->dev,
+ "Failed to set DCDC DVS VSEL: %d\n", ret);
}
- /* Don't bother if it's the same VSEL we're already using */
- if (vsel == dcdc->on_vsel)
- return 0;
-
- ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, vsel);
- if (ret == 0)
- dcdc->dvs_vsel = vsel;
- else
- dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n",
- ret);
-
return 0;
}
@@ -456,27 +434,6 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
if (!pdata || !pdata->dvs_gpio)
return;
- switch (pdata->dvs_control_src) {
- case 1:
- ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
- break;
- case 2:
- ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
- break;
- default:
- dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
- pdata->dvs_control_src, dcdc->name);
- return;
- }
-
- ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
- WM831X_DC1_DVS_SRC_MASK, ctrl);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
- dcdc->name, ret);
- return;
- }
-
ret = gpio_request(pdata->dvs_gpio, "DCDC DVS");
if (ret < 0) {
dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
@@ -498,17 +455,57 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
}
dcdc->dvs_gpio = pdata->dvs_gpio;
+
+ switch (pdata->dvs_control_src) {
+ case 1:
+ ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
+ break;
+ case 2:
+ ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
+ pdata->dvs_control_src, dcdc->name);
+ return;
+ }
+
+ /* If DVS_VSEL is set to the minimum value then raise it to ON_VSEL
+ * to make bootstrapping a bit smoother.
+ */
+ if (!dcdc->dvs_vsel) {
+ ret = wm831x_set_bits(wm831x,
+ dcdc->base + WM831X_DCDC_DVS_CONTROL,
+ WM831X_DC1_DVS_VSEL_MASK, dcdc->on_vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = dcdc->on_vsel;
+ else
+ dev_warn(wm831x->dev, "Failed to set DVS_VSEL: %d\n",
+ ret);
+ }
+
+ ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
+ WM831X_DC1_DVS_SRC_MASK, ctrl);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
+ dcdc->name, ret);
+ }
}
static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
+ int id;
struct wm831x_dcdc *dcdc;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);
if (pdata == NULL || pdata->dcdc[id] == NULL)
@@ -545,7 +542,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
}
dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK;
- ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
+ ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL);
if (ret < 0) {
dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret);
goto err;
@@ -709,11 +706,17 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
+ int id;
struct wm831x_dcdc *dcdc;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);
if (pdata == NULL || pdata->dcdc[id] == NULL)
@@ -1046,3 +1049,4 @@ MODULE_DESCRIPTION("WM831x DC-DC convertor driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-buckv");
MODULE_ALIAS("platform:wm831x-buckp");
+MODULE_ALIAS("platform:wm831x-epe");
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 2220cf8..6709710 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -310,11 +310,17 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
if (pdata == NULL || pdata->ldo[id] == NULL)
@@ -574,11 +580,17 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
if (pdata == NULL || pdata->ldo[id] == NULL)
@@ -764,11 +776,18 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
+
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
if (pdata == NULL || pdata->ldo[id] == NULL)
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 35b2958..1a6a690 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -43,7 +43,7 @@ static int wm8994_ldo_enable(struct regulator_dev *rdev)
if (!ldo->enable)
return 0;
- gpio_set_value(ldo->enable, 1);
+ gpio_set_value_cansleep(ldo->enable, 1);
ldo->is_enabled = true;
return 0;
@@ -57,7 +57,7 @@ static int wm8994_ldo_disable(struct regulator_dev *rdev)
if (!ldo->enable)
return -EINVAL;
- gpio_set_value(ldo->enable, 0);
+ gpio_set_value_cansleep(ldo->enable, 0);
ldo->is_enabled = false;
return 0;
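
Switching to gpio_set_value_cansleep() is needed because the WM8994 enable line may sit behind an I2C/SPI GPIO expander whose accesses can sleep; the non-sleeping gpio_set_value() would trigger a warning there. A minimal sketch of the pattern for any driver toggling such a line from process context (GPIO number and label are placeholders):

#include <linux/gpio.h>

/* Only valid in contexts that may sleep (probe, regulator ops, ...). */
static int example_enable_rail(unsigned int gpio)
{
	int ret;

	ret = gpio_request(gpio, "RAIL ENA");
	if (ret < 0)
		return ret;

	ret = gpio_direction_output(gpio, 0);
	if (ret < 0) {
		gpio_free(gpio);
		return ret;
	}

	/* May sleep if the GPIO lives on an expander; never call from IRQ context. */
	gpio_set_value_cansleep(gpio, 1);
	return 0;
}
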
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index df68618..44e91e5 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -636,6 +636,29 @@ void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task)
}
EXPORT_SYMBOL_GPL(rtc_irq_unregister);
+static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
+{
+ /*
+ * We always cancel the timer here first, because otherwise
+ * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
+ * when we manage to start the timer before the callback
+ * returns HRTIMER_RESTART.
+ *
+ * We cannot use hrtimer_cancel() here as a running callback
+ * could be blocked on rtc->irq_task_lock and hrtimer_cancel()
+ * would spin forever.
+ */
+ if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0)
+ return -1;
+
+ if (enabled) {
+ ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq);
+
+ hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
+ }
+ return 0;
+}
+
/**
* rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs
* @rtc: the rtc device
@@ -651,21 +674,21 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled
int err = 0;
unsigned long flags;
+retry:
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task != NULL && task == NULL)
err = -EBUSY;
if (rtc->irq_task != task)
err = -EACCES;
-
- if (enabled) {
- ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
- hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
- } else {
- hrtimer_cancel(&rtc->pie_timer);
+ if (!err) {
+ if (rtc_update_hrtimer(rtc, enabled) < 0) {
+ spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+ cpu_relax();
+ goto retry;
+ }
+ rtc->pie_enabled = enabled;
}
- rtc->pie_enabled = enabled;
spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);
@@ -685,22 +708,20 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
int err = 0;
unsigned long flags;
- if (freq <= 0)
+ if (freq <= 0 || freq > RTC_MAX_FREQ)
return -EINVAL;
-
+retry:
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task != NULL && task == NULL)
err = -EBUSY;
if (rtc->irq_task != task)
err = -EACCES;
- if (err == 0) {
+ if (!err) {
rtc->irq_freq = freq;
- if (rtc->pie_enabled) {
- ktime_t period;
- hrtimer_cancel(&rtc->pie_timer);
- period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
- hrtimer_start(&rtc->pie_timer, period,
- HRTIMER_MODE_REL);
+ if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) {
+ spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+ cpu_relax();
+ goto retry;
}
}
spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
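
The rtc_irq_set_state()/rtc_irq_set_freq() paths reworked above back the classic periodic-interrupt (PIE) interface, so they are exercised whenever userspace arms a periodic tick. A user-space sketch using the standard RTC ioctls (the device node name depends on the board):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
	int fd = open("/dev/rtc0", O_RDONLY);
	unsigned long data;

	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}

	/* Serviced by the rtc core's periodic-IRQ code patched above. */
	if (ioctl(fd, RTC_IRQP_SET, 64) < 0)	/* 64 Hz periodic interrupt */
		perror("RTC_IRQP_SET");
	else if (ioctl(fd, RTC_PIE_ON, 0) < 0)
		perror("RTC_PIE_ON");
	else if (read(fd, &data, sizeof(data)) > 0)	/* block for one tick */
		printf("got periodic interrupt, count=%lu\n", data >> 8);

	ioctl(fd, RTC_PIE_OFF, 0);
	close(fd);
	return 0;
}
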
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 335551d..14a42a1 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -36,6 +36,7 @@
*/
struct ep93xx_rtc {
void __iomem *mmio_base;
+ struct rtc_device *rtc;
};
static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
struct resource *res;
- struct rtc_device *rtc;
int err;
ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
return -ENXIO;
pdev->dev.platform_data = ep93xx_rtc;
- platform_set_drvdata(pdev, rtc);
+ platform_set_drvdata(pdev, ep93xx_rtc);
- rtc = rtc_device_register(pdev->name,
+ ep93xx_rtc->rtc = rtc_device_register(pdev->name,
&pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- err = PTR_ERR(rtc);
+ if (IS_ERR(ep93xx_rtc->rtc)) {
+ err = PTR_ERR(ep93xx_rtc->rtc);
goto exit;
}
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
return 0;
fail:
- rtc_device_unregister(rtc);
+ rtc_device_unregister(ep93xx_rtc->rtc);
exit:
platform_set_drvdata(pdev, NULL);
pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:
static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
{
- struct rtc_device *rtc = platform_get_drvdata(pdev);
+ struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
platform_set_drvdata(pdev, NULL);
- rtc_device_unregister(rtc);
+ rtc_device_unregister(ep93xx_rtc->rtc);
pdev->dev.platform_data = NULL;
return 0;
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 2dd3c01..d93a960 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
+#include <linux/sched.h>
#include <linux/workqueue.h>
/* DryIce Register Definitions */
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 075f170..c4cf057 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
time -= tm->tm_hour * 3600;
tm->tm_min = time / 60;
tm->tm_sec = time - tm->tm_min * 60;
+
+ tm->tm_isdst = 0;
}
EXPORT_SYMBOL(rtc_time_to_tm);
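
Zeroing tm_isdst matters because rtc_time_to_tm() is usually handed a struct rtc_time sitting on the caller's stack, and DST has no meaning for hardware clock values. A minimal kernel-side usage sketch (the wrapper function is made up for illustration):

#include <linux/kernel.h>
#include <linux/rtc.h>

static void example_dump_time(unsigned long secs)
{
	struct rtc_time tm;	/* stack storage: tm_isdst used to be left as garbage */

	rtc_time_to_tm(secs, &tm);
	pr_info("%04d-%02d-%02d %02d:%02d:%02d\n",
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec);
}
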
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index bcae8dd..7789002 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -368,7 +368,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
pr_info("%s: already running\n", pdev->name);
/* force to 24 hour mode */
- new_ctrl = reg & ~(OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP);
+ new_ctrl = reg & (OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP);
new_ctrl |= OMAP_RTC_CTRL_STOP;
/* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 9329dbb..7639ab9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -51,6 +51,27 @@ static enum s3c_cpu_type s3c_rtc_cpu_type;
static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
+static void s3c_rtc_alarm_clk_enable(bool enable)
+{
+ static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock);
+ static bool alarm_clk_enabled;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags);
+ if (enable) {
+ if (!alarm_clk_enabled) {
+ clk_enable(rtc_clk);
+ alarm_clk_enabled = true;
+ }
+ } else {
+ if (alarm_clk_enabled) {
+ clk_disable(rtc_clk);
+ alarm_clk_enabled = false;
+ }
+ }
+ spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags);
+}
+
/* IRQ Handlers */
static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
@@ -64,6 +85,9 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);
clk_disable(rtc_clk);
+
+ s3c_rtc_alarm_clk_enable(false);
+
return IRQ_HANDLED;
}
@@ -97,6 +121,8 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
clk_disable(rtc_clk);
+ s3c_rtc_alarm_clk_enable(enabled);
+
return 0;
}
@@ -152,10 +178,6 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
goto retry_get_time;
}
- pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
- 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
- rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
-
rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
@@ -164,6 +186,11 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
rtc_tm->tm_year += 100;
+
+ pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
+ 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+ rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
+
rtc_tm->tm_mon -= 1;
clk_disable(rtc_clk);
@@ -269,10 +296,9 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
clk_enable(rtc_clk);
pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alrm->enabled,
- 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
-
alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
writeb(0x00, base + S3C2410_RTCALM);
@@ -319,49 +345,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
return 0;
}
-static int s3c_rtc_open(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
- int ret;
-
- ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
- IRQF_DISABLED, "s3c2410-rtc alarm", rtc_dev);
-
- if (ret) {
- dev_err(dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
- return ret;
- }
-
- ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
- IRQF_DISABLED, "s3c2410-rtc tick", rtc_dev);
-
- if (ret) {
- dev_err(dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
- goto tick_err;
- }
-
- return ret;
-
- tick_err:
- free_irq(s3c_rtc_alarmno, rtc_dev);
- return ret;
-}
-
-static void s3c_rtc_release(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
-
- /* do not clear AIE here, it may be needed for wake */
-
- free_irq(s3c_rtc_alarmno, rtc_dev);
- free_irq(s3c_rtc_tickno, rtc_dev);
-}
-
static const struct rtc_class_ops s3c_rtcops = {
- .open = s3c_rtc_open,
- .release = s3c_rtc_release,
.read_time = s3c_rtc_gettime,
.set_time = s3c_rtc_settime,
.read_alarm = s3c_rtc_getalarm,
@@ -425,6 +409,9 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
{
struct rtc_device *rtc = platform_get_drvdata(dev);
+ free_irq(s3c_rtc_alarmno, rtc);
+ free_irq(s3c_rtc_tickno, rtc);
+
platform_set_drvdata(dev, NULL);
rtc_device_unregister(rtc);
@@ -548,10 +535,32 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_setfreq(&pdev->dev, 1);
+ ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
+ IRQF_DISABLED, "s3c2410-rtc alarm", rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
+ goto err_alarm_irq;
+ }
+
+ ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
+ IRQF_DISABLED, "s3c2410-rtc tick", rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
+ free_irq(s3c_rtc_alarmno, rtc);
+ goto err_tick_irq;
+ }
+
clk_disable(rtc_clk);
return 0;
+ err_tick_irq:
+ free_irq(s3c_rtc_alarmno, rtc);
+
+ err_alarm_irq:
+ platform_set_drvdata(pdev, NULL);
+ rtc_device_unregister(rtc);
+
err_nortc:
s3c_rtc_enable(pdev, 0);
clk_disable(rtc_clk);
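The s3c_rtc_alarm_clk_enable() helper added above keeps the RTC clock enabled at most once no matter how often callers toggle it, by guarding a boolean with its own spinlock. A minimal userspace sketch of the same idea follows (illustrative only; a pthread mutex stands in for the spinlock and printf stubs stand in for clk_enable()/clk_disable()):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's clk_enable()/clk_disable(). */
static void clk_enable(void)  { puts("clock enabled"); }
static void clk_disable(void) { puts("clock disabled"); }

static pthread_mutex_t alarm_clk_lock = PTHREAD_MUTEX_INITIALIZER;
static bool alarm_clk_enabled;

/* Enable or disable the clock exactly once per state transition. */
static void alarm_clk_set(bool enable)
{
	pthread_mutex_lock(&alarm_clk_lock);
	if (enable && !alarm_clk_enabled) {
		clk_enable();
		alarm_clk_enabled = true;
	} else if (!enable && alarm_clk_enabled) {
		clk_disable();
		alarm_clk_enabled = false;
	}
	pthread_mutex_unlock(&alarm_clk_lock);
}

int main(void)
{
	alarm_clk_set(true);
	alarm_clk_set(true);	/* no-op: already enabled */
	alarm_clk_set(false);
	alarm_clk_set(false);	/* no-op: already disabled */
	return 0;
}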
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 9a81f77..20687d5 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
int res;
u8 rd_reg;
-#ifdef CONFIG_LOCKDEP
- /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
- * we don't want and can't tolerate. Although it might be
- * friendlier not to borrow this thread context...
- */
- local_irq_enable();
-#endif
-
res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (res)
goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
static int __devinit twl_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- int ret = 0;
+ int ret = -EINVAL;
int irq = platform_get_irq(pdev, 0);
u8 rd_reg;
if (irq <= 0)
- return -EINVAL;
-
- rtc = rtc_device_register(pdev->name,
- &pdev->dev, &twl_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
- PTR_ERR(rtc));
- goto out0;
-
- }
-
- platform_set_drvdata(pdev, rtc);
+ goto out1;
ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
if (ret < 0)
goto out1;
- ret = request_irq(irq, twl_rtc_interrupt,
- IRQF_TRIGGER_RISING,
- dev_name(&rtc->dev), rtc);
- if (ret < 0) {
- dev_err(&pdev->dev, "IRQ is not free.\n");
- goto out1;
- }
-
if (twl_class_is_6030()) {
twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
/* Check RTC module status, Enable if it is off */
ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
if (ret < 0)
- goto out2;
+ goto out1;
if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
if (ret < 0)
- goto out2;
+ goto out1;
}
/* init cached IRQ enable bits */
ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
if (ret < 0)
+ goto out1;
+
+ rtc = rtc_device_register(pdev->name,
+ &pdev->dev, &twl_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
+ PTR_ERR(rtc));
+ goto out1;
+ }
+
+ ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
+ IRQF_TRIGGER_RISING,
+ dev_name(&rtc->dev), rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "IRQ is not free.\n");
goto out2;
+ }
- return ret;
+ platform_set_drvdata(pdev, rtc);
+ return 0;
out2:
- free_irq(irq, rtc);
-out1:
rtc_device_unregister(rtc);
-out0:
+out1:
return ret;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 432444a..a1d3ddb 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
@@ -888,11 +889,11 @@ char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
char *buffer;
- buffer = kmalloc(user_len + 1, GFP_KERNEL);
+ buffer = vmalloc(user_len + 1);
if (buffer == NULL)
return ERR_PTR(-ENOMEM);
if (copy_from_user(buffer, user_buf, user_len) != 0) {
- kfree(buffer);
+ vfree(buffer);
return ERR_PTR(-EFAULT);
}
/* got the string, now strip linefeed. */
@@ -930,7 +931,7 @@ static ssize_t dasd_stats_write(struct file *file,
dasd_profile_off(prof);
} else
rc = -EINVAL;
- kfree(buffer);
+ vfree(buffer);
return rc;
}
@@ -1042,7 +1043,7 @@ static ssize_t dasd_stats_global_write(struct file *file,
dasd_global_profile_level = DASD_PROFILE_OFF;
} else
rc = -EINVAL;
- kfree(buffer);
+ vfree(buffer);
return rc;
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 30fb979..6e835c9 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1461,6 +1461,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
"Read device characteristic failed, rc=%d", rc);
goto out_err3;
}
+
+ if ((device->features & DASD_FEATURE_USERAW) &&
+ !(private->rdc_data.facilities.RT_in_LR)) {
+ dev_err(&device->cdev->dev, "The storage server does not "
+ "support raw-track access\n");
+ rc = -EINVAL;
+ goto out_err3;
+ }
+
/* find the valid cylinder size */
if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
private->rdc_data.long_no_cyl)
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 77f778b..16c5208 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index eb4e034..f1a2016 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -249,6 +249,7 @@ static int dasd_ioctl_reset_profile(struct dasd_block *block)
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
struct dasd_profile_info_t *data;
+ int rc = 0;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
@@ -279,11 +280,14 @@ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
spin_unlock_bh(&block->profile.lock);
} else {
spin_unlock_bh(&block->profile.lock);
- return -EIO;
+ rc = -EIO;
+ goto out;
}
if (copy_to_user(argp, data, sizeof(*data)))
- return -EFAULT;
- return 0;
+ rc = -EFAULT;
+out:
+ kfree(data);
+ return rc;
}
#else
static int dasd_ioctl_reset_profile(struct dasd_block *block)
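The dasd_ioctl_read_profile() change above routes the error returns through a single exit label so the kmalloc'ed buffer is freed on every path (the old code leaked it on the -EIO and -EFAULT returns). A standalone sketch of that single-exit cleanup pattern, with hypothetical helpers standing in for the driver's locking and copy steps:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct profile { int counters[16]; };

/* Hypothetical stand-ins for the driver's availability check and copy-out. */
static int profile_available(void) { return 1; }
static int copy_to_caller(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;	/* 0 on success, like copy_to_user() */
}

static int read_profile(struct profile *out)
{
	struct profile *data;
	int rc = 0;

	data = malloc(sizeof(*data));
	if (!data)
		return -ENOMEM;
	memset(data, 0, sizeof(*data));

	if (!profile_available()) {
		rc = -EIO;
		goto out;	/* single exit: buffer is still freed */
	}

	if (copy_to_caller(out, data, sizeof(*data)) != 0)
		rc = -EFAULT;
out:
	free(data);
	return rc;
}

int main(void)
{
	struct profile p;
	return read_profile(&p) ? 1 : 0;
}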
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 6c3c536..e12989f 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -312,14 +312,14 @@ static ssize_t dasd_stats_proc_write(struct file *file,
pr_info("The statistics have been reset\n");
} else
goto out_parse_error;
- kfree(buffer);
+ vfree(buffer);
return user_len;
out_parse_error:
rc = -EINVAL;
pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
str);
out_error:
- kfree(buffer);
+ vfree(buffer);
return rc;
#else
pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index 7ad30e7..5f9f929 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -82,12 +82,9 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write,
return -EFAULT;
} else {
len = *count;
- rc = copy_from_user(buf, buffer, sizeof(buf));
- if (rc != 0)
- return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- if (strict_strtoul(buf, 0, &val) != 0)
- return -EINVAL;
+ rc = kstrtoul_from_user(buffer, len, 0, &val);
+ if (rc)
+ return rc;
if (val != 0 && val != 1)
return -EINVAL;
callhome_enabled = val;
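This hunk (and the similar qdio_debug.c hunk further down) replaces an open-coded copy_from_user/strict_strtoul sequence with kstrtoul_from_user(), which bounds-checks, copies and parses in a single call. Roughly what such a helper does, sketched as plain C over an in-memory buffer (assumed behaviour, not the kernel implementation):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/*
 * Parse an unsigned long from a length-delimited, not necessarily
 * NUL-terminated buffer. Returns 0 on success or a negative errno.
 */
static int strtoul_from_buf(const char *buf, size_t count, int base,
			    unsigned long *val)
{
	char tmp[24];
	char *end;

	if (count == 0 || count >= sizeof(tmp))
		return -EINVAL;

	memcpy(tmp, buf, count);
	tmp[count] = '\0';

	errno = 0;
	*val = strtoul(tmp, &end, base);
	if (errno == ERANGE)
		return -ERANGE;
	if (end == tmp || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	return 0;
}

int main(void)
{
	unsigned long v;
	const char input[] = "42\n";
	return strtoul_from_buf(input, sizeof(input) - 1, 10, &v) ? 1 : 0;
}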
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index be55fb2..837e010 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -383,8 +383,10 @@ static int sclp_attach_storage(u8 id)
switch (sccb->header.response_code) {
case 0x0020:
set_bit(id, sclp_storage_ids);
- for (i = 0; i < sccb->assigned; i++)
- sclp_unassign_storage(sccb->entries[i] >> 16);
+ for (i = 0; i < sccb->assigned; i++) {
+ if (sccb->entries[i])
+ sclp_unassign_storage(sccb->entries[i] >> 16);
+ }
break;
default:
rc = -EIO;
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 05909a7..a90a02c 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -13,7 +13,7 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/reboot.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/sigp.h>
#include <asm/smp.h>
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c837d74..524d988 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -21,7 +21,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 7e297c7..0b7245c 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -2,7 +2,7 @@
#define S390_DEVICE_H
#include <asm/ccwdev.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/notifier.h>
#include "io_sch.h"
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 7bc643f..e5c9664 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -14,6 +14,8 @@
#include "chsc.h"
#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
+#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */
+#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
/*
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f8b03a6..0e615cb 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -188,19 +188,13 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
struct qdio_irq *irq_ptr = seq->private;
struct qdio_q *q;
unsigned long val;
- char buf[8];
int ret, i;
if (!irq_ptr)
return 0;
- if (count >= sizeof(buf))
- return -EINVAL;
- if (copy_from_user(&buf, ubuf, count))
- return -EFAULT;
- buf[count] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+
+ ret = kstrtoul_from_user(ubuf, count, 10, &val);
+ if (ret)
return ret;
switch (val) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 570d4da..288c914 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -15,7 +15,7 @@
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
@@ -313,7 +313,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
- int cc;
+ int retries = 0, cc;
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
@@ -325,6 +325,7 @@ again:
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
+ retries++;
if (!start_time) {
start_time = get_clock();
@@ -333,6 +334,11 @@ again:
if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
+ if (retries) {
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
+ "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
+ }
return cc;
}
@@ -728,13 +734,14 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
static int qdio_kick_outbound_q(struct qdio_q *q)
{
+ int retries = 0, cc;
unsigned int busy_bit;
- int cc;
if (!need_siga_out(q))
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
+retry:
qperf_inc(q, siga_write);
cc = qdio_siga_output(q, &busy_bit);
@@ -743,7 +750,11 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
break;
case 2:
if (busy_bit) {
- DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
+ while (++retries < QDIO_BUSY_BIT_RETRIES) {
+ mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
+ goto retry;
+ }
+ DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
cc |= QDIO_ERROR_SIGA_BUSY;
} else
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
@@ -753,6 +764,10 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
break;
}
+ if (retries) {
+ DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
+ DBF_ERROR("count:%u", retries);
+ }
return cc;
}
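The qdio_kick_outbound_q() change above retries a SIGA busy condition up to QDIO_BUSY_BIT_RETRIES times, pausing QDIO_BUSY_BIT_RETRY_DELAY milliseconds between attempts, before reporting the busy error. The same bounded retry-with-delay loop in a standalone userspace form (issue_request() is a hypothetical stand-in for the SIGA call):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define BUSY_RETRIES	1000	/* ~10s at 10ms per retry */
#define RETRY_DELAY_US	10000	/* 10 milliseconds */

/* Hypothetical operation: returns 0 on success, -EBUSY while busy. */
static int issue_request(void)
{
	return (rand() % 4) ? -EBUSY : 0;
}

static int kick_with_retry(void)
{
	int retries = 0;
	int rc;

	while ((rc = issue_request()) == -EBUSY) {
		if (++retries >= BUSY_RETRIES) {
			fprintf(stderr, "still busy after %d retries\n",
				retries);
			return -EBUSY;
		}
		usleep(RETRY_DELAY_US);
	}

	if (retries)
		fprintf(stderr, "succeeded after %d retries\n", retries);
	return rc;
}

int main(void)
{
	return kick_with_retry() ? EXIT_FAILURE : EXIT_SUCCESS;
}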
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 68be6e1..2a1d4df 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f8134a4..b77ae51 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -41,7 +41,7 @@
#include <linux/mutex.h>
#include <asm/reset.h>
#include <asm/airq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 8e65447..88ad33e 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -36,7 +36,7 @@
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/hw_random.h>
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 2176d00..da171b5 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -30,7 +30,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
index 44253fd..eb313c3 100644
--- a/drivers/s390/crypto/zcrypt_mono.c
+++ b/drivers/s390/crypto/zcrypt_mono.c
@@ -32,7 +32,7 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/compat.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 1afb69c..d84816f 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -30,7 +30,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index aa4c050..bdbdbe1 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -30,7 +30,7 @@
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/err.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 4f85eb7..dd47378 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -31,7 +31,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index 1e8b235..a4510cf 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -8,7 +8,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/string.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/**
* Define this to get debugging messages.
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 2a4991d..7cac873 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -13,7 +13,7 @@
#include <linux/slab.h>
#include <scsi/fc/fc_fcp.h>
#include <scsi/scsi_eh.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
#include "zfcp_fc.h"
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 740da446..965a1fc 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -16,7 +16,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h> /* put_/get_user */
#include <asm/io.h>
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 081c171..5ce5170 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -397,7 +397,7 @@ struct amap_pdu_data_out {
};
struct be_cmd_bhs {
- struct iscsi_cmd iscsi_hdr;
+ struct iscsi_scsi_req iscsi_hdr;
unsigned char pad1[16];
struct pdu_data_out iscsi_data_pdu;
unsigned char pad2[BE_SENSE_INFO_SIZE -
@@ -428,7 +428,7 @@ struct be_nonio_bhs {
};
struct be_status_bhs {
- struct iscsi_cmd iscsi_hdr;
+ struct iscsi_scsi_req iscsi_hdr;
unsigned char pad1[16];
/**
* The plus 2 below is to hold the sense info length that gets
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 3b0af11..a796de9 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -27,6 +27,7 @@
struct bfa_s;
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
+typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
/*
* Interrupt message handlers
@@ -121,6 +122,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
(__hcb_qe)->cbfn = (__cbfn); \
(__hcb_qe)->cbarg = (__cbarg); \
+ (__hcb_qe)->pre_rmv = BFA_FALSE; \
list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
} while (0)
@@ -135,6 +137,11 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
} \
} while (0)
+#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do { \
+ (__hcb_qe)->fw_status = (__status); \
+ list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
+} while (0)
+
#define bfa_cb_queue_done(__hcb_qe) do { \
(__hcb_qe)->once = BFA_FALSE; \
} while (0)
@@ -177,7 +184,7 @@ struct bfa_msix_s {
struct bfa_hwif_s {
void (*hw_reginit)(struct bfa_s *bfa);
void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
- void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
+ void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
void (*hw_msix_queue_install)(struct bfa_s *bfa);
@@ -268,10 +275,8 @@ struct bfa_iocfc_s {
((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
#define bfa_msix_uninstall(__bfa) \
((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
-#define bfa_isr_rspq_ack(__bfa, __queue) do { \
- if ((__bfa)->iocfc.hwif.hw_rspq_ack) \
- (__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue); \
-} while (0)
+#define bfa_isr_rspq_ack(__bfa, __queue, __ci) \
+ ((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci))
#define bfa_isr_reqq_ack(__bfa, __queue) do { \
if ((__bfa)->iocfc.hwif.hw_reqq_ack) \
(__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \
@@ -311,7 +316,7 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
void bfa_hwcb_reginit(struct bfa_s *bfa);
-void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
@@ -324,7 +329,8 @@ void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
void bfa_hwct_reginit(struct bfa_s *bfa);
void bfa_hwct2_reginit(struct bfa_s *bfa);
void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
-void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
+void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
@@ -376,6 +382,22 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
#define bfa_get_fw_clock_res(__bfa) \
((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
+/*
+ * lun mask macros return NULL when min cfg is enabled and there is
+ * no memory allocated for lunmask.
+ */
+#define bfa_get_lun_mask(__bfa) \
+ ((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
+ (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask))
+
+#define bfa_get_lun_mask_list(_bfa) \
+ ((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
+ (bfa_get_lun_mask(_bfa)->lun_list)
+
+#define bfa_get_lun_mask_status(_bfa) \
+ (((&(_bfa)->modules.dconf_mod)->min_cfg) \
+ ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status))
+
void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
@@ -406,7 +428,22 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
void bfa_iocfc_enable(struct bfa_s *bfa);
void bfa_iocfc_disable(struct bfa_s *bfa);
+void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
+struct bfa_cb_pending_q_s {
+ struct bfa_cb_qe_s hcb_qe;
+ void *data; /* Driver buffer */
+};
+
+/* Common macros to operate on pending stats/attr apis */
+#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do { \
+ bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
+ (__qe)->hcb_qe.cbfn = (__cbfn); \
+ (__qe)->hcb_qe.cbarg = (__cbarg); \
+ (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
+ (__qe)->data = (__data); \
+} while (0)
+
#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index c38e589..4bd546b 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -33,6 +33,7 @@ static struct bfa_module_s *hal_mods[] = {
&hal_mod_uf,
&hal_mod_rport,
&hal_mod_fcp,
+ &hal_mod_dconf,
NULL
};
@@ -237,8 +238,6 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
u32 pi, ci;
struct list_head *waitq;
- bfa_isr_rspq_ack(bfa, qid);
-
ci = bfa_rspq_ci(bfa, qid);
pi = bfa_rspq_pi(bfa, qid);
@@ -251,11 +250,9 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
}
/*
- * update CI
+ * acknowledge RME completions and update CI
*/
- bfa_rspq_ci(bfa, qid) = pi;
- writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
- mmiowb();
+ bfa_isr_rspq_ack(bfa, qid, ci);
/*
* Resume any pending requests in the corresponding reqq.
@@ -325,23 +322,19 @@ bfa_intx(struct bfa_s *bfa)
int queue;
intr = readl(bfa->iocfc.bfa_regs.intr_status);
- if (!intr)
- return BFA_FALSE;
qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
if (qintr)
writel(qintr, bfa->iocfc.bfa_regs.intr_status);
/*
- * RME completion queue interrupt
+ * Unconditional RME completion queue interrupt
*/
- qintr = intr & __HFN_INT_RME_MASK;
- if (qintr && bfa->queue_process) {
+ if (bfa->queue_process) {
for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
bfa_isr_rspq(bfa, queue);
}
- intr &= ~qintr;
if (!intr)
return BFA_TRUE;
@@ -432,7 +425,8 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
__HFN_INT_MBOX_LPU1_CT2);
intr &= __HFN_INT_ERR_MASK_CT2;
} else {
- halt_isr = intr & __HFN_INT_LL_HALT;
+ halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
+ (intr & __HFN_INT_LL_HALT) : 0;
pss_isr = intr & __HFN_INT_ERR_PSS;
lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
intr &= __HFN_INT_ERR_MASK;
@@ -578,7 +572,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
} else {
iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
iocfc->hwif.hw_reqq_ack = NULL;
- iocfc->hwif.hw_rspq_ack = NULL;
+ iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
@@ -595,7 +589,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
iocfc->hwif.hw_isr_mode_set = NULL;
- iocfc->hwif.hw_rspq_ack = NULL;
+ iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
}
iocfc->hwif.hw_reginit(bfa);
@@ -685,7 +679,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
bfa->queue_process = BFA_TRUE;
for (i = 0; i < BFI_IOC_MAX_CQS; i++)
- bfa_isr_rspq_ack(bfa, i);
+ bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->start(bfa);
@@ -709,7 +703,7 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
struct bfa_s *bfa = bfa_arg;
if (complete) {
- if (bfa->iocfc.cfgdone)
+ if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
else
bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
@@ -822,9 +816,11 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
*/
bfa_fcport_init(bfa);
- if (iocfc->action == BFA_IOCFC_ACT_INIT)
- bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
- else {
+ if (iocfc->action == BFA_IOCFC_ACT_INIT) {
+ if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
+ bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
+ bfa_iocfc_init_cb, bfa);
+ } else {
if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
bfa_iocfc_enable_cb, bfa);
@@ -1045,6 +1041,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
}
bfa_iocfc_send_cfg(bfa);
+ bfa_dconf_modinit(bfa);
}
/*
@@ -1207,7 +1204,9 @@ bfa_iocfc_stop(struct bfa_s *bfa)
bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
bfa->queue_process = BFA_FALSE;
- bfa_ioc_disable(&bfa->ioc);
+ bfa_dconf_modexit(bfa);
+ if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
+ bfa_ioc_disable(&bfa->ioc);
}
void
@@ -1540,10 +1539,17 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
struct list_head *qe;
struct list_head *qen;
struct bfa_cb_qe_s *hcb_qe;
+ bfa_cb_cbfn_status_t cbfn;
list_for_each_safe(qe, qen, comp_q) {
hcb_qe = (struct bfa_cb_qe_s *) qe;
- hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
+ if (hcb_qe->pre_rmv) {
+ /* qe is invalid after return, dequeue before cbfn() */
+ list_del(qe);
+ cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
+ cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
+ } else
+ hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
}
}
@@ -1556,10 +1562,20 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
while (!list_empty(comp_q)) {
bfa_q_deq(comp_q, &qe);
hcb_qe = (struct bfa_cb_qe_s *) qe;
+ WARN_ON(hcb_qe->pre_rmv);
hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
}
}
+void
+bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
+{
+ if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
+ if (bfa->iocfc.cfgdone == BFA_TRUE)
+ bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
+ bfa_iocfc_init_cb, bfa);
+ }
+}
/*
* Return the list of PCI vendor/device id lists supported by this
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index ed8d31b..7b3d235 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -144,6 +144,7 @@ enum bfa_status {
BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */
BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
+ BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */
BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */
BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */
@@ -164,6 +165,8 @@ enum bfa_status {
BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
* configuration */
+ BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
+ BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
* this adapter */
@@ -172,11 +175,15 @@ enum bfa_status {
BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */
+ BFA_STATUS_ENTRY_EXISTS = 193, /* Entry already exists */
+ BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */
+ BFA_STATUS_NO_CHANGE = 195, /* Feature already in that state */
BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */
BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */
BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */
BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */
BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */
+ BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */
BFA_STATUS_MAX_VAL /* Unknown error code */
};
#define bfa_status_t enum bfa_status
@@ -359,6 +366,139 @@ struct bfa_ioc_attr_s {
};
/*
+ * AEN related definitions
+ */
+enum bfa_aen_category {
+ BFA_AEN_CAT_ADAPTER = 1,
+ BFA_AEN_CAT_PORT = 2,
+ BFA_AEN_CAT_LPORT = 3,
+ BFA_AEN_CAT_RPORT = 4,
+ BFA_AEN_CAT_ITNIM = 5,
+ BFA_AEN_CAT_AUDIT = 8,
+ BFA_AEN_CAT_IOC = 9,
+};
+
+/* BFA adapter level events */
+enum bfa_adapter_aen_event {
+ BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */
+ BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */
+};
+
+struct bfa_adapter_aen_data_s {
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ u32 nports; /* Number of NPorts */
+ wwn_t pwwn; /* WWN of one of its physical port */
+};
+
+/* BFA physical port Level events */
+enum bfa_port_aen_event {
+ BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */
+ BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */
+ BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */
+ BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */
+ BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */
+ BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */
+ BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */
+ BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */
+ BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */
+ BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */
+ BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */
+ BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */
+ BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change */
+ BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */
+ BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */
+};
+
+enum bfa_port_aen_sfp_pom {
+ BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
+ BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
+ BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */
+ BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED
+};
+
+struct bfa_port_aen_data_s {
+ wwn_t pwwn; /* WWN of the physical port */
+ wwn_t fwwn; /* WWN of the fabric port */
+ u32 phy_port_num; /* For SFP related events */
+ u16 ioc_type;
+ u16 level; /* Only transitions will be informed */
+ mac_t mac; /* MAC address of the ethernet port */
+ u16 rsvd;
+};
+
+/* BFA AEN logical port events */
+enum bfa_lport_aen_event {
+ BFA_LPORT_AEN_NEW = 1, /* LPort created event */
+ BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */
+ BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */
+ BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */
+ BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */
+ BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */
+ BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */
+ BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */
+ BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */
+ BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort with duplicate WWN */
+ BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */
+ BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code */
+};
+
+struct bfa_lport_aen_data_s {
+ u16 vf_id; /* vf_id of this logical port */
+ u16 roles; /* Logical port mode,IM/TM/IP etc */
+ u32 rsvd;
+ wwn_t ppwwn; /* WWN of its physical port */
+ wwn_t lpwwn; /* WWN of this logical port */
+};
+
+/* BFA ITNIM events */
+enum bfa_itnim_aen_event {
+ BFA_ITNIM_AEN_ONLINE = 1, /* Target online */
+ BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */
+ BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */
+};
+
+struct bfa_itnim_aen_data_s {
+ u16 vf_id; /* vf_id of the IT nexus */
+ u16 rsvd[3];
+ wwn_t ppwwn; /* WWN of its physical port */
+ wwn_t lpwwn; /* WWN of logical port */
+ wwn_t rpwwn; /* WWN of remote(target) port */
+};
+
+/* BFA audit events */
+enum bfa_audit_aen_event {
+ BFA_AUDIT_AEN_AUTH_ENABLE = 1,
+ BFA_AUDIT_AEN_AUTH_DISABLE = 2,
+ BFA_AUDIT_AEN_FLASH_ERASE = 3,
+ BFA_AUDIT_AEN_FLASH_UPDATE = 4,
+};
+
+struct bfa_audit_aen_data_s {
+ wwn_t pwwn;
+ int partition_inst;
+ int partition_type;
+};
+
+/* BFA IOC level events */
+enum bfa_ioc_aen_event {
+ BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */
+ BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */
+ BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
+ BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
+ BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
+ BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */
+ BFA_IOC_AEN_INVALID_VENDOR = 7,
+ BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */
+ BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */
+};
+
+struct bfa_ioc_aen_data_s {
+ wwn_t pwwn;
+ u16 ioc_type;
+ mac_t mac;
+};
+
+/*
* ---------------------- mfg definitions ------------
*/
@@ -520,6 +660,20 @@ struct bfa_boot_bootlun_s {
/*
 * BOOT boot configuration
*/
+struct bfa_boot_cfg_s {
+ u8 version;
+ u8 rsvd1;
+ u16 chksum;
+ u8 enable; /* enable/disable SAN boot */
+ u8 speed; /* boot speed settings */
+ u8 topology; /* boot topology setting */
+ u8 bootopt; /* bfa_boot_bootopt_t */
+ u32 nbluns; /* number of boot luns */
+ u32 rsvd2;
+ struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
+ struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
+};
+
struct bfa_boot_pbc_s {
u8 enable; /* enable/disable SAN boot */
u8 speed; /* boot speed settings */
@@ -529,6 +683,15 @@ struct bfa_boot_pbc_s {
struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
};
+struct bfa_ethboot_cfg_s {
+ u8 version;
+ u8 rsvd1;
+ u16 chksum;
+ u8 enable; /* enable/disable Eth/PXE boot */
+ u8 rsvd2;
+ u16 vlan;
+};
+
/*
* ASIC block configuration related structures
*/
@@ -587,6 +750,14 @@ struct bfa_ablk_cfg_s {
*/
#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */
+/* SFP state change notification event */
+#define BFA_SFP_SCN_REMOVED 0
+#define BFA_SFP_SCN_INSERTED 1
+#define BFA_SFP_SCN_POM 2
+#define BFA_SFP_SCN_FAILED 3
+#define BFA_SFP_SCN_UNSUPPORT 4
+#define BFA_SFP_SCN_VALID 5
+
enum bfa_defs_sfp_media_e {
BFA_SFP_MEDIA_UNKNOWN = 0x00,
BFA_SFP_MEDIA_CU = 0x01,
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 0b97525..863c6ba 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -268,6 +268,7 @@ struct bfa_fw_port_snsm_stats_s {
u32 error_resets; /* error resets initiated by upsm */
u32 sync_lost; /* Sync loss count */
u32 sig_lost; /* Signal loss count */
+ u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
};
struct bfa_fw_port_physm_stats_s {
@@ -468,6 +469,7 @@ struct bfa_fw_stats_s {
* QoS states
*/
enum bfa_qos_state {
+ BFA_QOS_DISABLED = 0, /* QoS is disabled */
BFA_QOS_ONLINE = 1, /* QoS is online */
BFA_QOS_OFFLINE = 2, /* QoS is offline */
};
@@ -670,6 +672,12 @@ struct bfa_itnim_iostats_s {
u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
u32 tm_cleanups; /* TM cleanup requests */
u32 tm_cleanup_comps; /* TM cleanup completions */
+ u32 lm_lun_across_sg; /* LM lun is across sg data buf */
+ u32 lm_lun_not_sup; /* LM lun not supported */
+ u32 lm_rpl_data_changed; /* LM report-lun data changed */
+ u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
+ u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
+ u32 lm_lun_not_rdy; /* LM lun not ready */
};
/* Modify char* port_stt[] in bfal_port.c if a new state was added */
@@ -785,8 +793,51 @@ enum bfa_port_linkstate_rsn {
CEE_ISCSI_PRI_PFC_OFF = 42,
CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
};
+
+#define MAX_LUN_MASK_CFG 16
+
+/*
+ * Initially flash content may be fff. On making LUN mask enable and disable
+ * state chnage. when report lun command is being processed it goes from
+ * BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
+ * BFA_LUN_MASK_ACTIVE.
+ */
+enum bfa_ioim_lun_mask_state_s {
+ BFA_IOIM_LUN_MASK_INACTIVE = 0,
+ BFA_IOIM_LUN_MASK_ACTIVE = 1,
+ BFA_IOIM_LUN_MASK_FETCHED = 2,
+};
+
+enum bfa_lunmask_state_s {
+ BFA_LUNMASK_DISABLED = 0x00,
+ BFA_LUNMASK_ENABLED = 0x01,
+ BFA_LUNMASK_MINCFG = 0x02,
+ BFA_LUNMASK_UNINITIALIZED = 0xff,
+};
+
#pragma pack(1)
/*
+ * LUN mask configuration
+ */
+struct bfa_lun_mask_s {
+ wwn_t lp_wwn;
+ wwn_t rp_wwn;
+ struct scsi_lun lun;
+ u8 ua;
+ u8 rsvd[3];
+ u16 rp_tag;
+ u8 lp_tag;
+ u8 state;
+};
+
+#define MAX_LUN_MASK_CFG 16
+struct bfa_lunmask_cfg_s {
+ u32 status;
+ u32 rsvd;
+ struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG];
+};
+
+/*
* Physical port configuration
*/
struct bfa_port_cfg_s {
@@ -1228,4 +1279,52 @@ struct bfa_cee_stats_s {
#pragma pack()
+/*
+ * AEN related definitions
+ */
+#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \
+ | BFA_PCI_VENDOR_ID_BROCADE)
+
+/* BFA remote port events */
+enum bfa_rport_aen_event {
+ BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */
+ BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */
+ BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */
+ BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */
+ BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */
+};
+
+struct bfa_rport_aen_data_s {
+ u16 vf_id; /* vf_id of this logical port */
+ u16 rsvd[3];
+ wwn_t ppwwn; /* WWN of its physical port */
+ wwn_t lpwwn; /* WWN of this logical port */
+ wwn_t rpwwn; /* WWN of this remote port */
+ union {
+ struct bfa_rport_qos_attr_s qos;
+ } priv;
+};
+
+union bfa_aen_data_u {
+ struct bfa_adapter_aen_data_s adapter;
+ struct bfa_port_aen_data_s port;
+ struct bfa_lport_aen_data_s lport;
+ struct bfa_rport_aen_data_s rport;
+ struct bfa_itnim_aen_data_s itnim;
+ struct bfa_audit_aen_data_s audit;
+ struct bfa_ioc_aen_data_s ioc;
+};
+
+#define BFA_AEN_MAX_ENTRY 512
+
+struct bfa_aen_entry_s {
+ struct list_head qe;
+ enum bfa_aen_category aen_category;
+ u32 aen_type;
+ union bfa_aen_data_u aen_data;
+ struct timeval aen_tv;
+ u32 seq_num;
+ u32 bfad_num;
+};
+
#endif /* __BFA_DEFS_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 8d0b88f..50b6a1c 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,6 +56,161 @@ struct scsi_cdb_s {
#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocation length */
+#define SCSI_SENSE_CUR_ERR 0x70
+#define SCSI_SENSE_DEF_ERR 0x71
+
+/*
+ * SCSI additional sense codes
+ */
+#define SCSI_ASC_LUN_NOT_READY 0x04
+#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
+#define SCSI_ASC_TOCC 0x3F
+
+/*
+ * SCSI additional sense code qualifiers
+ */
+#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
+#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
+
+/*
+ * Methods of reporting informational exceptions
+ */
+#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
+
+struct scsi_report_luns_data_s {
+ u32 lun_list_length; /* LUN list length, in bytes */
+ u32 reserved;
+ struct scsi_lun lun[1]; /* first LUN in lun list */
+};
+
+struct scsi_inquiry_vendor_s {
+ u8 vendor_id[8];
+};
+
+struct scsi_inquiry_prodid_s {
+ u8 product_id[16];
+};
+
+struct scsi_inquiry_prodrev_s {
+ u8 product_rev[4];
+};
+
+struct scsi_inquiry_data_s {
+#ifdef __BIG_ENDIAN
+ u8 peripheral_qual:3; /* peripheral qualifier */
+ u8 device_type:5; /* peripheral device type */
+ u8 rmb:1; /* removable medium bit */
+ u8 device_type_mod:7; /* device type modifier */
+ u8 version;
+ u8 aenc:1; /* async evt notification capability */
+ u8 trm_iop:1; /* terminate I/O process */
+ u8 norm_aca:1; /* normal ACA supported */
+ u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
+ u8 rsp_data_format:4;
+ u8 additional_len;
+ u8 sccs:1;
+ u8 reserved1:7;
+ u8 reserved2:1;
+ u8 enc_serv:1; /* enclosure service component */
+ u8 reserved3:1;
+ u8 multi_port:1; /* multi-port device */
+ u8 m_chngr:1; /* device in medium transport element */
+ u8 ack_req_q:1; /* SIP specific bit */
+ u8 addr32:1; /* SIP specific bit */
+ u8 addr16:1; /* SIP specific bit */
+ u8 rel_adr:1; /* relative address */
+ u8 w_bus32:1;
+ u8 w_bus16:1;
+ u8 synchronous:1;
+ u8 linked_commands:1;
+ u8 trans_dis:1;
+ u8 cmd_queue:1; /* command queueing supported */
+ u8 soft_reset:1; /* soft reset alternative (VS) */
+#else
+ u8 device_type:5; /* peripheral device type */
+ u8 peripheral_qual:3; /* peripheral qualifier */
+ u8 device_type_mod:7; /* device type modifier */
+ u8 rmb:1; /* removable medium bit */
+ u8 version;
+ u8 rsp_data_format:4;
+ u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
+ u8 norm_aca:1; /* normal ACA supported */
+ u8 terminate_iop:1;/* terminate I/O process */
+ u8 aenc:1; /* async evt notification capability */
+ u8 additional_len;
+ u8 reserved1:7;
+ u8 sccs:1;
+ u8 addr16:1; /* SIP specific bit */
+ u8 addr32:1; /* SIP specific bit */
+ u8 ack_req_q:1; /* SIP specific bit */
+ u8 m_chngr:1; /* device in medium transport element */
+ u8 multi_port:1; /* multi-port device */
+ u8 reserved3:1; /* TBD - Vendor Specific */
+ u8 enc_serv:1; /* enclosure service component */
+ u8 reserved2:1;
+ u8 soft_seset:1; /* soft reset alternative (VS) */
+ u8 cmd_queue:1; /* command queueing supported */
+ u8 trans_dis:1;
+ u8 linked_commands:1;
+ u8 synchronous:1;
+ u8 w_bus16:1;
+ u8 w_bus32:1;
+ u8 rel_adr:1; /* relative address */
+#endif
+ struct scsi_inquiry_vendor_s vendor_id;
+ struct scsi_inquiry_prodid_s product_id;
+ struct scsi_inquiry_prodrev_s product_rev;
+ u8 vendor_specific[20];
+ u8 reserved4[40];
+};
+
+/*
+ * SCSI sense data format
+ */
+struct scsi_sense_s {
+#ifdef __BIG_ENDIAN
+ u8 valid:1;
+ u8 rsp_code:7;
+#else
+ u8 rsp_code:7;
+ u8 valid:1;
+#endif
+ u8 seg_num;
+#ifdef __BIG_ENDIAN
+ u8 file_mark:1;
+ u8 eom:1; /* end of media */
+ u8 ili:1; /* incorrect length indicator */
+ u8 reserved:1;
+ u8 sense_key:4;
+#else
+ u8 sense_key:4;
+ u8 reserved:1;
+ u8 ili:1; /* incorrect length indicator */
+ u8 eom:1; /* end of media */
+ u8 file_mark:1;
+#endif
+ u8 information[4]; /* device-type or cmd specific info */
+ u8 add_sense_length; /* additional sense length */
+ u8 command_info[4];/* command specific information */
+ u8 asc; /* additional sense code */
+ u8 ascq; /* additional sense code qualifier */
+ u8 fru_code; /* field replaceable unit code */
+#ifdef __BIG_ENDIAN
+ u8 sksv:1; /* sense key specific valid */
+ u8 c_d:1; /* command/data bit */
+ u8 res1:2;
+ u8 bpv:1; /* bit pointer valid */
+ u8 bpointer:3; /* bit pointer */
+#else
+ u8 bpointer:3; /* bit pointer */
+ u8 bpv:1; /* bit pointer valid */
+ u8 res1:2;
+ u8 c_d:1; /* command/data bit */
+ u8 sksv:1; /* sense key specific valid */
+#endif
+ u8 fpointer[2]; /* field pointer */
+};
+
/*
* Fibre Channel Header Structure (FCHS) definition
*/
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index a4e7951..e07bd47 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,6 +24,9 @@ BFA_TRC_FILE(HAL, FCPIM);
* BFA ITNIM Related definitions
*/
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
+static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
+static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
+static void bfa_ioim_lm_init(struct bfa_s *bfa);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
@@ -57,6 +60,14 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
} \
} while (0)
+#define bfa_ioim_rp_wwn(__ioim) \
+ (((struct bfa_fcs_rport_s *) \
+ (__ioim)->itnim->rport->rport_drv)->pwwn)
+
+#define bfa_ioim_lp_wwn(__ioim) \
+ ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
+ (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
+
#define bfa_itnim_sler_cb(__itnim) do { \
if ((__itnim)->bfa->fcs) \
bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -66,6 +77,18 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
} \
} while (0)
+enum bfa_ioim_lm_status {
+ BFA_IOIM_LM_PRESENT = 1,
+ BFA_IOIM_LM_LUN_NOT_SUP = 2,
+ BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
+ BFA_IOIM_LM_LUN_NOT_RDY = 4,
+};
+
+enum bfa_ioim_lm_ua_status {
+ BFA_IOIM_LM_UA_RESET = 0,
+ BFA_IOIM_LM_UA_SET = 1,
+};
+
/*
* itnim state machine event
*/
@@ -122,6 +145,9 @@ enum bfa_ioim_event {
BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
+ BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
+ BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
+ BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
};
@@ -219,6 +245,9 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
+static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
/*
* forward declaration of BFA IO state machine
@@ -416,6 +445,12 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
}
bfa_status_t
@@ -437,6 +472,59 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
return BFA_STATUS_OK;
}
+void
+bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
+{
+ struct bfa_itnim_latency_s *io_lat =
+ &(ioim->itnim->ioprofile.io_latency);
+ u32 val, idx;
+
+ val = (u32)(jiffies - ioim->start_time);
+ idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
+ bfa_itnim_ioprofile_update(ioim->itnim, idx);
+
+ io_lat->count[idx]++;
+ io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
+ io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
+ io_lat->avg[idx] += val;
+}
+
+void
+bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
+{
+ ioim->start_time = jiffies;
+}
+
+bfa_status_t
+bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+{
+ struct bfa_itnim_s *itnim;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct list_head *qe, *qen;
+
+ /* accumulate IO stats from itnim */
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_itnim_clear_stats(itnim);
+ }
+ fcpim->io_profile = BFA_TRUE;
+ fcpim->io_profile_start_time = time;
+ fcpim->profile_comp = bfa_ioim_profile_comp;
+ fcpim->profile_start = bfa_ioim_profile_start;
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_profile_off(struct bfa_s *bfa)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ fcpim->io_profile = BFA_FALSE;
+ fcpim->io_profile_start_time = 0;
+ fcpim->profile_comp = NULL;
+ fcpim->profile_start = NULL;
+ return BFA_STATUS_OK;
+}
+
u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
@@ -1401,6 +1489,26 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
+#define bfa_io_lat_clock_res_div HZ
+#define bfa_io_lat_clock_res_mul 1000
+bfa_status_t
+bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+ struct bfa_itnim_ioprofile_s *ioprofile)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
+ if (!fcpim->io_profile)
+ return BFA_STATUS_IOPROFILE_OFF;
+
+ itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+ itnim->ioprofile.io_profile_start_time =
+ bfa_io_profile_start_time(itnim->bfa);
+ itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
+ itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
+ *ioprofile = itnim->ioprofile;
+
+ return BFA_STATUS_OK;
+}
+
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
@@ -1469,7 +1577,28 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_abort, ioim);
+ __bfa_cb_ioim_abort, ioim);
+ break;
+
+ case BFA_IOIM_SM_LM_LUN_NOT_SUP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_lm_lun_not_sup, ioim);
+ break;
+
+ case BFA_IOIM_SM_LM_RPL_DC:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_lm_rpl_dc, ioim);
+ break;
+
+ case BFA_IOIM_SM_LM_LUN_NOT_RDY:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_lm_lun_not_rdy, ioim);
break;
default:
@@ -2009,6 +2138,264 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
+/*
+ * This is called from bfa_fcpim_start after the bfa_init() with flash read
+ * is complete by driver. now invalidate the stale content of lun mask
+ * like unit attention, rp tag and lp tag.
+ */
+static void
+bfa_ioim_lm_init(struct bfa_s *bfa)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ int i;
+
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return;
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
+ lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+ lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+ }
+}
+
+/*
+ * Validate LUN for LUN masking
+ */
+static enum bfa_ioim_lm_status
+bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
+ struct bfa_rport_s *rp, struct scsi_lun lun)
+{
+ u8 i;
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+ struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
+
+ if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
+ (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
+ return BFA_IOIM_LM_PRESENT;
+ }
+
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+
+ if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+ continue;
+
+ if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
+ scsilun_to_int((struct scsi_lun *)&lun))
+ && (rp->rport_tag == lun_list[i].rp_tag)
+ && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
+ lun_list[i].lp_tag)) {
+ bfa_trc(ioim->bfa, lun_list[i].rp_tag);
+ bfa_trc(ioim->bfa, lun_list[i].lp_tag);
+ bfa_trc(ioim->bfa, scsilun_to_int(
+ (struct scsi_lun *)&lun_list[i].lun));
+
+ if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
+ ((cdb->scsi_cdb[0] != INQUIRY) ||
+ (cdb->scsi_cdb[0] != REPORT_LUNS))) {
+ lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
+ return BFA_IOIM_LM_RPL_DATA_CHANGED;
+ }
+
+ if (cdb->scsi_cdb[0] == REPORT_LUNS)
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
+
+ return BFA_IOIM_LM_PRESENT;
+ }
+ }
+
+ if ((cdb->scsi_cdb[0] == INQUIRY) &&
+ (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
+ return BFA_IOIM_LM_PRESENT;
+ }
+
+ if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
+ return BFA_IOIM_LM_LUN_NOT_RDY;
+
+ return BFA_IOIM_LM_LUN_NOT_SUP;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
+{
+ return BFA_TRUE;
+}
+
+static void
+bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
+ int buf_lun_cnt)
+{
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+ struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
+ struct scsi_lun lun;
+ int i, j;
+
+ bfa_trc(ioim->bfa, buf_lun_cnt);
+ for (j = 0; j < buf_lun_cnt; j++) {
+ lun = *((struct scsi_lun *)(lun_data + j));
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+ continue;
+ if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
+ (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
+ (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
+ == scsilun_to_int((struct scsi_lun *)&lun))) {
+ lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
+ break;
+ }
+ } /* next lun in mask DB */
+ } /* next lun in buf */
+}
+
+static int
+bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
+ struct scsi_report_luns_data_s *rl)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+ struct scatterlist *sg = scsi_sglist(cmnd);
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+ struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
+ int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
+ int lun_across_sg_bytes, bytes_from_next_buf;
+ u64 last_lun, temp_last_lun;
+
+ /* fetch luns from the first sg element */
+ bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
+ (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
+
+ /* fetch luns from multiple sg elements */
+ scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
+ if (sgeid == 0) {
+ prev_sg_len = sg_dma_len(sg);
+ prev_rl_data = (struct scsi_lun *)
+ phys_to_virt(sg_dma_address(sg));
+ continue;
+ }
+
+ /* a LUN entry may straddle the previous sg element boundary */
+ lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
+ if (lun_across_sg_bytes) {
+ bfa_trc(ioim->bfa, lun_across_sg_bytes);
+ bfa_stats(ioim->itnim, lm_lun_across_sg);
+ bytes_from_next_buf = sizeof(struct scsi_lun) -
+ lun_across_sg_bytes;
+
+ /* from next buf take higher bytes */
+ temp_last_lun = *((u64 *)
+ phys_to_virt(sg_dma_address(sg)));
+ last_lun |= temp_last_lun >>
+ (lun_across_sg_bytes * BITS_PER_BYTE);
+
+ /* from prev buf take higher bytes */
+ temp_last_lun = *((u64 *)(prev_rl_data +
+ (prev_sg_len - lun_across_sg_bytes)));
+ temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
+ last_lun = last_lun | (temp_last_lun <<
+ (bytes_from_next_buf * BITS_PER_BYTE));
+
+ bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
+ } else
+ bytes_from_next_buf = 0;
+
+ *pgdlen += sg_dma_len(sg);
+ prev_sg_len = sg_dma_len(sg);
+ prev_rl_data = (struct scsi_lun *)
+ phys_to_virt(sg_dma_address(sg));
+ bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
+ bytes_from_next_buf,
+ sg_dma_len(sg) / sizeof(struct scsi_lun));
+ }
+
+ /* update the report luns data - based on fetched luns */
+ sg = scsi_sglist(cmnd);
+ base_rl_data = (struct scsi_lun *)rl->lun;
+ base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
+ for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
+ base_rl_data[j] = lun_list[i].lun;
+ lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
+ j++;
+ lun_fetched_cnt++;
+ }
+
+ if (j > base_count) {
+ j = 0;
+ sg = sg_next(sg);
+ base_rl_data = (struct scsi_lun *)
+ phys_to_virt(sg_dma_address(sg));
+ base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
+ }
+ }
+
+ bfa_trc(ioim->bfa, lun_fetched_cnt);
+ return lun_fetched_cnt;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
+{
+ struct scsi_inquiry_data_s *inq;
+ struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
+
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
+ inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
+
+ bfa_trc(ioim->bfa, inq->device_type);
+ inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
+ return BFA_FALSE;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+ struct scatterlist *sg = scsi_sglist(cmnd);
+ struct bfi_ioim_rsp_s *m;
+ struct scsi_report_luns_data_s *rl = NULL;
+ int lun_count = 0, lun_fetched_cnt = 0;
+ u32 residue, pgdlen = 0;
+
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
+ if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
+ return BFA_TRUE;
+
+ m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
+ if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
+ return BFA_TRUE;
+
+ pgdlen = sg_dma_len(sg);
+ bfa_trc(ioim->bfa, pgdlen);
+ rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
+ lun_count = be32_to_cpu(rl->lun_list_length) / sizeof(struct scsi_lun);
+ lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
+
+ if (lun_count == lun_fetched_cnt)
+ return BFA_TRUE;
+
+ bfa_trc(ioim->bfa, lun_count);
+ bfa_trc(ioim->bfa, lun_fetched_cnt);
+ bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
+
+ if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
+ rl->lun_list_length = cpu_to_be32(lun_fetched_cnt *
+ sizeof(struct scsi_lun));
+ else
+ bfa_stats(ioim->itnim, lm_small_buf_addresidue);
+
+ bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
+ bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
+
+ residue = be32_to_cpu(m->residue);
+ residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
+ bfa_stats(ioim->itnim, lm_wire_residue_changed);
+ m->residue = cpu_to_be32(residue);
+ bfa_trc(ioim->bfa, ioim->nsges);
+ return BFA_FALSE;
+}
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
@@ -2068,6 +2455,299 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
}
static void
+__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+ int sns_len = 0xD;
+ u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+ struct scsi_sense_s *snsinfo;
+
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
+ ioim->fcpim->fcp, ioim->iotag);
+ snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+ snsinfo->add_sense_length = 0xa;
+ snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
+ snsinfo->sense_key = ILLEGAL_REQUEST;
+ bfa_trc(ioim->bfa, residue);
+ bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+ SCSI_STATUS_CHECK_CONDITION, sns_len,
+ (u8 *)snsinfo, residue);
+}
+
+static void
+__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+ int sns_len = 0xD;
+ u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+ struct scsi_sense_s *snsinfo;
+
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
+ ioim->iotag);
+ snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+ snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
+ snsinfo->asc = SCSI_ASC_TOCC;
+ snsinfo->add_sense_length = 0x6;
+ snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
+ bfa_trc(ioim->bfa, residue);
+ bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+ SCSI_STATUS_CHECK_CONDITION, sns_len,
+ (u8 *)snsinfo, residue);
+}
+
+static void
+__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+ int sns_len = 0xD;
+ u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+ struct scsi_sense_s *snsinfo;
+
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
+ ioim->fcpim->fcp, ioim->iotag);
+ snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+ snsinfo->add_sense_length = 0xa;
+ snsinfo->sense_key = NOT_READY;
+ snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
+ snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
+ bfa_trc(ioim->bfa, residue);
+ bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+ SCSI_STATUS_CHECK_CONDITION, sns_len,
+ (u8 *)snsinfo, residue);
+}
+
+void
+bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
+ u16 rp_tag, u8 lp_tag)
+{
+ struct bfa_lun_mask_s *lun_list;
+ u8 i;
+
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return;
+
+ lun_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+ if ((lun_list[i].lp_wwn == lp_wwn) &&
+ (lun_list[i].rp_wwn == rp_wwn)) {
+ lun_list[i].rp_tag = rp_tag;
+ lun_list[i].lp_tag = lp_tag;
+ }
+ }
+ }
+}
+
+/*
+ * set UA for all active luns in LM DB
+ */
+static void
+bfa_ioim_lm_set_ua(struct bfa_s *bfa)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ int i;
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+ continue;
+ lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+ }
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
+{
+ struct bfa_lunmask_cfg_s *lun_mask;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ if (bfa_get_lun_mask_status(bfa) == update)
+ return BFA_STATUS_NO_CHANGE;
+
+ lun_mask = bfa_get_lun_mask(bfa);
+ lun_mask->status = update;
+
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
+ bfa_ioim_lm_set_ua(bfa);
+
+ return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
+{
+ int i;
+ struct bfa_lun_mask_s *lunm_list;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+ if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
+ bfa_rport_unset_lunmask(bfa,
+ BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
+ }
+ }
+
+ memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
+ return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
+{
+ struct bfa_lunmask_cfg_s *lun_mask;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ lun_mask = bfa_get_lun_mask(bfa);
+ memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+ wwn_t rpwwn, struct scsi_lun lun)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ struct bfa_rport_s *rp = NULL;
+ int i, free_index = MAX_LUN_MASK_CFG + 1;
+ struct bfa_fcs_lport_s *port = NULL;
+ struct bfa_fcs_rport_s *rp_fcs;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
+ vf_id, *pwwn);
+ if (port) {
+ *pwwn = port->port_cfg.pwwn;
+ rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
+ }
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ /* look for an existing entry and remember a free slot */
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+ free_index = i;
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn) &&
+ (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+ scsilun_to_int((struct scsi_lun *)&lun)))
+ return BFA_STATUS_ENTRY_EXISTS;
+ }
+
+ if (free_index > MAX_LUN_MASK_CFG)
+ return BFA_STATUS_MAX_ENTRY_REACHED;
+
+ if (rp) {
+ lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
+ rp->rport_info.local_pid);
+ lunm_list[free_index].rp_tag = rp->rport_tag;
+ } else {
+ lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
+ lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
+ }
+
+ lunm_list[free_index].lp_wwn = *pwwn;
+ lunm_list[free_index].rp_wwn = rpwwn;
+ lunm_list[free_index].lun = lun;
+ lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
+
+ /* set unit attention for all luns masked on this remote port */
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn))
+ lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+ }
+
+ return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+ wwn_t rpwwn, struct scsi_lun lun)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ struct bfa_rport_s *rp = NULL;
+ struct bfa_fcs_lport_s *port = NULL;
+ struct bfa_fcs_rport_s *rp_fcs;
+ int i;
+
+ /* in min cfg lunm_list could be NULL but no commands should run. */
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ bfa_trc(bfa, *pwwn);
+ bfa_trc(bfa, rpwwn);
+ bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
+
+ if (*pwwn == 0) {
+ port = bfa_fcs_lookup_port(
+ &((struct bfad_s *)bfa->bfad)->bfa_fcs,
+ vf_id, *pwwn);
+ if (port) {
+ *pwwn = port->port_cfg.pwwn;
+ rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
+ }
+ }
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn) &&
+ (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+ scsilun_to_int((struct scsi_lun *)&lun))) {
+ lunm_list[i].lp_wwn = 0;
+ lunm_list[i].rp_wwn = 0;
+ int_to_scsilun(0, &lunm_list[i].lun);
+ lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
+ if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
+ lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+ lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+ }
+ return bfa_dconf_update(bfa);
+ }
+ }
+
+ /* set unit attention for all luns masked on this remote port */
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn))
+ lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+ }
+
+ return BFA_STATUS_ENTRY_NOT_EXISTS;
+}
+
+static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
struct bfa_ioim_s *ioim = cbarg;
@@ -2077,6 +2757,7 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
return;
}
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
0, 0, NULL, 0);
}
@@ -2092,6 +2773,7 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
return;
}
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
0, 0, NULL, 0);
}
@@ -2106,6 +2788,7 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
return;
}
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
@@ -2449,6 +3132,7 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
ioim->bfa = fcpim->bfa;
ioim->fcpim = fcpim;
ioim->iosp = iosp;
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
INIT_LIST_HEAD(&ioim->sgpg_q);
bfa_reqq_winit(&ioim->iosp->reqq_wait,
bfa_ioim_qresume, ioim);
@@ -2486,6 +3170,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
evt = BFA_IOIM_SM_DONE;
else
evt = BFA_IOIM_SM_COMP;
+ ioim->proc_rsp_data(ioim);
break;
case BFI_IOIM_STS_TIMEDOUT:
@@ -2521,6 +3206,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
if (rsp->abort_tag != ioim->abort_tag) {
bfa_trc(ioim->bfa, rsp->abort_tag);
bfa_trc(ioim->bfa, ioim->abort_tag);
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
return;
}
@@ -2539,6 +3225,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
WARN_ON(1);
}
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_sm_send_event(ioim, evt);
}
@@ -2556,7 +3243,16 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
bfa_ioim_cb_profile_comp(fcpim, ioim);
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+
+ if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+ return;
+ }
+
+ if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+ else
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
}
/*
@@ -2668,6 +3364,35 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+ struct bfa_lps_s *lps;
+ enum bfa_ioim_lm_status status;
+ struct scsi_lun scsilun;
+
+ if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
+ lps = BFA_IOIM_TO_LPS(ioim);
+ int_to_scsilun(cmnd->device->lun, &scsilun);
+ status = bfa_ioim_lm_check(ioim, lps,
+ ioim->itnim->rport, scsilun);
+ if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
+ bfa_stats(ioim->itnim, lm_lun_not_rdy);
+ return;
+ }
+
+ if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
+ bfa_stats(ioim->itnim, lm_lun_not_sup);
+ return;
+ }
+
+ if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
+ bfa_stats(ioim->itnim, lm_rpl_data_changed);
+ return;
+ }
+ }
+
bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
/*
@@ -3411,6 +4136,13 @@ bfa_fcp_detach(struct bfa_s *bfa)
static void
bfa_fcp_start(struct bfa_s *bfa)
{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+ /*
+ * bfa_init() with flash read is complete. Now invalidate the stale
+ * content of the lun mask, such as unit attention, rp tag and lp tag.
+ */
+ bfa_ioim_lm_init(fcp->bfa);
}
static void
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 57b695a..1080bcb 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -79,14 +79,22 @@ bfa_ioim_get_index(u32 n) {
if (n >= (1UL)<<22)
return BFA_IOBUCKET_MAX - 1;
n >>= 8;
- if (n >= (1UL)<<16)
- n >>= 16; pos += 16;
- if (n >= 1 << 8)
- n >>= 8; pos += 8;
- if (n >= 1 << 4)
- n >>= 4; pos += 4;
- if (n >= 1 << 2)
- n >>= 2; pos += 2;
+ if (n >= (1UL)<<16) {
+ n >>= 16;
+ pos += 16;
+ }
+ if (n >= 1 << 8) {
+ n >>= 8;
+ pos += 8;
+ }
+ if (n >= 1 << 4) {
+ n >>= 4;
+ pos += 4;
+ }
+ if (n >= 1 << 2) {
+ n >>= 2;
+ pos += 2;
+ }
if (n >= 1 << 1)
pos += 1;
@@ -102,6 +110,7 @@ struct bfad_ioim_s;
struct bfad_tskim_s;
typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
+typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
struct bfa_fcpim_s {
struct bfa_s *bfa;
@@ -115,7 +124,7 @@ struct bfa_fcpim_s {
u32 path_tov;
u16 q_depth;
u8 reqq; /* Request queue to be used */
- u8 rsvd;
+ u8 lun_masking_pending;
struct list_head itnim_q; /* queue of active itnim */
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
struct list_head ioim_comp_q; /* IO global comp Q */
@@ -170,7 +179,9 @@ struct bfa_ioim_s {
bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
u8 reqq; /* Request queue for I/O */
+ u8 mode; /* IO is passthrough or not */
u64 start_time; /* IO's Profile start val */
+ bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
};
struct bfa_ioim_sp_s {
@@ -250,6 +261,10 @@ struct bfa_itnim_s {
(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
} while (0)
+#define BFA_IOIM_TO_LPS(__ioim) \
+ BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
+ __ioim->itnim->rport->rport_info.lp_tag)
+
static inline bfa_boolean_t
bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
{
@@ -297,6 +312,8 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
struct bfa_itnim_iostats_s *stats, u8 lp_tag);
void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
struct bfa_itnim_iostats_s *itnim_stats);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
#define bfa_fcpim_ioredirect_enabled(__bfa) \
(((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
@@ -397,4 +414,14 @@ void bfa_tskim_start(struct bfa_tskim_s *tskim,
void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
enum bfi_tskim_status tsk_status);
+void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn,
+ wwn_t rp_wwn, u16 rp_tag, u8 lp_tag);
+bfa_status_t bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off);
+bfa_status_t bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf);
+bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
+ wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
+ wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
+
#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index a9b22bc..eaac57e 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -20,6 +20,7 @@
*/
#include "bfad_drv.h"
+#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
@@ -1327,6 +1328,29 @@ bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_trc(fabric->fcs, status);
}
+
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port,
+ enum bfa_port_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port);
+ aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port);
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_PORT, event);
+}
+
/*
*
* @param[in] fabric - fabric
@@ -1358,6 +1382,8 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
"Base port WWN = %s Fabric WWN = %s\n",
pwwn_ptr, fwwn_ptr);
+ bfa_fcs_fabric_aen_post(&fabric->bport,
+ BFA_PORT_AEN_FABRIC_NAME_CHANGE);
}
}
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index a5f1faf..e75e07d 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -675,6 +675,7 @@ struct bfa_fcs_s {
struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
struct bfa_fcs_stats_s stats; /* FCS statistics */
struct bfa_wc_s wc; /* waiting counter */
+ int fcs_aen_seq;
};
/*
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 29b4108..9272840 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -37,6 +37,8 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs);
+static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_itnim_aen_event event);
/*
* fcs_itnim_sm FCS itnim state machine events
@@ -269,6 +271,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Target (WWN = %s) is online for initiator (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
+ bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
break;
case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -305,14 +308,17 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
bfa_itnim_offline(itnim->bfa_itnim);
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
wwn2str(rpwwn_buf, itnim->rport->pwwn);
- if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
+ if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Target (WWN = %s) connectivity lost for "
"initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
- else
+ bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
+ } else {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Target (WWN = %s) offlined by initiator (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
+ bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
+ }
break;
case BFA_FCS_ITNIM_SM_DELETE:
@@ -382,6 +388,33 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
}
static void
+bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_itnim_aen_event event)
+{
+ struct bfa_fcs_rport_s *rport = itnim->rport;
+ struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ /* Don't post events for well known addresses */
+ if (BFA_FCS_PID_IS_WKA(rport->pid))
+ return;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id;
+ aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(itnim->fcs));
+ aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+ aen_entry->aen_data.itnim.rpwwn = rport->pwwn;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_ITNIM, event);
+}
+
+static void
bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index f8251a9..d4f951f 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -16,6 +16,7 @@
*/
#include "bfad_drv.h"
+#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
#include "bfa_fc.h"
@@ -300,6 +301,31 @@ bfa_fcs_lport_sm_deleting(
*/
/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
+ enum bfa_lport_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+ aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+ aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(port->fcs));
+ aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_LPORT, event);
+}
+
+/*
* Send a LS reject
*/
static void
@@ -593,6 +619,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Logical port online: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
bfad->bfad_flags |= BFAD_PORT_ONLINE;
}
@@ -611,14 +638,17 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
if (bfa_sm_cmp_state(port->fabric,
- bfa_fcs_fabric_sm_online) == BFA_TRUE)
+ bfa_fcs_fabric_sm_online) == BFA_TRUE) {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
- else
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
+ } else {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Logical port taken offline: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
+ }
list_for_each_safe(qe, qen, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
@@ -676,6 +706,7 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Logical port deleted: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
/* Base port will be deleted by the OS driver */
if (port->vport) {
@@ -973,6 +1004,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"New logical port created: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
@@ -5559,6 +5591,31 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
* fcs_vport_private FCS virtual port private functions
*/
/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
+ enum bfa_lport_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+ aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+ aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(port->fcs));
+ aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_LPORT, event);
+}
+
+/*
* This routine will be called to send a FDISC command.
*/
static void
@@ -5585,8 +5642,11 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
- else
+ else {
+ bfa_fcs_vport_aen_post(&vport->lport,
+ BFA_LPORT_AEN_NPIV_DUP_WWN);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
+ }
break;
case FC_LS_RJT_EXP_INSUFF_RES:
@@ -5596,11 +5656,17 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
*/
if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
- else
+ else {
+ bfa_fcs_vport_aen_post(&vport->lport,
+ BFA_LPORT_AEN_NPIV_FABRIC_MAX);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
+ }
break;
default:
+ if (vport->fdisc_retries == 0)
+ bfa_fcs_vport_aen_post(&vport->lport,
+ BFA_LPORT_AEN_NPIV_UNKNOWN);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
}
}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 2c51445..52628d5 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -20,6 +20,7 @@
*/
#include "bfad_drv.h"
+#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
@@ -2041,6 +2042,35 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
}
static void
+bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
+ enum bfa_rport_aen_event event,
+ struct bfa_rport_aen_data_s *data)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ if (event == BFA_RPORT_AEN_QOS_PRIO)
+ aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+ else if (event == BFA_RPORT_AEN_QOS_FLOWID)
+ aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+
+ aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
+ aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(rport->fcs));
+ aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+ aen_entry->aen_data.rport.rpwwn = rport->pwwn;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_RPORT, event);
+}
+
+static void
bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
@@ -2063,10 +2093,12 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
wwn2str(rpwwn_buf, rport->pwwn);
- if (!BFA_FCS_PID_IS_WKA(rport->pid))
+ if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Remote port (WWN = %s) online for logical port (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
+ bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
+ }
}
static void
@@ -2083,16 +2115,21 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
wwn2str(rpwwn_buf, rport->pwwn);
if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
- if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE)
+ if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Remote port (WWN = %s) connectivity lost for "
"logical port (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
- else
+ bfa_fcs_rport_aen_post(rport,
+ BFA_RPORT_AEN_DISCONNECT, NULL);
+ } else {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Remote port (WWN = %s) offlined by "
"logical port (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
+ bfa_fcs_rport_aen_post(rport,
+ BFA_RPORT_AEN_OFFLINE, NULL);
+ }
}
if (bfa_fcs_lport_is_initiator(port)) {
@@ -2366,8 +2403,11 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
struct bfa_rport_qos_attr_s new_qos_attr)
{
struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+ struct bfa_rport_aen_data_s aen_data;
bfa_trc(rport->fcs, rport->pwwn);
+ aen_data.priv.qos = new_qos_attr;
+ bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
}
/*
@@ -2390,8 +2430,11 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
struct bfa_rport_qos_attr_s new_qos_attr)
{
struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+ struct bfa_rport_aen_data_s aen_data;
bfa_trc(rport->fcs, rport->pwwn);
+ aen_data.priv.qos = new_qos_attr;
+ bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
}
/*
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index e7ffd82..ea24d4c 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -42,11 +42,36 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
bfa->iocfc.bfa_regs.intr_status);
}
+/*
+ * Actions to respond to an RME interrupt on the Crossbow ASIC:
+ * - Write 1 to Interrupt Status register
+ * INTX - done in bfa_intx()
+ * MSIX - done in bfa_hwcb_rspq_ack_msix()
+ * - Update CI (only if new CI)
+ */
static void
-bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
+bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
{
writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
- bfa->iocfc.bfa_regs.intr_status);
+ bfa->iocfc.bfa_regs.intr_status);
+
+ if (bfa_rspq_ci(bfa, rspq) == ci)
+ return;
+
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
+}
+
+void
+bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+ if (bfa_rspq_ci(bfa, rspq) == ci)
+ return;
+
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
}
void
@@ -149,8 +174,13 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
void
bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
{
- bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
- bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+ if (msix) {
+ bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
+ bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+ } else {
+ bfa->iocfc.hwif.hw_reqq_ack = NULL;
+ bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
+ }
}
void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 989bbce..637527f 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -64,13 +64,36 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
}
+/*
+ * Actions to respond to an RME interrupt on the Catapult ASIC:
+ * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
+ * - Acknowledge by writing to RME Queue Control register
+ * - Update CI
+ */
void
-bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
+bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
{
u32 r32;
r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
+}
+
+/*
+ * Actions to respond to an RME interrupt on the Catapult2 ASIC:
+ * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
+ * - Update CI
+ */
+void
+bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
}
void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index d6c2bf3..1ac5aec 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -16,6 +16,7 @@
*/
#include "bfad_drv.h"
+#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
@@ -458,6 +459,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}
static void
@@ -502,6 +504,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}
/*
@@ -1966,6 +1969,7 @@ bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
"Heart Beat of IOC has failed\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
}
@@ -1980,6 +1984,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
"Running firmware version is incompatible "
"with the driver version\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}
bfa_status_t
@@ -2679,6 +2684,43 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
}
/*
+ * Send AEN notification
+ */
+void
+bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+ enum bfa_ioc_type_e ioc_type;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ ioc_type = bfa_ioc_get_type(ioc);
+ switch (ioc_type) {
+ case BFA_IOC_TYPE_FC:
+ aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+ break;
+ case BFA_IOC_TYPE_FCoE:
+ aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+ aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+ break;
+ case BFA_IOC_TYPE_LL:
+ aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+ break;
+ default:
+ WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
+ break;
+ }
+
+ /* Send the AEN notification */
+ aen_entry->aen_data.ioc.ioc_type = ioc_type;
+ bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+ BFA_AEN_CAT_IOC, event);
+}
+
+/*
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
@@ -2879,6 +2921,10 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
return;
+ if (ioc->attr->nwwn == 0)
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
+ if (ioc->attr->pwwn == 0)
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
}
/*
@@ -3443,6 +3489,54 @@ bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
}
/*
+ * SFP's State Change Notification post to AEN
+ */
+static void
+bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
+{
+ struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+ enum bfa_port_aen_event aen_evt = 0;
+
+ bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
+ ((u64)rsp->event));
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
+ aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
+ aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
+
+ switch (rsp->event) {
+ case BFA_SFP_SCN_INSERTED:
+ aen_evt = BFA_PORT_AEN_SFP_INSERT;
+ break;
+ case BFA_SFP_SCN_REMOVED:
+ aen_evt = BFA_PORT_AEN_SFP_REMOVE;
+ break;
+ case BFA_SFP_SCN_FAILED:
+ aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
+ break;
+ case BFA_SFP_SCN_UNSUPPORT:
+ aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
+ break;
+ case BFA_SFP_SCN_POM:
+ aen_evt = BFA_PORT_AEN_SFP_POM;
+ aen_entry->aen_data.port.level = rsp->pomlvl;
+ break;
+ default:
+ bfa_trc(sfp, rsp->event);
+ WARN_ON(1);
+ }
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
+ BFA_AEN_CAT_PORT, aen_evt);
+}
+
+/*
* SFP get data send
*/
static void
@@ -3482,6 +3576,50 @@ bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
}
/*
+ * SFP scn handler
+ */
+static void
+bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
+{
+ struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
+
+ switch (rsp->event) {
+ case BFA_SFP_SCN_INSERTED:
+ sfp->state = BFA_SFP_STATE_INSERTED;
+ sfp->data_valid = 0;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_REMOVED:
+ sfp->state = BFA_SFP_STATE_REMOVED;
+ sfp->data_valid = 0;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_FAILED:
+ sfp->state = BFA_SFP_STATE_FAILED;
+ sfp->data_valid = 0;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_UNSUPPORT:
+ sfp->state = BFA_SFP_STATE_UNSUPPORT;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ if (!sfp->lock)
+ bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+ break;
+ case BFA_SFP_SCN_POM:
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_VALID:
+ sfp->state = BFA_SFP_STATE_VALID;
+ if (!sfp->lock)
+ bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+ break;
+ default:
+ bfa_trc(sfp, rsp->event);
+ WARN_ON(1);
+ }
+}
+
+/*
* SFP show complete
*/
static void
@@ -3645,7 +3783,7 @@ bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
break;
case BFI_SFP_I2H_SCN:
- bfa_trc(sfp, msg->mh.msg_id);
+ bfa_sfp_scn(sfp, msg);
break;
default:
@@ -3838,6 +3976,26 @@ bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
static void
+bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
+ int inst, int type)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
+ aen_entry->aen_data.audit.partition_inst = inst;
+ aen_entry->aen_data.audit.partition_type = type;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+ BFA_AEN_CAT_AUDIT, event);
+}
+
+static void
bfa_flash_cb(struct bfa_flash_s *flash)
{
flash->op_busy = 0;
@@ -3978,6 +4136,7 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
struct bfi_flash_erase_rsp_s *erase;
struct bfi_flash_write_rsp_s *write;
struct bfi_flash_read_rsp_s *read;
+ struct bfi_flash_event_s *event;
struct bfi_mbmsg_s *msg;
} m;
@@ -4061,8 +4220,19 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
}
break;
case BFI_FLASH_I2H_BOOT_VER_RSP:
+ break;
case BFI_FLASH_I2H_EVENT:
- bfa_trc(flash, msg->mh.msg_id);
+ status = be32_to_cpu(m.event->status);
+ bfa_trc(flash, status);
+ if (status == BFA_STATUS_BAD_FWCFG)
+ bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
+ else if (status == BFA_STATUS_INVALID_VENDOR) {
+ u32 param;
+ param = be32_to_cpu(m.event->param);
+ bfa_trc(flash, param);
+ bfa_ioc_aen_post(flash->ioc,
+ BFA_IOC_AEN_INVALID_VENDOR);
+ }
break;
default:
@@ -4204,6 +4374,8 @@ bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
flash->instance = instance;
bfa_flash_erase_send(flash);
+ bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
+ instance, type);
return BFA_STATUS_OK;
}
@@ -5416,3 +5588,396 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
WARN_ON(1);
}
}
+
+/*
+ * DCONF module specific
+ */
+
+BFA_MODULE(dconf);
+
+/*
+ * DCONF state machine events
+ */
+enum bfa_dconf_event {
+ BFA_DCONF_SM_INIT = 1, /* dconf Init */
+ BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
+ BFA_DCONF_SM_WR = 3, /* dconf update request */
+ BFA_DCONF_SM_TIMEOUT = 4, /* update timer expiry */
+ BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
+ BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
+};
+
+/* forward declaration of DCONF state machine */
+static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+
+static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
+static void bfa_dconf_timer(void *cbarg);
+static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
+static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
+
+/*
+ * Beginning state of the dconf module. Waiting for an event to start.
+ */
+static void
+bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_status_t bfa_status;
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_INIT:
+ if (dconf->min_cfg) {
+ bfa_trc(dconf->bfa, dconf->min_cfg);
+ return;
+ }
+ bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
+ dconf->flashdone = BFA_FALSE;
+ bfa_trc(dconf->bfa, dconf->flashdone);
+ bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
+ BFA_FLASH_PART_DRV, dconf->instance,
+ dconf->dconf,
+ sizeof(struct bfa_dconf_s), 0,
+ bfa_dconf_init_cb, dconf->bfa);
+ if (bfa_status != BFA_STATUS_OK) {
+ bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ return;
+ }
+ break;
+ case BFA_DCONF_SM_EXIT:
+ dconf->flashdone = BFA_TRUE;
+ case BFA_DCONF_SM_IOCDISABLE:
+ case BFA_DCONF_SM_WR:
+ case BFA_DCONF_SM_FLASH_COMP:
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * Read the dconf entries from flash and call back into the driver once done.
+ */
+static void
+bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_FLASH_COMP:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ break;
+ case BFA_DCONF_SM_TIMEOUT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ dconf->flashdone = BFA_TRUE;
+ bfa_trc(dconf->bfa, dconf->flashdone);
+ case BFA_DCONF_SM_IOCDISABLE:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * DCONF module is in the ready state; initialization has completed.
+ */
+static void
+bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_WR:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ dconf->flashdone = BFA_TRUE;
+ bfa_trc(dconf->bfa, dconf->flashdone);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ break;
+ case BFA_DCONF_SM_INIT:
+ case BFA_DCONF_SM_IOCDISABLE:
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * Entries are dirty; write them back to flash.
+ */
+
+static void
+bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_TIMEOUT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
+ bfa_dconf_flash_write(dconf);
+ break;
+ case BFA_DCONF_SM_WR:
+ bfa_timer_stop(&dconf->timer);
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ bfa_timer_stop(&dconf->timer);
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+ bfa_dconf_flash_write(dconf);
+ break;
+ case BFA_DCONF_SM_FLASH_COMP:
+ break;
+ case BFA_DCONF_SM_IOCDISABLE:
+ bfa_timer_stop(&dconf->timer);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * Sync the dconf entries to the flash.
+ */
+static void
+bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_IOCDISABLE:
+ case BFA_DCONF_SM_FLASH_COMP:
+ bfa_timer_stop(&dconf->timer);
+ case BFA_DCONF_SM_TIMEOUT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ dconf->flashdone = BFA_TRUE;
+ bfa_trc(dconf->bfa, dconf->flashdone);
+ bfa_ioc_disable(&dconf->bfa->ioc);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+static void
+bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_FLASH_COMP:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ break;
+ case BFA_DCONF_SM_WR:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+ break;
+ case BFA_DCONF_SM_IOCDISABLE:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+static void
+bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_INIT:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ dconf->flashdone = BFA_TRUE;
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ break;
+ case BFA_DCONF_SM_IOCDISABLE:
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * Compute and return memory needed by DRV_CFG module.
+ */
+static void
+bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
+
+ if (cfg->drvcfg.min_cfg)
+ bfa_mem_kva_setup(meminfo, dconf_kva,
+ sizeof(struct bfa_dconf_hdr_s));
+ else
+ bfa_mem_kva_setup(meminfo, dconf_kva,
+ sizeof(struct bfa_dconf_s));
+}
+
+static void
+bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+ dconf->bfad = bfad;
+ dconf->bfa = bfa;
+ dconf->instance = bfa->ioc.port_id;
+ bfa_trc(bfa, dconf->instance);
+
+ dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
+ if (cfg->drvcfg.min_cfg) {
+ bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
+ dconf->min_cfg = BFA_TRUE;
+ /*
+ * Set the flashdone flag to TRUE explicitly as no flash
+ * write will happen in min_cfg mode.
+ */
+ dconf->flashdone = BFA_TRUE;
+ } else {
+ dconf->min_cfg = BFA_FALSE;
+ bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
+ }
+
+ bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+}
+
+static void
+bfa_dconf_init_cb(void *arg, bfa_status_t status)
+{
+ struct bfa_s *bfa = arg;
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+ dconf->flashdone = BFA_TRUE;
+ bfa_trc(bfa, dconf->flashdone);
+ bfa_iocfc_cb_dconf_modinit(bfa, status);
+ if (status == BFA_STATUS_OK) {
+ bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
+ if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
+ dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
+ if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
+ dconf->dconf->hdr.version = BFI_DCONF_VERSION;
+ }
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+}
+
+void
+bfa_dconf_modinit(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
+}
+static void
+bfa_dconf_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_dconf_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_dconf_timer(void *cbarg)
+{
+ struct bfa_dconf_mod_s *dconf = cbarg;
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
+}
+static void
+bfa_dconf_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
+}
+
+static void
+bfa_dconf_detach(struct bfa_s *bfa)
+{
+}
+
+static bfa_status_t
+bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
+{
+ bfa_status_t bfa_status;
+ bfa_trc(dconf->bfa, 0);
+
+ bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
+ BFA_FLASH_PART_DRV, dconf->instance,
+ dconf->dconf, sizeof(struct bfa_dconf_s), 0,
+ bfa_dconf_cbfn, dconf);
+ if (bfa_status != BFA_STATUS_OK)
+ WARN_ON(bfa_status);
+ bfa_trc(dconf->bfa, bfa_status);
+
+ return bfa_status;
+}
+
+bfa_status_t
+bfa_dconf_update(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ bfa_trc(dconf->bfa, 0);
+ if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
+ return BFA_STATUS_FAILED;
+
+ if (dconf->min_cfg) {
+ bfa_trc(dconf->bfa, dconf->min_cfg);
+ return BFA_STATUS_FAILED;
+ }
+
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_dconf_cbfn(void *arg, bfa_status_t status)
+{
+ struct bfa_dconf_mod_s *dconf = arg;
+ WARN_ON(status);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+}
+
+void
+bfa_dconf_modexit(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
+ bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
+}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index c5ecd2e..546d46b 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -327,6 +327,7 @@ struct bfa_ioc_s {
enum bfa_mode_s port_mode;
u8 ad_cap_bm; /* adapter cap bit mask */
u8 port_mode_cfg; /* config port mode */
+ int ioc_aen_seq;
};
struct bfa_ioc_hwif_s {
@@ -366,6 +367,8 @@ struct bfa_cb_qe_s {
struct list_head qe;
bfa_cb_cbfn_t cbfn;
bfa_boolean_t once;
+ bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
+ bfa_status_t fw_status; /* to access fw status in comp proc */
void *cbarg;
};
@@ -658,7 +661,6 @@ struct bfa_phy_s {
struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
struct bfa_mem_dma_s phy_dma;
};
-
#define BFA_PHY(__bfa) (&(__bfa)->modules.phy)
#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma))
@@ -684,6 +686,49 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy,
void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
/*
+ * Driver Config (dconf) specific
+ */
+#define BFI_DCONF_SIGNATURE 0xabcdabcd
+#define BFI_DCONF_VERSION 1
+
+#pragma pack(1)
+struct bfa_dconf_hdr_s {
+ u32 signature;
+ u32 version;
+};
+
+struct bfa_dconf_s {
+ struct bfa_dconf_hdr_s hdr;
+ struct bfa_lunmask_cfg_s lun_mask;
+};
+#pragma pack()
+
+struct bfa_dconf_mod_s {
+ bfa_sm_t sm;
+ u8 instance;
+ bfa_boolean_t flashdone;
+ bfa_boolean_t read_data_valid;
+ bfa_boolean_t min_cfg;
+ struct bfa_timer_s timer;
+ struct bfa_s *bfa;
+ void *bfad;
+ void *trcmod;
+ struct bfa_dconf_s *dconf;
+ struct bfa_mem_kva_s kva_seg;
+};
+
+#define BFA_DCONF_MOD(__bfa) \
+ (&(__bfa)->modules.dconf_mod)
+#define BFA_MEM_DCONF_KVA(__bfa) (&(BFA_DCONF_MOD(__bfa)->kva_seg))
+#define bfa_dconf_read_data_valid(__bfa) \
+ (BFA_DCONF_MOD(__bfa)->read_data_valid)
+#define BFA_DCONF_UPDATE_TOV 5000 /* dconf update timeout in msec */
+
+void bfa_dconf_modinit(struct bfa_s *bfa);
+void bfa_dconf_modexit(struct bfa_s *bfa);
+bfa_status_t bfa_dconf_update(struct bfa_s *bfa);
+
+/*
 * IOC specific macros
*/
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -803,6 +848,7 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
struct bfi_ioc_image_hdr_s *fwhdr);
bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
struct bfi_ioc_image_hdr_s *fwhdr);
+void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 1c6efd4..2d36e48 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -44,6 +44,7 @@ struct bfa_modules_s {
struct bfa_flash_s flash; /* flash module */
struct bfa_diag_s diag_mod; /* diagnostics module */
struct bfa_phy_s phy; /* phy module */
+ struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */
};
/*
@@ -119,6 +120,7 @@ struct bfa_s {
struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
bfa_boolean_t fcs; /* FCS is attached to BFA */
struct bfa_msix_s msix;
+ int bfa_aen_seq;
};
extern bfa_boolean_t bfa_auto_recover;
@@ -130,5 +132,6 @@ extern struct bfa_module_s hal_mod_lps;
extern struct bfa_module_s hal_mod_uf;
extern struct bfa_module_s hal_mod_rport;
extern struct bfa_module_s hal_mod_fcp;
+extern struct bfa_module_s hal_mod_dconf;
#endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 21caaef..aa8a0ea 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -16,6 +16,7 @@
*/
#include "bfad_drv.h"
+#include "bfad_im.h"
#include "bfa_plog.h"
#include "bfa_cs.h"
#include "bfa_modules.h"
@@ -2007,6 +2008,24 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
}
}
+static void
+bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
+ aen_entry->aen_data.port.pwwn = fcport->pwwn;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
+ BFA_AEN_CAT_PORT, event);
+}
+
/*
* FC PORT state machine functions
*/
@@ -2095,6 +2114,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
case BFA_FCPORT_SM_LINKUP:
@@ -2155,6 +2175,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
case BFA_FCPORT_SM_STOP:
@@ -2208,6 +2229,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port online: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
+
+ /* If QoS is enabled and it is not online, send AEN */
+ if (fcport->cfg.qos_enabled &&
+ fcport->qos_attr.state != BFA_QOS_ONLINE)
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
break;
case BFA_FCPORT_SM_LINKDOWN:
@@ -2234,6 +2261,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
case BFA_FCPORT_SM_STOP:
@@ -2279,8 +2307,10 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port offline: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
case BFA_FCPORT_SM_LINKDOWN:
@@ -2290,26 +2320,32 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
wwn2str(pwwn_buf, fcport->pwwn);
- if (BFA_PORT_IS_DISABLED(fcport->bfa))
+ if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port offline: WWN = %s\n", pwwn_buf);
- else
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ } else {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Base port (WWN = %s) "
"lost fabric connectivity\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ }
break;
case BFA_FCPORT_SM_STOP:
bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
bfa_fcport_reset_linkinfo(fcport);
wwn2str(pwwn_buf, fcport->pwwn);
- if (BFA_PORT_IS_DISABLED(fcport->bfa))
+ if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port offline: WWN = %s\n", pwwn_buf);
- else
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ } else {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Base port (WWN = %s) "
"lost fabric connectivity\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ }
break;
case BFA_FCPORT_SM_HWFAIL:
@@ -2317,13 +2353,16 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
bfa_fcport_reset_linkinfo(fcport);
bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
wwn2str(pwwn_buf, fcport->pwwn);
- if (BFA_PORT_IS_DISABLED(fcport->bfa))
+ if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port offline: WWN = %s\n", pwwn_buf);
- else
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ } else {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Base port (WWN = %s) "
"lost fabric connectivity\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ }
break;
default:
@@ -2454,6 +2493,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port enabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
break;
case BFA_FCPORT_SM_STOP:
@@ -2508,6 +2548,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port enabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
break;
case BFA_FCPORT_SM_DISABLE:
@@ -2874,6 +2915,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
+ INIT_LIST_HEAD(&fcport->stats_pending_q);
+ INIT_LIST_HEAD(&fcport->statsclr_pending_q);
+
bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
@@ -3102,30 +3146,38 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
- struct bfa_fcport_s *fcport = cbarg;
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
+ struct bfa_cb_pending_q_s *cb;
+ struct list_head *qe, *qen;
+ union bfa_fcport_stats_u *ret;
if (complete) {
- if (fcport->stats_status == BFA_STATUS_OK) {
- struct timeval tv;
-
- /* Swap FC QoS or FCoE stats */
- if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
- bfa_fcport_qos_stats_swap(
- &fcport->stats_ret->fcqos,
- &fcport->stats->fcqos);
- } else {
- bfa_fcport_fcoe_stats_swap(
- &fcport->stats_ret->fcoe,
- &fcport->stats->fcoe);
-
- do_gettimeofday(&tv);
- fcport->stats_ret->fcoe.secs_reset =
+ struct timeval tv;
+ if (fcport->stats_status == BFA_STATUS_OK)
+ do_gettimeofday(&tv);
+
+ list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
+ bfa_q_deq(&fcport->stats_pending_q, &qe);
+ cb = (struct bfa_cb_pending_q_s *)qe;
+ if (fcport->stats_status == BFA_STATUS_OK) {
+ ret = (union bfa_fcport_stats_u *)cb->data;
+ /* Swap FC QoS or FCoE stats */
+ if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
+ bfa_fcport_qos_stats_swap(&ret->fcqos,
+ &fcport->stats->fcqos);
+ else {
+ bfa_fcport_fcoe_stats_swap(&ret->fcoe,
+ &fcport->stats->fcoe);
+ ret->fcoe.secs_reset =
tv.tv_sec - fcport->stats_reset_time;
+ }
}
+ bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+ fcport->stats_status);
}
- fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+ fcport->stats_status = BFA_STATUS_OK;
} else {
- fcport->stats_busy = BFA_FALSE;
+ INIT_LIST_HEAD(&fcport->stats_pending_q);
fcport->stats_status = BFA_STATUS_OK;
}
}
@@ -3143,8 +3195,7 @@ bfa_fcport_stats_get_timeout(void *cbarg)
}
fcport->stats_status = BFA_STATUS_ETIMER;
- bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
- fcport);
+ __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
}
static void
@@ -3174,7 +3225,9 @@ bfa_fcport_send_stats_get(void *cbarg)
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
- struct bfa_fcport_s *fcport = cbarg;
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+ struct bfa_cb_pending_q_s *cb;
+ struct list_head *qe, *qen;
if (complete) {
struct timeval tv;
@@ -3184,10 +3237,15 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
*/
do_gettimeofday(&tv);
fcport->stats_reset_time = tv.tv_sec;
-
- fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+ list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
+ bfa_q_deq(&fcport->statsclr_pending_q, &qe);
+ cb = (struct bfa_cb_pending_q_s *)qe;
+ bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+ fcport->stats_status);
+ }
+ fcport->stats_status = BFA_STATUS_OK;
} else {
- fcport->stats_busy = BFA_FALSE;
+ INIT_LIST_HEAD(&fcport->statsclr_pending_q);
fcport->stats_status = BFA_STATUS_OK;
}
}
@@ -3205,8 +3263,7 @@ bfa_fcport_stats_clr_timeout(void *cbarg)
}
fcport->stats_status = BFA_STATUS_ETIMER;
- bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
- __bfa_cb_fcport_stats_clr, fcport);
+ __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
}
static void
@@ -3402,6 +3459,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
fcport->use_flash_cfg = BFA_FALSE;
}
+ if (fcport->cfg.qos_enabled)
+ fcport->qos_attr.state = BFA_QOS_OFFLINE;
+ else
+ fcport->qos_attr.state = BFA_QOS_DISABLED;
+
bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
}
break;
@@ -3426,28 +3488,26 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
/*
* check for timer pop before processing the rsp
*/
- if (fcport->stats_busy == BFA_FALSE ||
- fcport->stats_status == BFA_STATUS_ETIMER)
+ if (list_empty(&fcport->stats_pending_q) ||
+ (fcport->stats_status == BFA_STATUS_ETIMER))
break;
bfa_timer_stop(&fcport->timer);
fcport->stats_status = i2hmsg.pstatsget_rsp->status;
- bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
- __bfa_cb_fcport_stats_get, fcport);
+ __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
break;
case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
/*
* check for timer pop before processing the rsp
*/
- if (fcport->stats_busy == BFA_FALSE ||
- fcport->stats_status == BFA_STATUS_ETIMER)
+ if (list_empty(&fcport->statsclr_pending_q) ||
+ (fcport->stats_status == BFA_STATUS_ETIMER))
break;
bfa_timer_stop(&fcport->timer);
fcport->stats_status = BFA_STATUS_OK;
- bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
- __bfa_cb_fcport_stats_clr, fcport);
+ __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
break;
case BFI_FCPORT_I2H_ENABLE_AEN:
@@ -3779,25 +3839,25 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
* Fetch port statistics (FCQoS or FCoE).
*/
bfa_status_t
-bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
- bfa_cb_port_t cbfn, void *cbarg)
+bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- if (fcport->stats_busy) {
- bfa_trc(bfa, fcport->stats_busy);
- return BFA_STATUS_DEVBUSY;
- }
+ if (bfa_ioc_is_disabled(&bfa->ioc))
+ return BFA_STATUS_IOC_DISABLED;
- fcport->stats_busy = BFA_TRUE;
- fcport->stats_ret = stats;
- fcport->stats_cbfn = cbfn;
- fcport->stats_cbarg = cbarg;
+ if (!list_empty(&fcport->statsclr_pending_q))
+ return BFA_STATUS_DEVBUSY;
- bfa_fcport_send_stats_get(fcport);
+ if (list_empty(&fcport->stats_pending_q)) {
+ list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
+ bfa_fcport_send_stats_get(fcport);
+ bfa_timer_start(bfa, &fcport->timer,
+ bfa_fcport_stats_get_timeout,
+ fcport, BFA_FCPORT_STATS_TOV);
+ } else
+ list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
- bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
- fcport, BFA_FCPORT_STATS_TOV);
return BFA_STATUS_OK;
}
@@ -3805,27 +3865,25 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
* Reset port statistics (FCQoS or FCoE).
*/
bfa_status_t
-bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
+bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- if (fcport->stats_busy) {
- bfa_trc(bfa, fcport->stats_busy);
+ if (!list_empty(&fcport->stats_pending_q))
return BFA_STATUS_DEVBUSY;
- }
-
- fcport->stats_busy = BFA_TRUE;
- fcport->stats_cbfn = cbfn;
- fcport->stats_cbarg = cbarg;
- bfa_fcport_send_stats_clear(fcport);
+ if (list_empty(&fcport->statsclr_pending_q)) {
+ list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
+ bfa_fcport_send_stats_clear(fcport);
+ bfa_timer_start(bfa, &fcport->timer,
+ bfa_fcport_stats_clr_timeout,
+ fcport, BFA_FCPORT_STATS_TOV);
+ } else
+ list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
- bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
- fcport, BFA_FCPORT_STATS_TOV);
return BFA_STATUS_OK;
}
-
/*
* Fetch port attributes.
*/
@@ -4619,6 +4677,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
rp->fw_handle = msg.create_rsp->fw_handle;
rp->qos_attr = msg.create_rsp->qos_attr;
+ bfa_rport_set_lunmask(bfa, rp);
WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
break;
@@ -4626,6 +4685,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
case BFI_RPORT_I2H_DELETE_RSP:
rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
+ bfa_rport_unset_lunmask(bfa, rp);
bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
break;
@@ -4706,6 +4766,37 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
+/* Set Rport LUN Mask */
+void
+bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+ struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
+ wwn_t lp_wwn, rp_wwn;
+ u8 lp_tag = (u8)rp->rport_info.lp_tag;
+
+ rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+ lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+ BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+ rp->lun_mask = BFA_TRUE;
+ bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
+}
+
+/* Unset Rport LUN mask */
+void
+bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+ struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
+ wwn_t lp_wwn, rp_wwn;
+
+ rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+ lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+ BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+ rp->lun_mask = BFA_FALSE;
+ bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
+ BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
+}
/*
* SGPG related functions
@@ -5517,11 +5608,29 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
return BFA_STATUS_PORT_NOT_DISABLED;
}
- /* Check if the speed is supported */
- bfa_fcport_get_attr(bfa, &attr);
- bfa_trc(fcdiag, attr.speed_supported);
- if (speed > attr.speed_supported)
- return BFA_STATUS_UNSUPP_SPEED;
+ /*
+ * Check if input speed is supported by the port mode
+ */
+ if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+ if (!(speed == BFA_PORT_SPEED_1GBPS ||
+ speed == BFA_PORT_SPEED_2GBPS ||
+ speed == BFA_PORT_SPEED_4GBPS ||
+ speed == BFA_PORT_SPEED_8GBPS ||
+ speed == BFA_PORT_SPEED_16GBPS ||
+ speed == BFA_PORT_SPEED_AUTO)) {
+ bfa_trc(fcdiag, speed);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ bfa_fcport_get_attr(bfa, &attr);
+ bfa_trc(fcdiag, attr.speed_supported);
+ if (speed > attr.speed_supported)
+ return BFA_STATUS_UNSUPP_SPEED;
+ } else {
+ if (speed != BFA_PORT_SPEED_10GBPS) {
+ bfa_trc(fcdiag, speed);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ }
/* For Mezz card, port speed entered needs to be checked */
if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
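Editor's note: the bfa_svc.c hunks above replace the single outstanding-request model (a stats_busy flag plus one stats_cbfn/stats_cbarg pair) with two pending queues, so several callers can wait on one firmware round trip. Below is a minimal sketch of that pending-queue pattern using only generic list_head and spinlock primitives; the names and types are illustrative, not bfa's own.

	/*
	 * Pending-queue sketch: several callers may request statistics,
	 * only the first request triggers a firmware message, and the
	 * completion handler drains the whole queue.  Hypothetical types.
	 */
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct pending_req {
		struct list_head qe;
		void (*done)(void *arg, int status);	/* must not sleep */
		void *arg;
	};

	struct stats_ctx {
		spinlock_t lock;
		struct list_head pending_q;	/* outstanding requests */
	};

	/* Queue a request; kick the hardware only for the first one. */
	static int stats_get(struct stats_ctx *ctx, struct pending_req *req,
			     void (*send_to_fw)(struct stats_ctx *))
	{
		unsigned long flags;
		bool first;

		spin_lock_irqsave(&ctx->lock, flags);
		first = list_empty(&ctx->pending_q);
		list_add_tail(&req->qe, &ctx->pending_q);
		spin_unlock_irqrestore(&ctx->lock, flags);

		if (first)
			send_to_fw(ctx);	/* one firmware request serves all */
		return 0;
	}

	/* Completion: hand the same status to every queued requester. */
	static void stats_complete(struct stats_ctx *ctx, int status)
	{
		struct pending_req *req, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&ctx->lock, flags);
		list_for_each_entry_safe(req, tmp, &ctx->pending_q, qe) {
			list_del(&req->qe);
			req->done(req->arg, status);
		}
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

The key property is that only the caller who finds the queue empty sends a firmware request and starts the timer; later callers just queue, and the single response (or timeout) completes them all.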
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index fbe513a..95adb86 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -297,6 +297,7 @@ struct bfa_rport_s {
void *rport_drv; /* fcs/driver rport object */
u16 fw_handle; /* firmware rport handle */
u16 rport_tag; /* BFA rport tag */
+ u8 lun_mask; /* LUN mask flag */
struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
@@ -404,6 +405,7 @@ struct bfa_lps_s {
u8 bb_scn; /* local BB_SCN */
u8 lsrjt_rsn; /* LSRJT reason */
u8 lsrjt_expl; /* LSRJT explanation */
+ u8 lun_mask; /* LUN mask flag */
wwn_t pwwn; /* port wwn of lport */
wwn_t nwwn; /* node wwn of lport */
wwn_t pr_pwwn; /* port wwn of lport peer */
@@ -441,7 +443,6 @@ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
*/
#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
-typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
/*
* Link notification data structure
@@ -495,13 +496,11 @@ struct bfa_fcport_s {
u8 *stats_kva;
u64 stats_pa;
union bfa_fcport_stats_u *stats;
- union bfa_fcport_stats_u *stats_ret; /* driver stats location */
bfa_status_t stats_status; /* stats/statsclr status */
- bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
+ struct list_head stats_pending_q;
+ struct list_head statsclr_pending_q;
bfa_boolean_t stats_qfull;
u32 stats_reset_time; /* stats reset time stamp */
- bfa_cb_port_t stats_cbfn; /* driver callback function */
- void *stats_cbarg; /* *!< user callback arg */
bfa_boolean_t diag_busy; /* diag busy status */
bfa_boolean_t beacon; /* port beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon status */
@@ -552,10 +551,9 @@ void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
bfa_boolean_t link_e2e_beacon);
bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
- union bfa_fcport_stats_u *stats,
- bfa_cb_port_t cbfn, void *cbarg);
-bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
- void *cbarg);
+ struct bfa_cb_pending_q_s *cb);
+bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
+ struct bfa_cb_pending_q_s *cb);
bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
@@ -578,6 +576,19 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
struct bfa_rport_qos_attr_s new_qos_attr);
/*
+ * Rport LUN masking related
+ */
+#define BFA_RPORT_TAG_INVALID 0xffff
+#define BFA_LP_TAG_INVALID 0xff
+void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
+wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
+struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
+ wwn_t *lpwwn, wwn_t rpwwn);
+void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
+
+/*
* bfa fcxp API functions
*/
struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index beb30a7..66fb725 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1348,7 +1348,7 @@ int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
struct bfad_s *bfad;
- int error = -ENODEV, retval;
+ int error = -ENODEV, retval, i;
/* For single port cards - only claim function 0 */
if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
@@ -1372,6 +1372,12 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
bfa_trc_init(bfad->trcmod);
bfa_trc(bfad, bfad_inst);
+ /* AEN INIT */
+ INIT_LIST_HEAD(&bfad->free_aen_q);
+ INIT_LIST_HEAD(&bfad->active_aen_q);
+ for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
+ list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
+
if (!(bfad_load_fwimg(pdev))) {
kfree(bfad->trcmod);
goto out_alloc_trace_failure;
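Editor's note: the probe-time hunk above seeds a fixed pool of AEN entries onto free_aen_q; producers later move entries to active_aen_q and the notify worker returns them. A minimal sketch of that fixed-pool setup follows, with illustrative names; the pool size is an assumption for the sketch, not the driver's BFA_AEN_MAX_ENTRY value.

	#include <linux/list.h>
	#include <linux/types.h>

	#define AEN_POOL_SIZE	256	/* assumed size for illustration */

	struct aen_entry {
		struct list_head qe;
		u32 seq;
	};

	struct aen_pool {
		struct list_head free_q;	/* entries available to producers */
		struct list_head active_q;	/* entries waiting to be posted */
		struct aen_entry entries[AEN_POOL_SIZE];
	};

	/* All entries start on the free list; no allocation at event time. */
	static void aen_pool_init(struct aen_pool *p)
	{
		int i;

		INIT_LIST_HEAD(&p->free_q);
		INIT_LIST_HEAD(&p->active_q);
		for (i = 0; i < AEN_POOL_SIZE; i++)
			list_add_tail(&p->entries[i].qe, &p->free_q);
	}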
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 89f863e..06fc00c 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -56,7 +56,7 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (bfad->disable_active) {
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- return EBUSY;
+ return -EBUSY;
}
bfad->disable_active = BFA_TRUE;
@@ -90,6 +90,7 @@ bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
iocmd->factorynwwn = pattr.factorynwwn;
iocmd->factorypwwn = pattr.factorypwwn;
+ iocmd->bfad_num = bfad->inst_no;
im_port = bfad->pport.im_port;
iocmd->host = im_port->shost->host_no;
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -178,6 +179,38 @@ out:
}
int
+bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ if (v_cmd == IOCMD_IOC_RESET_STATS) {
+ bfa_ioc_clear_stats(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ }
+
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
+
+ if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
+ strcpy(bfad->adapter_name, iocmd->name);
+ else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
+ strcpy(bfad->port_name, iocmd->name);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
@@ -306,6 +339,81 @@ out:
return 0;
}
+int
+bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ return 0;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ return 0;
+}
+
+int
+bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_PORT_CFG_TOPO)
+ cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CFG_SPEED)
+ cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CFG_ALPA)
+ cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CLR_ALPA)
+ cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
+ (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+ if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
+ fcport->cfg.bb_scn_state = BFA_TRUE;
+ else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
+ fcport->cfg.bb_scn_state = BFA_FALSE;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
@@ -354,6 +462,40 @@ out:
}
int
+bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_reset_stats_s *iocmd =
+ (struct bfa_bsg_reset_stats_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_clear_stats(fcs_port);
+ /* clear IO stats from all active itnims */
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
+ continue;
+ bfa_itnim_clear_stats(itnim);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
struct bfa_fcs_lport_s *fcs_port;
@@ -389,7 +531,7 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
void *iocmd_bufptr;
if (iocmd->nrports == 0)
- return EINVAL;
+ return -EINVAL;
if (bfad_chk_iocmd_sz(payload_len,
sizeof(struct bfa_bsg_lport_get_rports_s),
@@ -539,6 +681,152 @@ out:
return 0;
}
+int
+bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_reset_stats_s *iocmd =
+ (struct bfa_bsg_rport_reset_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ struct bfa_rport_s *rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
+ rport = bfa_fcs_rport_get_halrport(fcs_rport);
+ memset(&rport->stats, 0, sizeof(rport->stats));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_set_speed_s *iocmd =
+ (struct bfa_bsg_rport_set_speed_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ fcs_rport->rpf.assigned_speed = iocmd->speed;
+ /* Set this speed in f/w only if the RPSC speed is not available */
+ if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
+ bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_vport_stats_s *iocmd =
+ (struct bfa_bsg_vport_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
+ sizeof(struct bfa_vport_stats_s));
+ memcpy((void *)&iocmd->vport_stats.port_stats,
+ (void *)&fcs_vport->lport.stats,
+ sizeof(struct bfa_lport_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_reset_stats_s *iocmd =
+ (struct bfa_bsg_reset_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
+ memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
unsigned int payload_len)
@@ -582,6 +870,66 @@ out:
}
int
+bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ if (cmd == IOCMD_RATELIM_ENABLE)
+ fcport->cfg.ratelimit = BFA_TRUE;
+ else if (cmd == IOCMD_RATELIM_DISABLE)
+ fcport->cfg.ratelimit = BFA_FALSE;
+
+ if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
+ fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+
+ return 0;
+}
+
+int
+bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ /* Auto and speeds greater than the supported speed are invalid */
+ if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
+ (iocmd->speed > fcport->speed_sup)) {
+ iocmd->status = BFA_STATUS_UNSUPP_SPEED;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+ }
+
+ fcport->cfg.trl_def_speed = iocmd->speed;
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcpim_modstats_s *iocmd =
@@ -604,6 +952,28 @@ bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
}
int
+bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
+ (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_itnim_clear_stats(itnim);
+ }
+ memset(&fcpim->del_itn_stats, 0,
+ sizeof(struct bfa_fcpim_del_itn_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
@@ -670,6 +1040,35 @@ bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
}
static int
+bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_reset_stats_s *iocmd =
+ (struct bfa_bsg_rport_reset_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else {
+ iocmd->status = BFA_STATUS_OK;
+ bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
+ bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_itnim_itnstats_s *iocmd =
@@ -1511,11 +1910,545 @@ out:
return 0;
}
+#define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
+int
+bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
+ BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
+ !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
+ !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
+ bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
+ iocmd->status = BFA_STATUS_EINVAL;
+ goto out;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
+ (u32 *)&iocmd->offset, &iocmd->bufsz);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
+ bfad->plog_buf.head = bfad->plog_buf.tail = 0;
+ else if (v_cmd == IOCMD_DEBUG_START_DTRC)
+ bfa_trc_init(bfad->trcmod);
+ else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
+ bfa_trc_stop(bfad->trcmod);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
+
+ if (iocmd->ctl == BFA_TRUE)
+ bfad->plog_buf.plog_enabled = 1;
+ else
+ bfad->plog_buf.plog_enabled = 0;
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_fcpim_profile_s *iocmd =
+ (struct bfa_bsg_fcpim_profile_s *)cmd;
+ struct timeval tv;
+ unsigned long flags;
+
+ do_gettimeofday(&tv);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
+ iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+ else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
+ iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_ioprofile_s *iocmd =
+ (struct bfa_bsg_itnim_ioprofile_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else
+ iocmd->status = bfa_itnim_get_ioprofile(
+ bfa_fcs_itnim_get_halitn(itnim),
+ &iocmd->ioprofile);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcport_stats_s *iocmd =
+ (struct bfa_bsg_fcport_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, &iocmd->stats);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+ &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+ &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
+ struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
+ pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
+ pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
+ memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_PXECFG,
+ bfad->bfa.ioc.port_id, &iocmd->cfg,
+ sizeof(struct bfa_ethboot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_PXECFG,
+ bfad->bfa.ioc.port_id, &iocmd->cfg,
+ sizeof(struct bfa_ethboot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ if (v_cmd == IOCMD_TRUNK_ENABLE) {
+ trunk->attr.state = BFA_TRUNK_OFFLINE;
+ bfa_fcport_disable(&bfad->bfa);
+ fcport->cfg.trunked = BFA_TRUE;
+ } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
+ trunk->attr.state = BFA_TRUNK_DISABLED;
+ bfa_fcport_disable(&bfad->bfa);
+ fcport->cfg.trunked = BFA_FALSE;
+ }
+
+ if (!bfa_fcport_is_disabled(&bfad->bfa))
+ bfa_fcport_enable(&bfad->bfa);
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
+ sizeof(struct bfa_trunk_attr_s));
+ iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+ if (v_cmd == IOCMD_QOS_ENABLE)
+ fcport->cfg.qos_enabled = BFA_TRUE;
+ else if (v_cmd == IOCMD_QOS_DISABLE)
+ fcport->cfg.qos_enabled = BFA_FALSE;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->attr.state = fcport->qos_attr.state;
+ iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_qos_vc_attr_s *iocmd =
+ (struct bfa_bsg_qos_vc_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
+ unsigned long flags;
+ u32 i = 0;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
+ iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
+ iocmd->attr.elp_opmode_flags =
+ be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
+
+ /* Individual VC info */
+ while (i < iocmd->attr.total_vc_count) {
+ iocmd->attr.vc_info[i].vc_credit =
+ bfa_vc_attr->vc_info[i].vc_credit;
+ iocmd->attr.vc_info[i].borrow_credit =
+ bfa_vc_attr->vc_info[i].borrow_credit;
+ iocmd->attr.vc_info[i].priority =
+ bfa_vc_attr->vc_info[i].priority;
+ i++;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcport_stats_s *iocmd =
+ (struct bfa_bsg_fcport_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, &iocmd->stats);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+ iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, NULL);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+ iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vf_stats_s *iocmd =
+ (struct bfa_bsg_vf_stats_s *)cmd;
+ struct bfa_fcs_fabric_s *fcs_vf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
+ sizeof(struct bfa_vf_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vf_reset_stats_s *iocmd =
+ (struct bfa_bsg_vf_reset_stats_s *)cmd;
+ struct bfa_fcs_fabric_s *fcs_vf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+ iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
+ else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+ iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
+ else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+ iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
+ (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
+ struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_fcpim_lunmask_s *iocmd =
+ (struct bfa_bsg_fcpim_lunmask_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
+ iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
+ &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
+ else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
+ iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
+ iocmd->vf_id, &iocmd->pwwn,
+ iocmd->rpwwn, iocmd->lun);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
static int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
unsigned int payload_len)
{
- int rc = EINVAL;
+ int rc = -EINVAL;
switch (cmd) {
case IOCMD_IOC_ENABLE:
@@ -1536,6 +2469,14 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
case IOCMD_IOC_GET_FWSTATS:
rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
break;
+ case IOCMD_IOC_RESET_STATS:
+ case IOCMD_IOC_RESET_FWSTATS:
+ rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
+ break;
+ case IOCMD_IOC_SET_ADAPTER_NAME:
+ case IOCMD_IOC_SET_PORT_NAME:
+ rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
+ break;
case IOCMD_IOCFC_GET_ATTR:
rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
break;
@@ -1554,12 +2495,31 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
case IOCMD_PORT_GET_STATS:
rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
break;
+ case IOCMD_PORT_RESET_STATS:
+ rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_PORT_CFG_TOPO:
+ case IOCMD_PORT_CFG_SPEED:
+ case IOCMD_PORT_CFG_ALPA:
+ case IOCMD_PORT_CLR_ALPA:
+ rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
+ break;
+ case IOCMD_PORT_CFG_MAXFRSZ:
+ rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
+ break;
+ case IOCMD_PORT_BBSC_ENABLE:
+ case IOCMD_PORT_BBSC_DISABLE:
+ rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
+ break;
case IOCMD_LPORT_GET_ATTR:
rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
break;
case IOCMD_LPORT_GET_STATS:
rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
break;
+ case IOCMD_LPORT_RESET_STATS:
+ rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
+ break;
case IOCMD_LPORT_GET_IOSTATS:
rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
break;
@@ -1575,12 +2535,40 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
case IOCMD_RPORT_GET_STATS:
rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
break;
+ case IOCMD_RPORT_RESET_STATS:
+ rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_SET_SPEED:
+ rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_GET_ATTR:
+ rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_GET_STATS:
+ rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_RESET_STATS:
+ rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
+ break;
case IOCMD_FABRIC_GET_LPORTS:
rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
break;
+ case IOCMD_RATELIM_ENABLE:
+ case IOCMD_RATELIM_DISABLE:
+ rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
+ break;
+ case IOCMD_RATELIM_DEF_SPEED:
+ rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
+ break;
+ case IOCMD_FCPIM_FAILOVER:
+ rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
+ break;
case IOCMD_FCPIM_MODSTATS:
rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
break;
+ case IOCMD_FCPIM_MODSTATSCLR:
+ rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
+ break;
case IOCMD_FCPIM_DEL_ITN_STATS:
rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
break;
@@ -1590,6 +2578,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
case IOCMD_ITNIM_GET_IOSTATS:
rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
break;
+ case IOCMD_ITNIM_RESET_STATS:
+ rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
+ break;
case IOCMD_ITNIM_GET_ITNSTATS:
rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
break;
@@ -1702,11 +2693,92 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
case IOCMD_DEBUG_PORTLOG:
rc = bfad_iocmd_porglog_get(bfad, iocmd);
break;
+ case IOCMD_DEBUG_FW_CORE:
+ rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_DEBUG_FW_STATE_CLR:
+ case IOCMD_DEBUG_PORTLOG_CLR:
+ case IOCMD_DEBUG_START_DTRC:
+ case IOCMD_DEBUG_STOP_DTRC:
+ rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
+ break;
+ case IOCMD_DEBUG_PORTLOG_CTL:
+ rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_PROFILE_ON:
+ case IOCMD_FCPIM_PROFILE_OFF:
+ rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
+ break;
+ case IOCMD_ITNIM_GET_IOPROFILE:
+ rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_GET_STATS:
+ rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_RESET_STATS:
+ rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_BOOT_CFG:
+ rc = bfad_iocmd_boot_cfg(bfad, iocmd);
+ break;
+ case IOCMD_BOOT_QUERY:
+ rc = bfad_iocmd_boot_query(bfad, iocmd);
+ break;
+ case IOCMD_PREBOOT_QUERY:
+ rc = bfad_iocmd_preboot_query(bfad, iocmd);
+ break;
+ case IOCMD_ETHBOOT_CFG:
+ rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
+ break;
+ case IOCMD_ETHBOOT_QUERY:
+ rc = bfad_iocmd_ethboot_query(bfad, iocmd);
+ break;
+ case IOCMD_TRUNK_ENABLE:
+ case IOCMD_TRUNK_DISABLE:
+ rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
+ break;
+ case IOCMD_TRUNK_GET_ATTR:
+ rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_ENABLE:
+ case IOCMD_QOS_DISABLE:
+ rc = bfad_iocmd_qos(bfad, iocmd, cmd);
+ break;
+ case IOCMD_QOS_GET_ATTR:
+ rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_GET_VC_ATTR:
+ rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_GET_STATS:
+ rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_QOS_RESET_STATS:
+ rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_VF_GET_STATS:
+ rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_VF_RESET_STATS:
+ rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_ENABLE:
+ case IOCMD_FCPIM_LUNMASK_DISABLE:
+ case IOCMD_FCPIM_LUNMASK_CLEAR:
+ rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_QUERY:
+ rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_ADD:
+ case IOCMD_FCPIM_LUNMASK_DELETE:
+ rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
+ break;
default:
- rc = EINVAL;
+ rc = -EINVAL;
break;
}
- return -rc;
+ return rc;
}
static int
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 99b0e8a..e859adb 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -30,24 +30,48 @@ enum {
IOCMD_IOC_GET_INFO,
IOCMD_IOC_GET_STATS,
IOCMD_IOC_GET_FWSTATS,
+ IOCMD_IOC_RESET_STATS,
+ IOCMD_IOC_RESET_FWSTATS,
+ IOCMD_IOC_SET_ADAPTER_NAME,
+ IOCMD_IOC_SET_PORT_NAME,
IOCMD_IOCFC_GET_ATTR,
IOCMD_IOCFC_SET_INTR,
IOCMD_PORT_ENABLE,
IOCMD_PORT_DISABLE,
IOCMD_PORT_GET_ATTR,
IOCMD_PORT_GET_STATS,
+ IOCMD_PORT_RESET_STATS,
+ IOCMD_PORT_CFG_TOPO,
+ IOCMD_PORT_CFG_SPEED,
+ IOCMD_PORT_CFG_ALPA,
+ IOCMD_PORT_CFG_MAXFRSZ,
+ IOCMD_PORT_CLR_ALPA,
+ IOCMD_PORT_BBSC_ENABLE,
+ IOCMD_PORT_BBSC_DISABLE,
IOCMD_LPORT_GET_ATTR,
IOCMD_LPORT_GET_RPORTS,
IOCMD_LPORT_GET_STATS,
+ IOCMD_LPORT_RESET_STATS,
IOCMD_LPORT_GET_IOSTATS,
IOCMD_RPORT_GET_ATTR,
IOCMD_RPORT_GET_ADDR,
IOCMD_RPORT_GET_STATS,
+ IOCMD_RPORT_RESET_STATS,
+ IOCMD_RPORT_SET_SPEED,
+ IOCMD_VPORT_GET_ATTR,
+ IOCMD_VPORT_GET_STATS,
+ IOCMD_VPORT_RESET_STATS,
IOCMD_FABRIC_GET_LPORTS,
+ IOCMD_RATELIM_ENABLE,
+ IOCMD_RATELIM_DISABLE,
+ IOCMD_RATELIM_DEF_SPEED,
+ IOCMD_FCPIM_FAILOVER,
IOCMD_FCPIM_MODSTATS,
+ IOCMD_FCPIM_MODSTATSCLR,
IOCMD_FCPIM_DEL_ITN_STATS,
IOCMD_ITNIM_GET_ATTR,
IOCMD_ITNIM_GET_IOSTATS,
+ IOCMD_ITNIM_RESET_STATS,
IOCMD_ITNIM_GET_ITNSTATS,
IOCMD_IOC_PCIFN_CFG,
IOCMD_FCPORT_ENABLE,
@@ -86,6 +110,39 @@ enum {
IOCMD_PHY_READ_FW,
IOCMD_VHBA_QUERY,
IOCMD_DEBUG_PORTLOG,
+ IOCMD_DEBUG_FW_CORE,
+ IOCMD_DEBUG_FW_STATE_CLR,
+ IOCMD_DEBUG_PORTLOG_CLR,
+ IOCMD_DEBUG_START_DTRC,
+ IOCMD_DEBUG_STOP_DTRC,
+ IOCMD_DEBUG_PORTLOG_CTL,
+ IOCMD_FCPIM_PROFILE_ON,
+ IOCMD_FCPIM_PROFILE_OFF,
+ IOCMD_ITNIM_GET_IOPROFILE,
+ IOCMD_FCPORT_GET_STATS,
+ IOCMD_FCPORT_RESET_STATS,
+ IOCMD_BOOT_CFG,
+ IOCMD_BOOT_QUERY,
+ IOCMD_PREBOOT_QUERY,
+ IOCMD_ETHBOOT_CFG,
+ IOCMD_ETHBOOT_QUERY,
+ IOCMD_TRUNK_ENABLE,
+ IOCMD_TRUNK_DISABLE,
+ IOCMD_TRUNK_GET_ATTR,
+ IOCMD_QOS_ENABLE,
+ IOCMD_QOS_DISABLE,
+ IOCMD_QOS_GET_ATTR,
+ IOCMD_QOS_GET_VC_ATTR,
+ IOCMD_QOS_GET_STATS,
+ IOCMD_QOS_RESET_STATS,
+ IOCMD_VF_GET_STATS,
+ IOCMD_VF_RESET_STATS,
+ IOCMD_FCPIM_LUNMASK_ENABLE,
+ IOCMD_FCPIM_LUNMASK_DISABLE,
+ IOCMD_FCPIM_LUNMASK_CLEAR,
+ IOCMD_FCPIM_LUNMASK_QUERY,
+ IOCMD_FCPIM_LUNMASK_ADD,
+ IOCMD_FCPIM_LUNMASK_DELETE,
};
struct bfa_bsg_gen_s {
@@ -94,6 +151,43 @@ struct bfa_bsg_gen_s {
u16 rsvd;
};
+struct bfa_bsg_portlogctl_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ bfa_boolean_t ctl;
+ int inst_no;
+};
+
+struct bfa_bsg_fcpim_profile_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+};
+
+struct bfa_bsg_itnim_ioprofile_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_ioprofile_s ioprofile;
+};
+
+struct bfa_bsg_fcport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ union bfa_fcport_stats_u stats;
+};
+
+struct bfa_bsg_ioc_name_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ char name[BFA_ADAPTER_SYM_NAME_LEN];
+};
+
struct bfa_bsg_ioc_info_s {
bfa_status_t status;
u16 bfad_num;
@@ -164,6 +258,20 @@ struct bfa_bsg_port_attr_s {
struct bfa_port_attr_s attr;
};
+struct bfa_bsg_port_cfg_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 param;
+ u32 rsvd1;
+};
+
+struct bfa_bsg_port_cfg_maxfrsize_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 maxfrsize;
+};
+
struct bfa_bsg_port_stats_s {
bfa_status_t status;
u16 bfad_num;
@@ -237,6 +345,47 @@ struct bfa_bsg_rport_scsi_addr_s {
u32 lun;
};
+struct bfa_bsg_rport_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+};
+
+struct bfa_bsg_rport_set_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ enum bfa_port_speed speed;
+ u32 rsvd;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+};
+
+struct bfa_bsg_vport_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+ struct bfa_vport_attr_s vport_attr;
+};
+
+struct bfa_bsg_vport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+ struct bfa_vport_stats_s vport_stats;
+};
+
+struct bfa_bsg_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+};
+
struct bfa_bsg_fabric_get_lports_s {
bfa_status_t status;
u16 bfad_num;
@@ -246,6 +395,19 @@ struct bfa_bsg_fabric_get_lports_s {
u32 rsvd;
};
+struct bfa_bsg_trl_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_fcpim_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 param;
+};
+
struct bfa_bsg_fcpim_modstats_s {
bfa_status_t status;
u16 bfad_num;
@@ -258,6 +420,11 @@ struct bfa_bsg_fcpim_del_itn_stats_s {
struct bfa_fcpim_del_itn_stats_s modstats;
};
+struct bfa_bsg_fcpim_modstatsclr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+};
+
struct bfa_bsg_itnim_attr_s {
bfa_status_t status;
u16 bfad_num;
@@ -485,6 +652,76 @@ struct bfa_bsg_vhba_attr_s {
struct bfa_vhba_attr_s attr;
};
+struct bfa_bsg_boot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_boot_cfg_s cfg;
+};
+
+struct bfa_bsg_preboot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_boot_pbc_s cfg;
+};
+
+struct bfa_bsg_ethboot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ethboot_cfg_s cfg;
+};
+
+struct bfa_bsg_trunk_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_trunk_attr_s attr;
+};
+
+struct bfa_bsg_qos_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_qos_attr_s attr;
+};
+
+struct bfa_bsg_qos_vc_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_qos_vc_attr_s attr;
+};
+
+struct bfa_bsg_vf_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ struct bfa_vf_stats_s stats;
+};
+
+struct bfa_bsg_vf_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+};
+
+struct bfa_bsg_fcpim_lunmask_query_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ struct bfa_lunmask_cfg_s lun_mask;
+};
+
+struct bfa_bsg_fcpim_lunmask_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ struct scsi_lun lun;
+};
+
struct bfa_bsg_fcpt_s {
bfa_status_t status;
u16 vf_id;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 2f585f4..5e19a5f 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -55,7 +55,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
-#define BFAD_DRIVER_VERSION "3.0.2.1"
+#define BFAD_DRIVER_VERSION "3.0.2.2"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
@@ -223,6 +223,10 @@ struct bfad_s {
char *regdata;
u32 reglen;
struct dentry *bfad_dentry_files[5];
+ struct list_head free_aen_q;
+ struct list_head active_aen_q;
+ struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
+ spinlock_t bfad_aen_spinlock;
};
/* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index f2bf812..0131238 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -656,6 +656,31 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
+static void bfad_aen_im_notify_handler(struct work_struct *work)
+{
+ struct bfad_im_s *im =
+ container_of(work, struct bfad_im_s, aen_im_notify_work);
+ struct bfa_aen_entry_s *aen_entry;
+ struct bfad_s *bfad = im->bfad;
+ struct Scsi_Host *shost = bfad->pport.im_port->shost;
+ void *event_data;
+ unsigned long flags;
+
+ while (!list_empty(&bfad->active_aen_q)) {
+ spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+ bfa_q_deq(&bfad->active_aen_q, &aen_entry);
+ spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+ event_data = (char *)aen_entry + sizeof(struct list_head);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(struct bfa_aen_entry_s) -
+ sizeof(struct list_head),
+ (char *)event_data, BFAD_NL_VENDOR_ID);
+ spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+ list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
+ spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+ }
+}
+
bfa_status_t
bfad_im_probe(struct bfad_s *bfad)
{
@@ -676,6 +701,7 @@ bfad_im_probe(struct bfad_s *bfad)
rc = BFA_STATUS_FAILED;
}
+ INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
ext:
return rc;
}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 4fe34d5..004b6cf 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -115,8 +115,30 @@ struct bfad_im_s {
struct bfad_s *bfad;
struct workqueue_struct *drv_workq;
char drv_workq_name[KOBJ_NAME_LEN];
+ struct work_struct aen_im_notify_work;
};
+#define bfad_get_aen_entry(_drv, _entry) do { \
+ unsigned long _flags; \
+ spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags); \
+ bfa_q_deq(&(_drv)->free_aen_q, &(_entry)); \
+ if (_entry) \
+ list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q); \
+ spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags); \
+} while (0)
+
+/* post fc_host vendor event */
+#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
+ do_gettimeofday(&(_entry)->aen_tv); \
+ (_entry)->bfad_num = (_drv)->inst_no; \
+ (_entry)->seq_num = (_cnt); \
+ (_entry)->aen_category = (_cat); \
+ (_entry)->aen_type = (_evt); \
+ if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
+ queue_work((_drv)->im->drv_workq, \
+ &(_drv)->im->aen_im_notify_work); \
+} while (0)
+
struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
struct bfad_s *);
bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
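The two macros above split AEN delivery into an interrupt-safe enqueue step (bfad_get_aen_entry) and a deferred notification step (bfad_im_post_vendor_event queues aen_im_notify_work, whose handler posts the entry as an fc_host vendor event and returns it to free_aen_q). The snippet below is an illustrative sketch of a producer, not part of the patch; the function name, the sequence counter and the zeroed category/event values are placeholders.

/* Illustrative sketch only -- hypothetical caller of the AEN macros above. */
static void bfad_example_post_aen(struct bfad_s *bfad, u32 seq_cnt)
{
	struct bfa_aen_entry_s *aen_entry;

	/* Reserve an entry: moves it from free_aen_q to active_aen_q
	 * under bfad_aen_spinlock; aen_entry is NULL if the pool is empty. */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;	/* pool exhausted, event dropped */

	/* Fill event-specific payload into aen_entry here. */

	/* Timestamp and tag the entry, then queue aen_im_notify_work so the
	 * handler drains active_aen_q via fc_host_post_vendor_event().
	 * Category/event ids below are placeholders. */
	bfad_im_post_vendor_event(aen_entry, bfad, seq_cnt,
				  0 /* category */, 0 /* event */);
}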
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 1e258d5..b2ba0b2 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -784,6 +784,17 @@ enum bfi_sfp_i2h_e {
};
/*
+ * SFP state change notification
+ */
+struct bfi_sfp_scn_s {
+ struct bfi_mhdr_s mhr; /* host msg header */
+ u8 event;
+ u8 sfpid;
+ u8 pomlvl; /* pom level: normal/warning/alarm */
+ u8 is_elb; /* e-loopback */
+};
+
+/*
* SFP state
*/
enum bfa_sfp_stat_e {
@@ -926,6 +937,15 @@ struct bfi_flash_erase_rsp_s {
};
/*
+ * Flash event notification
+ */
+struct bfi_flash_event_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ bfa_status_t status;
+ u32 param;
+};
+
+/*
*----------------------------------------------------------------------
* DIAG
*----------------------------------------------------------------------
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index d924236..42228ca 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -2,7 +2,7 @@
#define _BNX2FC_H_
/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -62,7 +62,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.3"
+#define BNX2FC_VERSION "1.0.4"
#define PFX "bnx2fc: "
@@ -141,6 +141,10 @@
#define BNX2FC_RNID_HBA 0x7
+#define SRR_RETRY_COUNT 5
+#define REC_RETRY_COUNT 1
+#define BNX2FC_NUM_ERR_BITS 63
+
/* bnx2fc driver uses only one instance of fcoe_percpu_s */
extern struct fcoe_percpu_s bnx2fc_global;
@@ -153,18 +157,13 @@ struct bnx2fc_percpu_s {
};
struct bnx2fc_hba {
- struct list_head link;
+ struct list_head list;
struct cnic_dev *cnic;
struct pci_dev *pcidev;
- struct net_device *netdev;
struct net_device *phys_dev;
unsigned long reg_with_cnic;
#define BNX2FC_CNIC_REGISTERED 1
- struct packet_type fcoe_packet_type;
- struct packet_type fip_packet_type;
struct bnx2fc_cmd_mgr *cmd_mgr;
- struct workqueue_struct *timer_work_queue;
- struct kref kref;
spinlock_t hba_lock;
struct mutex hba_mutex;
unsigned long adapter_state;
@@ -172,15 +171,9 @@ struct bnx2fc_hba {
#define ADAPTER_STATE_GOING_DOWN 1
#define ADAPTER_STATE_LINK_DOWN 2
#define ADAPTER_STATE_READY 3
- u32 flags;
- unsigned long init_done;
- #define BNX2FC_FW_INIT_DONE 0
- #define BNX2FC_CTLR_INIT_DONE 1
- #define BNX2FC_CREATE_DONE 2
- struct fcoe_ctlr ctlr;
- struct list_head vports;
- u8 vlan_enabled;
- int vlan_id;
+ unsigned long flags;
+ #define BNX2FC_FLAG_FW_INIT_DONE 0
+ #define BNX2FC_FLAG_DESTROY_CMPL 1
u32 next_conn_id;
struct fcoe_task_ctx_entry **task_ctx;
dma_addr_t *task_ctx_dma;
@@ -199,38 +192,41 @@ struct bnx2fc_hba {
char *dummy_buffer;
dma_addr_t dummy_buf_dma;
+ /* Active list of offloaded sessions */
+ struct bnx2fc_rport **tgt_ofld_list;
+
+ /* statistics */
struct fcoe_statistics_params *stats_buffer;
dma_addr_t stats_buf_dma;
-
- /*
- * PCI related info.
- */
- u16 pci_did;
- u16 pci_vid;
- u16 pci_sdid;
- u16 pci_svid;
- u16 pci_func;
- u16 pci_devno;
-
- struct task_struct *l2_thread;
-
- /* linkdown handling */
- wait_queue_head_t shutdown_wait;
- int wait_for_link_down;
+ struct completion stat_req_done;
/*destroy handling */
struct timer_list destroy_timer;
wait_queue_head_t destroy_wait;
- /* Active list of offloaded sessions */
- struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS];
+ /* linkdown handling */
+ wait_queue_head_t shutdown_wait;
+ int wait_for_link_down;
int num_ofld_sess;
+ struct list_head vports;
+};
- /* statistics */
- struct completion stat_req_done;
+struct bnx2fc_interface {
+ struct list_head list;
+ unsigned long if_flags;
+ #define BNX2FC_CTLR_INIT_DONE 0
+ struct bnx2fc_hba *hba;
+ struct net_device *netdev;
+ struct packet_type fcoe_packet_type;
+ struct packet_type fip_packet_type;
+ struct workqueue_struct *timer_work_queue;
+ struct kref kref;
+ struct fcoe_ctlr ctlr;
+ u8 vlan_enabled;
+ int vlan_id;
};
-#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
+#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
struct bnx2fc_lport {
struct list_head list;
@@ -252,9 +248,11 @@ struct bnx2fc_rport {
struct fc_rport_priv *rdata;
void __iomem *ctx_base;
#define DPM_TRIGER_TYPE 0x40
+ u32 io_timeout;
u32 fcoe_conn_id;
u32 context_id;
u32 sid;
+ int dev_type;
unsigned long flags;
#define BNX2FC_FLAG_SESSION_READY 0x1
@@ -262,10 +260,9 @@ struct bnx2fc_rport {
#define BNX2FC_FLAG_DISABLED 0x3
#define BNX2FC_FLAG_DESTROYED 0x4
#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
-#define BNX2FC_FLAG_DESTROY_CMPL 0x6
-#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7
-#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8
-#define BNX2FC_FLAG_EXPL_LOGO 0x9
+#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6
+#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
+#define BNX2FC_FLAG_EXPL_LOGO 0x8
u8 src_addr[ETH_ALEN];
u32 max_sqes;
@@ -327,12 +324,9 @@ struct bnx2fc_rport {
spinlock_t cq_lock;
atomic_t num_active_ios;
u32 flush_in_prog;
- unsigned long work_time_slice;
unsigned long timestamp;
struct list_head free_task_list;
struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
- atomic_t pi;
- atomic_t ci;
struct list_head active_cmd_queue;
struct list_head els_queue;
struct list_head io_retire_queue;
@@ -367,6 +361,8 @@ struct bnx2fc_els_cb_arg {
struct bnx2fc_cmd *aborted_io_req;
struct bnx2fc_cmd *io_req;
u16 l2_oxid;
+ u32 offset;
+ enum fc_rctl r_ctl;
};
/* bnx2fc command structure */
@@ -380,6 +376,7 @@ struct bnx2fc_cmd {
#define BNX2FC_ABTS 3
#define BNX2FC_ELS 4
#define BNX2FC_CLEANUP 5
+#define BNX2FC_SEQ_CLEANUP 6
u8 io_req_flags;
struct kref refcount;
struct fcoe_port *port;
@@ -393,6 +390,7 @@ struct bnx2fc_cmd {
struct completion tm_done;
int wait_for_comp;
u16 xid;
+ struct fcoe_err_report_entry err_entry;
struct fcoe_task_ctx_entry *task;
struct io_bdt *bd_tbl;
struct fcp_rsp *rsp;
@@ -409,6 +407,12 @@ struct bnx2fc_cmd {
#define BNX2FC_FLAG_IO_COMPL 0x9
#define BNX2FC_FLAG_ELS_DONE 0xa
#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
+#define BNX2FC_FLAG_CMD_LOST 0xc
+#define BNX2FC_FLAG_SRR_SENT 0xd
+ u8 rec_retry;
+ u8 srr_retry;
+ u32 srr_offset;
+ u8 srr_rctl;
u32 fcp_resid;
u32 fcp_rsp_len;
u32 fcp_sns_len;
@@ -439,6 +443,7 @@ struct bnx2fc_unsol_els {
+struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
void bnx2fc_cmd_release(struct kref *ref);
int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
@@ -476,6 +481,10 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u16 orig_xid);
+void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req,
+ struct fcoe_task_ctx_entry *task,
+ struct bnx2fc_cmd *orig_io_req,
+ u32 offset);
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task);
void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
@@ -525,5 +534,13 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
unsigned char *buf,
u32 frame_len, u16 l2_oxid);
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
+int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req);
+int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req);
+int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl);
+void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 rx_state);
+int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
+ enum fc_rctl r_ctl);
#endif
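The structural change above splits the old monolithic bnx2fc_hba: the hba keeps per-cnic-device state (command manager, offload list, firmware init flags), while the new bnx2fc_interface carries per-netdev FCoE state (FIP controller, packet handlers, VLAN). The fragment below is an illustrative sketch of how an hba-level event now fans out over its interfaces, mirroring the list walk used later in this series; the function name is hypothetical, if_list is the interface list added by this patch, and bnx2fc_dev_lock is the existing module mutex.

/* Illustrative sketch only -- fan an hba-level event out to every
 * bnx2fc_interface created on top of that hba. */
static void bnx2fc_example_for_each_if(struct bnx2fc_hba *hba,
				       void (*fn)(struct bnx2fc_interface *))
{
	struct bnx2fc_interface *interface;

	mutex_lock(&bnx2fc_dev_lock);
	list_for_each_entry(interface, &if_list, list)
		if (interface->hba == hba)
			fn(interface);
	mutex_unlock(&bnx2fc_dev_lock);
}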
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
index 7f6aff6..3416d9a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -21,21 +21,21 @@ extern unsigned int bnx2fc_debug_level;
#define BNX2FC_ELS_DBG(fmt, arg...) \
BNX2FC_CHK_LOGGING(LOG_ELS, \
- printk(KERN_ALERT PFX fmt, ##arg))
+ printk(KERN_INFO PFX fmt, ##arg))
#define BNX2FC_MISC_DBG(fmt, arg...) \
BNX2FC_CHK_LOGGING(LOG_MISC, \
- printk(KERN_ALERT PFX fmt, ##arg))
+ printk(KERN_INFO PFX fmt, ##arg))
#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
do { \
if (!io_req || !io_req->port || !io_req->port->lport || \
!io_req->port->lport->host) \
BNX2FC_CHK_LOGGING(LOG_IO, \
- printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
else \
BNX2FC_CHK_LOGGING(LOG_IO, \
- shost_printk(KERN_ALERT, \
+ shost_printk(KERN_INFO, \
(io_req)->port->lport->host, \
PFX "xid:0x%x " fmt, \
(io_req)->xid, ##arg)); \
@@ -46,10 +46,10 @@ extern unsigned int bnx2fc_debug_level;
if (!tgt || !tgt->port || !tgt->port->lport || \
!tgt->port->lport->host || !tgt->rport) \
BNX2FC_CHK_LOGGING(LOG_TGT, \
- printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
else \
BNX2FC_CHK_LOGGING(LOG_TGT, \
- shost_printk(KERN_ALERT, \
+ shost_printk(KERN_INFO, \
(tgt)->port->lport->host, \
PFX "port:%x " fmt, \
(tgt)->rport->port_id, ##arg)); \
@@ -60,10 +60,10 @@ extern unsigned int bnx2fc_debug_level;
do { \
if (!lport || !lport->host) \
BNX2FC_CHK_LOGGING(LOG_HBA, \
- printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
else \
BNX2FC_CHK_LOGGING(LOG_HBA, \
- shost_printk(KERN_ALERT, lport->host, \
+ shost_printk(KERN_INFO, lport->host, \
PFX fmt, ##arg)); \
} while (0)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 7e89143..d66dcbd 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -3,7 +3,7 @@
* This file contains helper routines that handle ELS requests
* and responses.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -253,13 +253,417 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
return rc;
}
+void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr, *fh;
+ struct bnx2fc_cmd *srr_req;
+ struct bnx2fc_cmd *orig_io_req;
+ struct fc_frame *fp;
+ unsigned char *buf;
+ void *resp_buf;
+ u32 resp_len, hdr_len;
+ u8 opcode;
+ int rc = 0;
+
+ orig_io_req = cb_arg->aborted_io_req;
+ srr_req = cb_arg->io_req;
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
+ orig_io_req->xid);
+ goto srr_compl_done;
+ }
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(srr_req, "rec abts in prog "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ goto srr_compl_done;
+ }
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
+ /* SRR timed out */
+ BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ rc = bnx2fc_initiate_abts(srr_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+ "failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(srr_req);
+ }
+ orig_io_req->srr_retry++;
+ if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_srr(orig_io_req,
+ orig_io_req->srr_offset,
+ orig_io_req->srr_rctl);
+ spin_lock_bh(&tgt->tgt_lock);
+ if (!rc)
+ goto srr_compl_done;
+ }
+
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ orig_io_req->xid);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ goto srr_compl_done;
+ }
+ mp_req = &(srr_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ resp_buf = mp_req->resp_buf;
+
+ hdr_len = sizeof(*fc_hdr);
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
+ goto srr_compl_done;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+
+ fp = fc_frame_alloc(NULL, resp_len);
+ if (!fp) {
+ printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+ goto free_buf;
+ }
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, hdr_len + resp_len);
+
+ opcode = fc_frame_payload_op(fp);
+ switch (opcode) {
+ case ELS_LS_ACC:
+ BNX2FC_IO_DBG(srr_req, "SRR success\n");
+ break;
+ case ELS_LS_RJT:
+ BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ orig_io_req->xid);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ break;
+ default:
+ BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
+ opcode);
+ break;
+ }
+ fc_frame_free(fp);
+free_buf:
+ kfree(buf);
+srr_compl_done:
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *orig_io_req, *new_io_req;
+ struct bnx2fc_cmd *rec_req;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr, *fh;
+ struct fc_els_ls_rjt *rjt;
+ struct fc_els_rec_acc *acc;
+ struct bnx2fc_rport *tgt;
+ struct fcoe_err_report_entry *err_entry;
+ struct scsi_cmnd *sc_cmd;
+ enum fc_rctl r_ctl;
+ unsigned char *buf;
+ void *resp_buf;
+ struct fc_frame *fp;
+ u8 opcode;
+ u32 offset;
+ u32 e_stat;
+ u32 resp_len, hdr_len;
+ int rc = 0;
+ bool send_seq_clnp = false;
+ bool abort_io = false;
+
+ BNX2FC_MISC_DBG("Entered rec_compl callback\n");
+ rec_req = cb_arg->io_req;
+ orig_io_req = cb_arg->aborted_io_req;
+ BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
+ tgt = orig_io_req->tgt;
+
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "completed"
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ goto rec_compl_done;
+ }
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "abts in prog "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ goto rec_compl_done;
+ }
+ /* Handle REC timeout case */
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "timed out, abort "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ /* els req is timed out. send abts for els */
+ rc = bnx2fc_initiate_abts(rec_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+ "failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(rec_req);
+ }
+ orig_io_req->rec_retry++;
+ /* REC timed out; retry REC, else send ABTS to the orig IO req */
+ if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_rec(orig_io_req);
+ spin_lock_bh(&tgt->tgt_lock);
+ if (!rc)
+ goto rec_compl_done;
+ }
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ orig_io_req->xid);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ goto rec_compl_done;
+ }
+ mp_req = &(rec_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ acc = resp_buf = mp_req->resp_buf;
+
+ hdr_len = sizeof(*fc_hdr);
+
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
+ goto rec_compl_done;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+
+ fp = fc_frame_alloc(NULL, resp_len);
+ if (!fp) {
+ printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+ goto free_buf;
+ }
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, hdr_len + resp_len);
+
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ if ((rjt->er_reason == ELS_RJT_LOGIC ||
+ rjt->er_reason == ELS_RJT_UNAB) &&
+ rjt->er_explan == ELS_EXPL_OXID_RXID) {
+ BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
+ new_io_req = bnx2fc_cmd_alloc(tgt);
+ if (!new_io_req)
+ goto abort_io;
+ new_io_req->sc_cmd = orig_io_req->sc_cmd;
+ /* cleanup orig_io_req that is with the FW */
+ set_bit(BNX2FC_FLAG_CMD_LOST,
+ &orig_io_req->req_flags);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ /* Post a new IO req with the same sc_cmd */
+ BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_post_io_req(tgt, new_io_req);
+ spin_lock_bh(&tgt->tgt_lock);
+ if (!rc)
+ goto free_frame;
+ BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
+ }
+abort_io:
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+ "failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ } else if (opcode == ELS_LS_ACC) {
+ /* REVISIT: Check if the exchange is already aborted */
+ offset = ntohl(acc->reca_fc4value);
+ e_stat = ntohl(acc->reca_e_stat);
+ if (e_stat & ESB_ST_SEQ_INIT) {
+ BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
+ goto free_frame;
+ }
+ BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
+ e_stat, offset);
+ /* Seq initiative is with us */
+ err_entry = (struct fcoe_err_report_entry *)
+ &orig_io_req->err_entry;
+ sc_cmd = orig_io_req->sc_cmd;
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ /* SCSI WRITE command */
+ if (offset == orig_io_req->data_xfer_len) {
+ BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
+ /* FCP_RSP lost */
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ offset = 0;
+ } else {
+ /* start transmitting from offset */
+ BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
+ send_seq_clnp = true;
+ r_ctl = FC_RCTL_DD_DATA_DESC;
+ if (bnx2fc_initiate_seq_cleanup(orig_io_req,
+ offset, r_ctl))
+ abort_io = true;
+ /* XFER_RDY */
+ }
+ } else {
+ /* SCSI READ command */
+ if (err_entry->data.rx_buf_off ==
+ orig_io_req->data_xfer_len) {
+ /* FCP_RSP lost */
+ BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ offset = 0;
+ } else {
+ /* request retransmission from this offset */
+ send_seq_clnp = true;
+ offset = err_entry->data.rx_buf_off;
+ BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
+ /* FCP_DATA lost */
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ if (bnx2fc_initiate_seq_cleanup(orig_io_req,
+ offset, r_ctl))
+ abort_io = true;
+ }
+ }
+ if (abort_io) {
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
+ " failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ } else if (!send_seq_clnp) {
+ BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (rc) {
+ BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
+ " IO will abort\n");
+ }
+ }
+ }
+free_frame:
+ fc_frame_free(fp);
+free_buf:
+ kfree(buf);
+rec_compl_done:
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+ kfree(cb_arg);
+}
+
+int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
+{
+ struct fc_els_rec rec;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 sid = tgt->sid;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
+ memset(&rec, 0, sizeof(rec));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
+ rc = -ENOMEM;
+ goto rec_err;
+ }
+ kref_get(&orig_io_req->refcount);
+
+ cb_arg->aborted_io_req = orig_io_req;
+
+ rec.rec_cmd = ELS_REC;
+ hton24(rec.rec_s_id, sid);
+ rec.rec_ox_id = htons(orig_io_req->xid);
+ rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
+
+ rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
+ bnx2fc_rec_compl, cb_arg,
+ r_a_tov);
+rec_err:
+ if (rc) {
+ BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ kfree(cb_arg);
+ }
+ return rc;
+}
+
+int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
+{
+ struct fcp_srr srr;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
+ memset(&srr, 0, sizeof(srr));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
+ rc = -ENOMEM;
+ goto srr_err;
+ }
+ kref_get(&orig_io_req->refcount);
+
+ cb_arg->aborted_io_req = orig_io_req;
+
+ srr.srr_op = ELS_SRR;
+ srr.srr_ox_id = htons(orig_io_req->xid);
+ srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
+ srr.srr_rel_off = htonl(offset);
+ srr.srr_r_ctl = r_ctl;
+ orig_io_req->srr_offset = offset;
+ orig_io_req->srr_rctl = r_ctl;
+
+ rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
+ bnx2fc_srr_compl, cb_arg,
+ r_a_tov);
+srr_err:
+ if (rc) {
+ BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ kfree(cb_arg);
+ } else
+ set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
+
+ return rc;
+}
+
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
void *data, u32 data_len,
void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
struct fcoe_port *port = tgt->port;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
struct fc_rport *rport = tgt->rport;
struct fc_lport *lport = port->lport;
struct bnx2fc_cmd *els_req;
@@ -274,12 +678,12 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
rc = fc_remote_port_chkready(rport);
if (rc) {
- printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op);
+ printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
rc = -EINVAL;
goto els_err;
}
if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
- printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op);
+ printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
rc = -EINVAL;
goto els_err;
}
@@ -305,7 +709,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
rc = bnx2fc_init_mp_req(els_req);
if (rc == FAILED) {
- printk(KERN_ALERT PFX "ELS MP request init failed\n");
+ printk(KERN_ERR PFX "ELS MP request init failed\n");
spin_lock_bh(&tgt->tgt_lock);
kref_put(&els_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
@@ -324,7 +728,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
memcpy(mp_req->req_buf, data, data_len);
} else {
- printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op);
+ printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
els_req->cb_func = NULL;
els_req->cb_arg = NULL;
spin_lock_bh(&tgt->tgt_lock);
@@ -342,9 +746,14 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
did = tgt->rport->port_id;
sid = tgt->sid;
- __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
- FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
- FC_FC_SEQ_INIT, 0);
+ if (op == ELS_SRR)
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
+ FC_TYPE_FCP, FC_FC_FIRST_SEQ |
+ FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+ else
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
+ FC_TYPE_ELS, FC_FC_FIRST_SEQ |
+ FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
/* Obtain exchange id */
xid = els_req->xid;
@@ -352,7 +761,8 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
index = xid % BNX2FC_TASKS_PER_PAGE;
/* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
task = &(task_page[index]);
bnx2fc_init_mp_task(els_req, task);
@@ -496,8 +906,8 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
void *arg, u32 timeout)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
- struct fcoe_ctlr *fip = &hba->ctlr;
+ struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *fip = &interface->ctlr;
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {
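The REC/SRR handlers added above implement sequence-level error recovery: the REC accept reports how far the exchange progressed, and the driver then either asks the target to resend the FCP_RSP via SRR or issues a sequence cleanup so data is retransmitted from the reported offset. The helper below is a condensed, illustrative restatement of the ELS_LS_ACC decision in bnx2fc_rec_compl(); it is a readability sketch, not code from the patch, and its name and parameters are hypothetical.

/* Illustrative sketch only -- condensed restatement of the ELS_LS_ACC branch
 * of bnx2fc_rec_compl(). 'offset' is the REC-reported transfer offset for
 * writes, or the rx_buf_off from the error entry for reads. */
static enum fc_rctl bnx2fc_example_pick_rctl(struct bnx2fc_cmd *io,
					     bool is_write, u32 *offset)
{
	if (*offset == io->data_xfer_len) {
		/* All data made it; only the FCP_RSP was lost, so request
		 * retransmission of the response frame via SRR. */
		*offset = 0;
		return FC_RCTL_DD_CMD_STATUS;
	}
	if (is_write)
		/* XFER_RDY/DATA lost: sequence cleanup, then the target
		 * requests the remaining data from 'offset'. */
		return FC_RCTL_DD_DATA_DESC;
	/* Read data lost: sequence cleanup, then solicited data is
	 * retransmitted from 'offset'. */
	return FC_RCTL_DD_SOL_DATA;
}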
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a97aff3..7cb2cd4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -3,7 +3,7 @@
* cnic modules to create FCoE instances, send/receive non-offloaded
* FIP/FCoE packets, listen to link events etc.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,13 +15,14 @@
#include "bnx2fc.h"
static struct list_head adapter_list;
+static struct list_head if_list;
static u32 adapter_count;
static DEFINE_MUTEX(bnx2fc_dev_lock);
DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Jun 10, 2011"
+#define DRV_MODULE_RELDATE "Jun 23, 2011"
static char version[] __devinitdata =
@@ -61,7 +62,7 @@ static int bnx2fc_disable(struct net_device *netdev);
static void bnx2fc_recv_frame(struct sk_buff *skb);
-static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
+static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
static int bnx2fc_net_config(struct fc_lport *lp);
static int bnx2fc_lport_config(struct fc_lport *lport);
@@ -70,18 +71,20 @@ static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
-static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv);
static void bnx2fc_destroy_work(struct work_struct *work);
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
+ *phys_dev);
static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
static void bnx2fc_port_shutdown(struct fc_lport *lport);
-static void bnx2fc_stop(struct bnx2fc_hba *hba);
+static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);
@@ -142,7 +145,8 @@ static void bnx2fc_abort_io(struct fc_lport *lport)
static void bnx2fc_cleanup(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_rport *tgt;
int i;
@@ -219,7 +223,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct fcoe_crc_eof *cp;
struct sk_buff *skb;
struct fc_frame_header *fh;
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
+ struct bnx2fc_hba *hba;
struct fcoe_port *port;
struct fcoe_hdr *hp;
struct bnx2fc_rport *tgt;
@@ -230,7 +235,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
int wlen, rc = 0;
port = (struct fcoe_port *)lport_priv(lport);
- hba = port->priv;
+ interface = port->priv;
+ hba = interface->hba;
fh = fc_frame_header_get(fp);
@@ -242,12 +248,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
- if (!hba->ctlr.sel_fcf) {
+ if (!interface->ctlr.sel_fcf) {
BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
kfree_skb(skb);
return -EINVAL;
}
- if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb))
+ if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
return 0;
}
@@ -316,19 +322,19 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_reset_network_header(skb);
skb->mac_len = elen;
skb->protocol = htons(ETH_P_FCOE);
- skb->dev = hba->netdev;
+ skb->dev = interface->netdev;
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE);
- if (hba->ctlr.map_dest)
+ if (interface->ctlr.map_dest)
fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
else
/* insert GW address */
- memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN);
+ memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
- if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN))
- memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN);
+ if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
else
memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
@@ -377,22 +383,23 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *olddev)
{
struct fc_lport *lport;
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct fc_frame_header *fh;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
unsigned short oxid;
- hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
- lport = hba->ctlr.lp;
+ interface = container_of(ptype, struct bnx2fc_interface,
+ fcoe_packet_type);
+ lport = interface->ctlr.lp;
if (unlikely(lport == NULL)) {
- printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n");
+ printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
goto err;
}
if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
- printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n");
+ printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
goto err;
}
@@ -411,7 +418,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;
- fr->ptype = ptype;
bg = &bnx2fc_global;
spin_lock_bh(&bg->fcoe_rx_list.lock);
@@ -469,7 +475,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
fr = fcoe_dev_from_skb(skb);
lport = fr->fr_dev;
if (unlikely(lport == NULL)) {
- printk(KERN_ALERT PFX "Invalid lport struct\n");
+ printk(KERN_ERR PFX "Invalid lport struct\n");
kfree_skb(skb);
return;
}
@@ -594,7 +600,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
struct fc_host_statistics *bnx2fc_stats;
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct fcoe_statistics_params *fw_stats;
int rc = 0;
@@ -631,7 +638,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
struct Scsi_Host *shost = lport->host;
int rc = 0;
@@ -654,7 +661,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
BNX2FC_NAME, BNX2FC_VERSION,
- hba->netdev->name);
+ interface->netdev->name);
return 0;
}
@@ -662,8 +669,8 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
static void bnx2fc_link_speed_update(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
- struct net_device *netdev = hba->netdev;
+ struct bnx2fc_interface *interface = port->priv;
+ struct net_device *netdev = interface->netdev;
struct ethtool_cmd ecmd;
if (!dev_ethtool_get_settings(netdev, &ecmd)) {
@@ -691,7 +698,8 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
static int bnx2fc_link_ok(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct net_device *dev = hba->phys_dev;
int rc = 0;
@@ -713,7 +721,7 @@ static int bnx2fc_link_ok(struct fc_lport *lport)
*/
void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
{
- if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+ if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state))
set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
else
clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
@@ -722,11 +730,13 @@ void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
static int bnx2fc_net_config(struct fc_lport *lport)
{
struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct fcoe_port *port;
u64 wwnn, wwpn;
port = lport_priv(lport);
- hba = port->priv;
+ interface = port->priv;
+ hba = interface->hba;
/* require support for get_pauseparam ethtool op. */
if (!hba->phys_dev->ethtool_ops ||
@@ -743,11 +753,11 @@ static int bnx2fc_net_config(struct fc_lport *lport)
bnx2fc_link_speed_update(lport);
if (!lport->vport) {
- wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
+ wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0);
BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
fc_set_wwnn(lport, wwnn);
- wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0);
+ wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0);
BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
fc_set_wwpn(lport, wwpn);
}
@@ -759,9 +769,9 @@ static void bnx2fc_destroy_timer(unsigned long data)
{
struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
- BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - "
+ BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - "
"Destroy compl not received!!\n");
- hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
wake_up_interruptible(&hba->destroy_wait);
}
@@ -779,54 +789,35 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
u16 vlan_id)
{
struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
- struct fc_lport *lport = hba->ctlr.lp;
+ struct fc_lport *lport;
struct fc_lport *vport;
+ struct bnx2fc_interface *interface;
+ int wait_for_upload = 0;
u32 link_possible = 1;
/* Ignore vlans for now */
if (vlan_id != 0)
return;
- if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
- BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
- hba->netdev->name, event);
- return;
- }
-
- /*
- * ASSUMPTION:
- * indicate_netevent cannot be called from cnic unless bnx2fc
- * does register_device
- */
- BUG_ON(!lport);
-
- BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
- hba->netdev->name, event);
-
switch (event) {
case NETDEV_UP:
- BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
- hba->adapter_state);
if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
printk(KERN_ERR "indicate_netevent: "\
- "adapter is not UP!!\n");
+ "hba is not UP!!\n");
break;
case NETDEV_DOWN:
- BNX2FC_HBA_DBG(lport, "Port down\n");
clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
link_possible = 0;
break;
case NETDEV_GOING_DOWN:
- BNX2FC_HBA_DBG(lport, "Port going down\n");
set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
link_possible = 0;
break;
case NETDEV_CHANGE:
- BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
break;
default:
@@ -834,15 +825,22 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
return;
}
- bnx2fc_link_speed_update(lport);
+ mutex_lock(&bnx2fc_dev_lock);
+ list_for_each_entry(interface, &if_list, list) {
- if (link_possible && !bnx2fc_link_ok(lport)) {
- printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n");
- fcoe_ctlr_link_up(&hba->ctlr);
- } else {
- printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n");
- if (fcoe_ctlr_link_down(&hba->ctlr)) {
- clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ if (interface->hba != hba)
+ continue;
+
+ lport = interface->ctlr.lp;
+ BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
+ interface->netdev->name, event);
+
+ bnx2fc_link_speed_update(lport);
+
+ if (link_possible && !bnx2fc_link_ok(lport)) {
+ printk(KERN_ERR "indicate_netevent: ctlr_link_up\n");
+ fcoe_ctlr_link_up(&interface->ctlr);
+ } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
mutex_lock(&lport->lp_mutex);
list_for_each_entry(vport, &lport->vports, list)
fc_host_port_type(vport->host) =
@@ -853,24 +851,26 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
get_cpu())->LinkFailureCount++;
put_cpu();
fcoe_clean_pending_queue(lport);
+ wait_for_upload = 1;
+ }
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
- init_waitqueue_head(&hba->shutdown_wait);
- BNX2FC_HBA_DBG(lport, "indicate_netevent "
- "num_ofld_sess = %d\n",
- hba->num_ofld_sess);
- hba->wait_for_link_down = 1;
- BNX2FC_HBA_DBG(lport, "waiting for uploads to "
- "compl proc = %s\n",
- current->comm);
- wait_event_interruptible(hba->shutdown_wait,
- (hba->num_ofld_sess == 0));
- BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
+ if (wait_for_upload) {
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ init_waitqueue_head(&hba->shutdown_wait);
+ BNX2FC_MISC_DBG("indicate_netevent "
+ "num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 1;
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n",
hba->num_ofld_sess);
- hba->wait_for_link_down = 0;
+ hba->wait_for_link_down = 0;
- if (signal_pending(current))
- flush_signals(current);
- }
+ if (signal_pending(current))
+ flush_signals(current);
}
}
@@ -889,23 +889,12 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
static int bnx2fc_em_config(struct fc_lport *lport)
{
- struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
-
if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
FCOE_MAX_XID, NULL)) {
printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
return -ENOMEM;
}
- hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
- BNX2FC_MAX_XID);
-
- if (!hba->cmd_mgr) {
- printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
- fc_exch_mgr_free(lport);
- return -ENOMEM;
- }
return 0;
}
@@ -918,11 +907,8 @@ static int bnx2fc_lport_config(struct fc_lport *lport)
lport->e_d_tov = 2 * 1000;
lport->r_a_tov = 10 * 1000;
- /* REVISIT: enable when supporting tape devices
lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
- */
- lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
lport->does_npiv = 1;
memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
@@ -952,9 +938,10 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev)
{
- struct bnx2fc_hba *hba;
- hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type);
- fcoe_ctlr_recv(&hba->ctlr, skb);
+ struct bnx2fc_interface *interface;
+ interface = container_of(ptype, struct bnx2fc_interface,
+ fip_packet_type);
+ fcoe_ctlr_recv(&interface->ctlr, skb);
return 0;
}
@@ -1005,17 +992,17 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fcoe_port *port = lport_priv(n_port);
- struct bnx2fc_hba *hba = port->priv;
- struct net_device *netdev = hba->netdev;
+ struct bnx2fc_interface *interface = port->priv;
+ struct net_device *netdev = interface->netdev;
struct fc_lport *vn_port;
- if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
printk(KERN_ERR PFX "vn ports cannot be created on"
- "this hba\n");
+ "this interface\n");
return -EIO;
}
mutex_lock(&bnx2fc_dev_lock);
- vn_port = bnx2fc_if_create(hba, &vport->dev, 1);
+ vn_port = bnx2fc_if_create(interface, &vport->dev, 1);
mutex_unlock(&bnx2fc_dev_lock);
if (IS_ERR(vn_port)) {
@@ -1065,10 +1052,10 @@ static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
}
-static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
+static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface)
{
- struct net_device *netdev = hba->netdev;
- struct net_device *physdev = hba->phys_dev;
+ struct net_device *netdev = interface->netdev;
+ struct net_device *physdev = interface->hba->phys_dev;
struct netdev_hw_addr *ha;
int sel_san_mac = 0;
@@ -1083,7 +1070,8 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
(is_valid_ether_addr(ha->addr))) {
- memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
+ memcpy(interface->ctlr.ctl_src_addr, ha->addr,
+ ETH_ALEN);
sel_san_mac = 1;
BNX2FC_MISC_DBG("Found SAN MAC\n");
}
@@ -1093,15 +1081,15 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
if (!sel_san_mac)
return -ENODEV;
- hba->fip_packet_type.func = bnx2fc_fip_recv;
- hba->fip_packet_type.type = htons(ETH_P_FIP);
- hba->fip_packet_type.dev = netdev;
- dev_add_pack(&hba->fip_packet_type);
+ interface->fip_packet_type.func = bnx2fc_fip_recv;
+ interface->fip_packet_type.type = htons(ETH_P_FIP);
+ interface->fip_packet_type.dev = netdev;
+ dev_add_pack(&interface->fip_packet_type);
- hba->fcoe_packet_type.func = bnx2fc_rcv;
- hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
- hba->fcoe_packet_type.dev = netdev;
- dev_add_pack(&hba->fcoe_packet_type);
+ interface->fcoe_packet_type.func = bnx2fc_rcv;
+ interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+ interface->fcoe_packet_type.dev = netdev;
+ dev_add_pack(&interface->fcoe_packet_type);
return 0;
}
@@ -1137,53 +1125,54 @@ static void bnx2fc_release_transport(void)
static void bnx2fc_interface_release(struct kref *kref)
{
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct net_device *netdev;
- struct net_device *phys_dev;
- hba = container_of(kref, struct bnx2fc_hba, kref);
+ interface = container_of(kref, struct bnx2fc_interface, kref);
BNX2FC_MISC_DBG("Interface is being released\n");
- netdev = hba->netdev;
- phys_dev = hba->phys_dev;
+ netdev = interface->netdev;
/* tear-down FIP controller */
- if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done))
- fcoe_ctlr_destroy(&hba->ctlr);
+ if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
+ fcoe_ctlr_destroy(&interface->ctlr);
+
+ kfree(interface);
- /* Free the command manager */
- if (hba->cmd_mgr) {
- bnx2fc_cmd_mgr_free(hba->cmd_mgr);
- hba->cmd_mgr = NULL;
- }
dev_put(netdev);
module_put(THIS_MODULE);
}
-static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
+static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface)
{
- kref_get(&hba->kref);
+ kref_get(&interface->kref);
}
-static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
+static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface)
{
- kref_put(&hba->kref, bnx2fc_interface_release);
+ kref_put(&interface->kref, bnx2fc_interface_release);
}
-static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba)
+static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
{
+ /* Free the command manager */
+ if (hba->cmd_mgr) {
+ bnx2fc_cmd_mgr_free(hba->cmd_mgr);
+ hba->cmd_mgr = NULL;
+ }
+ kfree(hba->tgt_ofld_list);
bnx2fc_unbind_pcidev(hba);
kfree(hba);
}
/**
- * bnx2fc_interface_create - create a new fcoe instance
+ * bnx2fc_hba_create - create a new bnx2fc hba
*
* @cnic: pointer to cnic device
*
- * Creates a new FCoE instance on the given device which include allocating
- * hba structure, scsi_host and lport structures.
+ * Creates a new FCoE hba on the given device.
+ *
*/
-static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
+static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
{
struct bnx2fc_hba *hba;
int rc;
@@ -1198,65 +1187,83 @@ static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
hba->cnic = cnic;
rc = bnx2fc_bind_pcidev(hba);
- if (rc)
+ if (rc) {
+ printk(KERN_ERR PFX "create_adapter: bind error\n");
goto bind_err;
+ }
hba->phys_dev = cnic->netdev;
- /* will get overwritten after we do vlan discovery */
- hba->netdev = hba->phys_dev;
+ hba->next_conn_id = 0;
+
+ hba->tgt_ofld_list =
+ kzalloc(sizeof(struct bnx2fc_rport *) * BNX2FC_NUM_MAX_SESS,
+ GFP_KERNEL);
+ if (!hba->tgt_ofld_list) {
+ printk(KERN_ERR PFX "Unable to allocate tgt offload list\n");
+ goto tgtofld_err;
+ }
+
+ hba->num_ofld_sess = 0;
+
+ hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
+ BNX2FC_MAX_XID);
+ if (!hba->cmd_mgr) {
+ printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
+ goto cmgr_err;
+ }
init_waitqueue_head(&hba->shutdown_wait);
init_waitqueue_head(&hba->destroy_wait);
+ INIT_LIST_HEAD(&hba->vports);
return hba;
+
+cmgr_err:
+ kfree(hba->tgt_ofld_list);
+tgtofld_err:
+ bnx2fc_unbind_pcidev(hba);
bind_err:
- printk(KERN_ERR PFX "create_interface: bind error\n");
kfree(hba);
return NULL;
}
-static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
- enum fip_state fip_mode)
+struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
+ struct net_device *netdev,
+ enum fip_state fip_mode)
{
+ struct bnx2fc_interface *interface;
int rc = 0;
- struct net_device *netdev = hba->netdev;
- struct fcoe_ctlr *fip = &hba->ctlr;
+ interface = kzalloc(sizeof(*interface), GFP_KERNEL);
+ if (!interface) {
+ printk(KERN_ERR PFX "Unable to allocate interface structure\n");
+ return NULL;
+ }
dev_hold(netdev);
- kref_init(&hba->kref);
-
- hba->flags = 0;
+ kref_init(&interface->kref);
+ interface->hba = hba;
+ interface->netdev = netdev;
/* Initialize FIP */
- memset(fip, 0, sizeof(*fip));
- fcoe_ctlr_init(fip, fip_mode);
- hba->ctlr.send = bnx2fc_fip_send;
- hba->ctlr.update_mac = bnx2fc_update_src_mac;
- hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
- set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
-
- INIT_LIST_HEAD(&hba->vports);
- rc = bnx2fc_netdev_setup(hba);
- if (rc)
- goto setup_err;
+ fcoe_ctlr_init(&interface->ctlr, fip_mode);
+ interface->ctlr.send = bnx2fc_fip_send;
+ interface->ctlr.update_mac = bnx2fc_update_src_mac;
+ interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
+ set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
- hba->next_conn_id = 0;
+ rc = bnx2fc_netdev_setup(interface);
+ if (!rc)
+ return interface;
- memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list));
- hba->num_ofld_sess = 0;
-
- return 0;
-
-setup_err:
- fcoe_ctlr_destroy(&hba->ctlr);
+ fcoe_ctlr_destroy(&interface->ctlr);
dev_put(netdev);
- bnx2fc_interface_put(hba);
- return rc;
+ kfree(interface);
+ return NULL;
}
/**
* bnx2fc_if_create - Create FCoE instance on a given interface
*
- * @hba: FCoE interface to create a local port on
+ * @interface: FCoE interface to create a local port on
* @parent: Device pointer to be the parent in sysfs for the SCSI host
* @npiv: Indicates if the port is vport or not
*
@@ -1264,7 +1271,7 @@ setup_err:
*
* Returns: Allocated fc_lport or an error pointer
*/
-static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv)
{
struct fc_lport *lport, *n_port;
@@ -1272,11 +1279,12 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
struct Scsi_Host *shost;
struct fc_vport *vport = dev_to_vport(parent);
struct bnx2fc_lport *blport;
+ struct bnx2fc_hba *hba;
int rc = 0;
blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
if (!blport) {
- BNX2FC_HBA_DBG(hba->ctlr.lp, "Unable to alloc bnx2fc_lport\n");
+ BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
return NULL;
}
@@ -1293,7 +1301,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
shost = lport->host;
port = lport_priv(lport);
port->lport = lport;
- port->priv = hba;
+ port->priv = interface;
INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
@@ -1317,7 +1325,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
rc = bnx2fc_shost_config(lport, parent);
if (rc) {
printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
- hba->netdev->name);
+ interface->netdev->name);
goto lp_config_err;
}
@@ -1343,8 +1351,9 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
goto shost_err;
}
- bnx2fc_interface_get(hba);
+ bnx2fc_interface_get(interface);
+ hba = interface->hba;
spin_lock_bh(&hba->hba_lock);
blport->lport = lport;
list_add_tail(&blport->list, &hba->vports);
@@ -1361,21 +1370,19 @@ free_blport:
return NULL;
}
-static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba)
+static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface)
{
/* Dont listen for Ethernet packets anymore */
- __dev_remove_pack(&hba->fcoe_packet_type);
- __dev_remove_pack(&hba->fip_packet_type);
+ __dev_remove_pack(&interface->fcoe_packet_type);
+ __dev_remove_pack(&interface->fip_packet_type);
synchronize_net();
}
-static void bnx2fc_if_destroy(struct fc_lport *lport)
+static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
struct bnx2fc_lport *blport, *tmp;
- BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
/* Stop the transmit retry timer */
del_timer_sync(&port->timer);
@@ -1409,8 +1416,6 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
/* Release Scsi_Host */
scsi_host_put(lport->host);
-
- bnx2fc_interface_put(hba);
}
/**
@@ -1425,46 +1430,31 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
*/
static int bnx2fc_destroy(struct net_device *netdev)
{
- struct bnx2fc_hba *hba = NULL;
- struct net_device *phys_dev;
+ struct bnx2fc_interface *interface = NULL;
+ struct bnx2fc_hba *hba;
+ struct fc_lport *lport;
int rc = 0;
rtnl_lock();
-
mutex_lock(&bnx2fc_dev_lock);
- /* obtain physical netdev */
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
- phys_dev = vlan_dev_real_dev(netdev);
- else {
- printk(KERN_ERR PFX "Not a vlan device\n");
- rc = -ENODEV;
- goto netdev_err;
- }
- hba = bnx2fc_hba_lookup(phys_dev);
- if (!hba || !hba->ctlr.lp) {
+ interface = bnx2fc_interface_lookup(netdev);
+ if (!interface || !interface->ctlr.lp) {
rc = -ENODEV;
- printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n");
- goto netdev_err;
- }
-
- if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
- printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
+ printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
goto netdev_err;
}
- bnx2fc_netdev_cleanup(hba);
-
- bnx2fc_stop(hba);
-
- bnx2fc_if_destroy(hba->ctlr.lp);
+ hba = interface->hba;
- destroy_workqueue(hba->timer_work_queue);
+ bnx2fc_netdev_cleanup(interface);
+ lport = interface->ctlr.lp;
+ bnx2fc_stop(interface);
+ list_del(&interface->list);
+ destroy_workqueue(interface->timer_work_queue);
+ bnx2fc_interface_put(interface);
+ bnx2fc_if_destroy(lport, hba);
- if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
- bnx2fc_fw_destroy(hba);
-
- clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
netdev_err:
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
@@ -1475,16 +1465,20 @@ static void bnx2fc_destroy_work(struct work_struct *work)
{
struct fcoe_port *port;
struct fc_lport *lport;
+ struct bnx2fc_interface *interface;
+ struct bnx2fc_hba *hba;
port = container_of(work, struct fcoe_port, destroy_work);
lport = port->lport;
+ interface = port->priv;
+ hba = interface->hba;
BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
bnx2fc_port_shutdown(lport);
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
- bnx2fc_if_destroy(lport);
+ bnx2fc_if_destroy(lport, hba);
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
}
@@ -1556,28 +1550,27 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
static void bnx2fc_ulp_start(void *handle)
{
struct bnx2fc_hba *hba = handle;
- struct fc_lport *lport = hba->ctlr.lp;
+ struct bnx2fc_interface *interface;
+ struct fc_lport *lport;
- BNX2FC_MISC_DBG("Entered %s\n", __func__);
mutex_lock(&bnx2fc_dev_lock);
- if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
- goto start_disc;
-
- if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
bnx2fc_fw_init(hba);
-start_disc:
- mutex_unlock(&bnx2fc_dev_lock);
-
BNX2FC_MISC_DBG("bnx2fc started.\n");
- /* Kick off Fabric discovery*/
- if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
- printk(KERN_ERR PFX "ulp_init: start discovery\n");
- lport->tt.frame_send = bnx2fc_xmit;
- bnx2fc_start_disc(hba);
+ list_for_each_entry(interface, &if_list, list) {
+ if (interface->hba == hba) {
+ lport = interface->ctlr.lp;
+			/* Kick off Fabric discovery */
+ printk(KERN_ERR PFX "ulp_init: start discovery\n");
+ lport->tt.frame_send = bnx2fc_xmit;
+ bnx2fc_start_disc(interface);
+ }
}
+
+ mutex_unlock(&bnx2fc_dev_lock);
}
static void bnx2fc_port_shutdown(struct fc_lport *lport)
@@ -1587,37 +1580,25 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
fc_lport_destroy(lport);
}
-static void bnx2fc_stop(struct bnx2fc_hba *hba)
+static void bnx2fc_stop(struct bnx2fc_interface *interface)
{
struct fc_lport *lport;
struct fc_lport *vport;
- BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__,
- hba->init_done);
- if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
- test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
- lport = hba->ctlr.lp;
- bnx2fc_port_shutdown(lport);
- BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
- "offloaded sessions\n",
- hba->num_ofld_sess);
- wait_event_interruptible(hba->shutdown_wait,
- (hba->num_ofld_sess == 0));
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vport, &lport->vports, list)
- fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
- mutex_unlock(&lport->lp_mutex);
- fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
- fcoe_ctlr_link_down(&hba->ctlr);
- fcoe_clean_pending_queue(lport);
-
- mutex_lock(&hba->hba_mutex);
- clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
- clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
+ return;
- clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
- mutex_unlock(&hba->hba_mutex);
- }
+ lport = interface->ctlr.lp;
+ bnx2fc_port_shutdown(lport);
+
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ fcoe_ctlr_link_down(&interface->ctlr);
+ fcoe_clean_pending_queue(lport);
}
static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
@@ -1656,8 +1637,7 @@ static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
}
- /* Mark HBA to indicate that the FW INIT is done */
- set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
+ set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags);
return 0;
err_unbind:
@@ -1668,7 +1648,7 @@ err_out:
static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
{
- if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) {
if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
init_timer(&hba->destroy_timer);
hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
@@ -1677,8 +1657,8 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
hba->destroy_timer.data = (unsigned long)hba;
add_timer(&hba->destroy_timer);
wait_event_interruptible(hba->destroy_wait,
- (hba->flags &
- BNX2FC_FLAG_DESTROY_CMPL));
+ test_bit(BNX2FC_FLAG_DESTROY_CMPL,
+ &hba->flags));
/* This should never happen */
if (signal_pending(current))
flush_signals(current);
@@ -1699,40 +1679,57 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
*/
static void bnx2fc_ulp_stop(void *handle)
{
- struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle;
+ struct bnx2fc_hba *hba = handle;
+ struct bnx2fc_interface *interface;
printk(KERN_ERR "ULP_STOP\n");
mutex_lock(&bnx2fc_dev_lock);
- bnx2fc_stop(hba);
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
+ goto exit;
+ list_for_each_entry(interface, &if_list, list) {
+ if (interface->hba == hba)
+ bnx2fc_stop(interface);
+ }
+ BUG_ON(hba->num_ofld_sess != 0);
+
+ mutex_lock(&hba->hba_mutex);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_GOING_DOWN,
+ &hba->adapter_state);
+
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_unlock(&hba->hba_mutex);
+
bnx2fc_fw_destroy(hba);
+exit:
mutex_unlock(&bnx2fc_dev_lock);
}
-static void bnx2fc_start_disc(struct bnx2fc_hba *hba)
+static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
{
struct fc_lport *lport;
int wait_cnt = 0;
BNX2FC_MISC_DBG("Entered %s\n", __func__);
/* Kick off FIP/FLOGI */
- if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
printk(KERN_ERR PFX "Init not done yet\n");
return;
}
- lport = hba->ctlr.lp;
+ lport = interface->ctlr.lp;
BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
if (!bnx2fc_link_ok(lport)) {
BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
- fcoe_ctlr_link_up(&hba->ctlr);
+ fcoe_ctlr_link_up(&interface->ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
- set_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
}
/* wait for the FCF to be selected before issuing FLOGI */
- while (!hba->ctlr.sel_fcf) {
+ while (!interface->ctlr.sel_fcf) {
msleep(250);
/* give up after 3 secs */
if (++wait_cnt > 12)
@@ -1758,15 +1755,15 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
BNX2FC_MISC_DBG("Entered %s\n", __func__);
/* bnx2fc works only when bnx2x is loaded */
- if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) ||
+ (dev->max_fcoe_conn == 0)) {
printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
- " flags: %lx\n",
- dev->netdev->name, dev->flags);
+ " flags: %lx fcoe_conn: %d\n",
+ dev->netdev->name, dev->flags, dev->max_fcoe_conn);
return;
}
- /* Configure FCoE interface */
- hba = bnx2fc_interface_create(dev);
+ hba = bnx2fc_hba_create(dev);
if (!hba) {
printk(KERN_ERR PFX "hba initialization failed\n");
return;
@@ -1774,7 +1771,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
/* Add HBA to the adapter list */
mutex_lock(&bnx2fc_dev_lock);
- list_add_tail(&hba->link, &adapter_list);
+ list_add_tail(&hba->list, &adapter_list);
adapter_count++;
mutex_unlock(&bnx2fc_dev_lock);
@@ -1782,7 +1779,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
rc = dev->register_device(dev, CNIC_ULP_FCOE,
(void *) hba);
if (rc)
- printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc);
+ printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc);
else
set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
}
@@ -1790,52 +1787,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
static int bnx2fc_disable(struct net_device *netdev)
{
- struct bnx2fc_hba *hba;
- struct net_device *phys_dev;
- struct ethtool_drvinfo drvinfo;
+ struct bnx2fc_interface *interface;
int rc = 0;
rtnl_lock();
-
mutex_lock(&bnx2fc_dev_lock);
- /* obtain physical netdev */
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
- phys_dev = vlan_dev_real_dev(netdev);
- else {
- printk(KERN_ERR PFX "Not a vlan device\n");
- rc = -ENODEV;
- goto nodev;
- }
-
- /* verify if the physical device is a netxtreme2 device */
- if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
- memset(&drvinfo, 0, sizeof(drvinfo));
- phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
- if (strcmp(drvinfo.driver, "bnx2x")) {
- printk(KERN_ERR PFX "Not a netxtreme2 device\n");
- rc = -ENODEV;
- goto nodev;
- }
- } else {
- printk(KERN_ERR PFX "unable to obtain drv_info\n");
- rc = -ENODEV;
- goto nodev;
- }
-
- printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
-
- /* obtain hba and initialize rest of the structure */
- hba = bnx2fc_hba_lookup(phys_dev);
- if (!hba || !hba->ctlr.lp) {
+ interface = bnx2fc_interface_lookup(netdev);
+ if (!interface || !interface->ctlr.lp) {
rc = -ENODEV;
- printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n");
+ printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
} else {
- fcoe_ctlr_link_down(&hba->ctlr);
- fcoe_clean_pending_queue(hba->ctlr.lp);
+ fcoe_ctlr_link_down(&interface->ctlr);
+ fcoe_clean_pending_queue(interface->ctlr.lp);
}
-nodev:
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
return rc;
@@ -1844,48 +1810,19 @@ nodev:
static int bnx2fc_enable(struct net_device *netdev)
{
- struct bnx2fc_hba *hba;
- struct net_device *phys_dev;
- struct ethtool_drvinfo drvinfo;
+ struct bnx2fc_interface *interface;
int rc = 0;
rtnl_lock();
-
- BNX2FC_MISC_DBG("Entered %s\n", __func__);
mutex_lock(&bnx2fc_dev_lock);
- /* obtain physical netdev */
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
- phys_dev = vlan_dev_real_dev(netdev);
- else {
- printk(KERN_ERR PFX "Not a vlan device\n");
- rc = -ENODEV;
- goto nodev;
- }
- /* verify if the physical device is a netxtreme2 device */
- if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
- memset(&drvinfo, 0, sizeof(drvinfo));
- phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
- if (strcmp(drvinfo.driver, "bnx2x")) {
- printk(KERN_ERR PFX "Not a netxtreme2 device\n");
- rc = -ENODEV;
- goto nodev;
- }
- } else {
- printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ interface = bnx2fc_interface_lookup(netdev);
+ if (!interface || !interface->ctlr.lp) {
rc = -ENODEV;
- goto nodev;
- }
-
- /* obtain hba and initialize rest of the structure */
- hba = bnx2fc_hba_lookup(phys_dev);
- if (!hba || !hba->ctlr.lp) {
- rc = -ENODEV;
- printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
- } else if (!bnx2fc_link_ok(hba->ctlr.lp))
- fcoe_ctlr_link_up(&hba->ctlr);
+ printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
+ } else if (!bnx2fc_link_ok(interface->ctlr.lp))
+ fcoe_ctlr_link_up(&interface->ctlr);
-nodev:
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
return rc;
@@ -1903,6 +1840,7 @@ nodev:
*/
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
{
+ struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
struct net_device *phys_dev;
struct fc_lport *lport;
@@ -1938,7 +1876,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
memset(&drvinfo, 0, sizeof(drvinfo));
phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
- if (strcmp(drvinfo.driver, "bnx2x")) {
+ if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) {
printk(KERN_ERR PFX "Not a netxtreme2 device\n");
rc = -EINVAL;
goto netdev_err;
@@ -1949,7 +1887,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
goto netdev_err;
}
- /* obtain hba and initialize rest of the structure */
+ /* obtain interface and initialize rest of the structure */
hba = bnx2fc_hba_lookup(phys_dev);
if (!hba) {
rc = -ENODEV;
@@ -1957,67 +1895,61 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
goto netdev_err;
}
- if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
- rc = bnx2fc_fw_init(hba);
- if (rc)
- goto netdev_err;
- }
-
- if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ if (bnx2fc_interface_lookup(netdev)) {
rc = -EEXIST;
goto netdev_err;
}
- /* update netdev with vlan netdev */
- hba->netdev = netdev;
- hba->vlan_id = vlan_id;
- hba->vlan_enabled = 1;
-
- rc = bnx2fc_interface_setup(hba, fip_mode);
- if (rc) {
- printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
+ interface = bnx2fc_interface_create(hba, netdev, fip_mode);
+ if (!interface) {
+ printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
goto ifput_err;
}
- hba->timer_work_queue =
+ interface->vlan_id = vlan_id;
+ interface->vlan_enabled = 1;
+
+ interface->timer_work_queue =
create_singlethread_workqueue("bnx2fc_timer_wq");
- if (!hba->timer_work_queue) {
+ if (!interface->timer_work_queue) {
printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
rc = -EINVAL;
goto ifput_err;
}
- lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0);
+ lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0);
if (!lport) {
printk(KERN_ERR PFX "Failed to create interface (%s)\n",
netdev->name);
- bnx2fc_netdev_cleanup(hba);
+ bnx2fc_netdev_cleanup(interface);
rc = -EINVAL;
goto if_create_err;
}
+ /* Add interface to if_list */
+ list_add_tail(&interface->list, &if_list);
+
lport->boot_time = jiffies;
/* Make this master N_port */
- hba->ctlr.lp = lport;
+ interface->ctlr.lp = lport;
- set_bit(BNX2FC_CREATE_DONE, &hba->init_done);
- printk(KERN_ERR PFX "create: START DISC\n");
- bnx2fc_start_disc(hba);
+ BNX2FC_HBA_DBG(lport, "create: START DISC\n");
+ bnx2fc_start_disc(interface);
/*
	 * Release the reference from kref_init in bnx2fc_interface_setup;
	 * on success, lport holds the reference taken in bnx2fc_if_create.
*/
- bnx2fc_interface_put(hba);
+ bnx2fc_interface_put(interface);
/* put netdev that was held while calling dev_get_by_name */
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
return 0;
if_create_err:
- destroy_workqueue(hba->timer_work_queue);
+ destroy_workqueue(interface->timer_work_queue);
ifput_err:
- bnx2fc_interface_put(hba);
+ bnx2fc_interface_put(interface);
netdev_err:
module_put(THIS_MODULE);
mod_err:
@@ -2027,7 +1959,7 @@ mod_err:
}
/**
- * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance
+ * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
*
* @cnic: Pointer to cnic device instance
*
@@ -2047,19 +1979,30 @@ static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
return NULL;
}
-static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
+static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
+ *netdev)
+{
+ struct bnx2fc_interface *interface;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_entry(interface, &if_list, list) {
+ if (interface->netdev == netdev)
+ return interface;
+ }
+ return NULL;
+}
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device
+ *phys_dev)
{
- struct list_head *list;
- struct list_head *temp;
struct bnx2fc_hba *hba;
/* Called with bnx2fc_dev_lock held */
- list_for_each_safe(list, temp, &adapter_list) {
- hba = (struct bnx2fc_hba *)list;
+ list_for_each_entry(hba, &adapter_list, list) {
if (hba->phys_dev == phys_dev)
return hba;
}
- printk(KERN_ERR PFX "hba_lookup: hba NULL\n");
+ printk(KERN_ERR PFX "adapter_lookup: hba NULL\n");
return NULL;
}
@@ -2071,6 +2014,8 @@ static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
static void bnx2fc_ulp_exit(struct cnic_dev *dev)
{
struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface, *tmp;
+ struct fc_lport *lport;
BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
@@ -2089,13 +2034,20 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
return;
}
- list_del_init(&hba->link);
+ list_del_init(&hba->list);
adapter_count--;
- if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ list_for_each_entry_safe(interface, tmp, &if_list, list) {
/* destroy not called yet, move to quiesced list */
- bnx2fc_netdev_cleanup(hba);
- bnx2fc_if_destroy(hba->ctlr.lp);
+ if (interface->hba == hba) {
+ bnx2fc_netdev_cleanup(interface);
+ bnx2fc_stop(interface);
+
+ list_del(&interface->list);
+ lport = interface->ctlr.lp;
+ bnx2fc_interface_put(interface);
+ bnx2fc_if_destroy(lport, hba);
+ }
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -2103,7 +2055,7 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
- bnx2fc_interface_destroy(hba);
+ bnx2fc_hba_destroy(hba);
}
/**
@@ -2259,6 +2211,7 @@ static int __init bnx2fc_mod_init(void)
}
INIT_LIST_HEAD(&adapter_list);
+ INIT_LIST_HEAD(&if_list);
mutex_init(&bnx2fc_dev_lock);
adapter_count = 0;
@@ -2336,16 +2289,17 @@ static void __exit bnx2fc_mod_exit(void)
mutex_unlock(&bnx2fc_dev_lock);
/* Unregister with cnic */
- list_for_each_entry_safe(hba, next, &to_be_deleted, link) {
- list_del_init(&hba->link);
- printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n",
- hba, atomic_read(&hba->kref.refcount));
+ list_for_each_entry_safe(hba, next, &to_be_deleted, list) {
+ list_del_init(&hba->list);
+ printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n",
+ hba);
bnx2fc_ulp_stop(hba);
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
&hba->reg_with_cnic))
- hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
- bnx2fc_interface_destroy(hba);
+ hba->cnic->unregister_device(hba->cnic,
+ CNIC_ULP_FCOE);
+ bnx2fc_hba_destroy(hba);
}
cnic_unregister_driver(CNIC_ULP_FCOE);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 09bdd9b..72cfb14 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -2,7 +2,7 @@
 * This file contains the code for low level functions that interact
* with 57712 FCoE firmware.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,7 +23,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
- struct fcoe_kcqe *conn_destroy);
+ struct fcoe_kcqe *destroy_kcqe);
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
@@ -67,7 +67,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
int rc = 0;
if (!hba->cnic) {
- printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
+ printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
return -ENODEV;
}
@@ -103,6 +103,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
+
fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
((u64) hba->hash_tbl_pbl_dma >> 32);
@@ -165,7 +166,8 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct fc_lport *lport = port->lport;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct kwqe *kwqe_arr[4];
struct fcoe_kwqe_conn_offload1 ofld_req1;
struct fcoe_kwqe_conn_offload2 ofld_req2;
@@ -227,7 +229,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req3.hdr.flags =
(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
- ofld_req3.vlan_tag = hba->vlan_id <<
+ ofld_req3.vlan_tag = interface->vlan_id <<
FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
@@ -277,8 +279,20 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
+	/*
+	 * Info from the PRLI response; used for sequence-level error
+	 * recovery support.
+	 */
+ if (tgt->dev_type == TYPE_TAPE) {
+ ofld_req3.flags |= 1 <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
+ ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
+ }
+
/* vlan flag */
- ofld_req3.flags |= (hba->vlan_enabled <<
+ ofld_req3.flags |= (interface->vlan_enabled <<
FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
	/* C2_VALID and ACK flags are not set as they are not supported */
@@ -300,12 +314,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
- ofld_req4.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
- ofld_req4.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
- ofld_req4.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
- ofld_req4.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
- ofld_req4.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
- ofld_req4.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
+ ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
+ /* fcf mac */
+ ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
+ ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
+ ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
+ ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
+ ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -335,7 +350,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct kwqe *kwqe_arr[2];
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable enbl_req;
struct fc_lport *lport = port->lport;
struct fc_rport *rport = tgt->rport;
@@ -358,12 +374,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
- enbl_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
- enbl_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
- enbl_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
- enbl_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
- enbl_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
- enbl_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
+ enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
+ enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
+ enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
+ enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
+ enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
+ enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
port_id = fc_host_port_id(lport->host);
if (port_id != tgt->sid) {
@@ -379,10 +395,10 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
enbl_req.d_id[0] = (port_id & 0x000000FF);
enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
- enbl_req.vlan_tag = hba->vlan_id <<
+ enbl_req.vlan_tag = interface->vlan_id <<
FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
- enbl_req.vlan_flag = hba->vlan_enabled;
+ enbl_req.vlan_flag = interface->vlan_enabled;
enbl_req.context_id = tgt->context_id;
enbl_req.conn_id = tgt->fcoe_conn_id;
@@ -402,7 +418,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable disable_req;
struct kwqe *kwqe_arr[2];
struct fc_rport *rport = tgt->rport;
@@ -423,12 +440,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
- disable_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
- disable_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
- disable_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
- disable_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
- disable_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
- disable_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
+ disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
+ disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
+ disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
+ disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
+ disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
+ disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
port_id = tgt->sid;
disable_req.s_id[0] = (port_id & 0x000000FF);
@@ -442,11 +459,11 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
disable_req.context_id = tgt->context_id;
disable_req.conn_id = tgt->fcoe_conn_id;
- disable_req.vlan_tag = hba->vlan_id <<
+ disable_req.vlan_tag = interface->vlan_id <<
FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
disable_req.vlan_tag |=
3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
- disable_req.vlan_flag = hba->vlan_enabled;
+ disable_req.vlan_flag = interface->vlan_enabled;
kwqe_arr[0] = (struct kwqe *) &disable_req;
@@ -525,7 +542,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
{
struct fcoe_port *port = tgt->port;
struct fc_lport *lport = port->lport;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_unsol_els *unsol_els;
struct fc_frame_header *fh;
struct fc_frame *fp;
@@ -586,7 +603,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
fr_eof(fp) = FC_EOF_T;
fr_crc(fp) = cpu_to_le32(~crc);
unsol_els->lport = lport;
- unsol_els->hba = hba;
+ unsol_els->hba = interface->hba;
unsol_els->fp = fp;
INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
@@ -608,9 +625,12 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
u32 frame_len, len;
struct bnx2fc_cmd *io_req = NULL;
struct fcoe_task_ctx_entry *task, *task_page;
- struct bnx2fc_hba *hba = tgt->port->priv;
+ struct bnx2fc_interface *interface = tgt->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
int task_idx, index;
int rc = 0;
+ u64 err_warn_bit_map;
+ u8 err_warn = 0xff;
BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
@@ -673,39 +693,43 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
- bnx2fc_return_rqe(tgt, 1);
if (xid > BNX2FC_MAX_XID) {
BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
xid);
- spin_unlock_bh(&tgt->tgt_lock);
- break;
+ goto ret_err_rqe;
}
task_idx = xid / BNX2FC_TASKS_PER_PAGE;
index = xid % BNX2FC_TASKS_PER_PAGE;
task_page = (struct fcoe_task_ctx_entry *)
- hba->task_ctx[task_idx];
+ hba->task_ctx[task_idx];
task = &(task_page[index]);
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
- if (!io_req) {
- spin_unlock_bh(&tgt->tgt_lock);
- break;
- }
+ if (!io_req)
+ goto ret_err_rqe;
if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
- spin_unlock_bh(&tgt->tgt_lock);
- break;
+ goto ret_err_rqe;
}
if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
&io_req->req_flags)) {
BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
"progress.. ignore unsol err\n");
- spin_unlock_bh(&tgt->tgt_lock);
- break;
+ goto ret_err_rqe;
+ }
+
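+		/* Build the 64-bit err_warn bitmap and note the first set bit */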
+ err_warn_bit_map = (u64)
+ ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
+ (u64)err_entry->data.err_warn_bitmap_lo;
+ for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
+ if (err_warn_bit_map & (u64)((u64)1 << i)) {
+ err_warn = i;
+ break;
+ }
}
/*
@@ -715,26 +739,61 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
* logging out the target, when the ABTS eventually
* times out.
*/
- if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
- &io_req->req_flags)) {
- /*
- * Cancel the timeout_work, as we received IO
- * completion with FW error.
- */
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release); /* timer hold */
-
- rc = bnx2fc_initiate_abts(io_req);
- if (rc != SUCCESS) {
- BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
- "failed. issue cleanup\n");
- rc = bnx2fc_initiate_cleanup(io_req);
- BUG_ON(rc);
- }
- } else
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
"in ABTS processing\n", xid);
+ goto ret_err_rqe;
+ }
+ BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
+ if (tgt->dev_type != TYPE_TAPE)
+ goto skip_rec;
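+		/* For tape devices, attempt REC-based recovery before falling back to ABTS */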
+ switch (err_warn) {
+ case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
+ case FCOE_ERROR_CODE_DATA_OOO_RO:
+ case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
+ case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
+ case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
+ case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
+ BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
+ xid);
+ memset(&io_req->err_entry, 0,
+ sizeof(struct fcoe_err_report_entry));
+ memcpy(&io_req->err_entry, err_entry,
+ sizeof(struct fcoe_err_report_entry));
+ if (!test_bit(BNX2FC_FLAG_SRR_SENT,
+ &io_req->req_flags)) {
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_rec(io_req);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (rc)
+ goto skip_rec;
+ } else
+ printk(KERN_ERR PFX "SRR in progress\n");
+ goto ret_err_rqe;
+ break;
+ default:
+ break;
+ }
+
+skip_rec:
+ set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
+ /*
+ * Cancel the timeout_work, as we received IO
+ * completion with FW error.
+ */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc != SUCCESS) {
+ printk(KERN_ERR PFX "err_warn: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ io_req->xid);
+ bnx2fc_initiate_cleanup(io_req);
+ }
+ret_err_rqe:
+ bnx2fc_return_rqe(tgt, 1);
spin_unlock_bh(&tgt->tgt_lock);
break;
@@ -755,6 +814,47 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
+ if (xid > BNX2FC_MAX_XID) {
+ BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
+ goto ret_warn_rqe;
+ }
+
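+		/* Build the 64-bit warning bitmap and note the first set bit */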
+ err_warn_bit_map = (u64)
+ ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
+ (u64)err_entry->data.err_warn_bitmap_lo;
+ for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
+ if (err_warn_bit_map & (u64) (1 << i)) {
+ err_warn = i;
+ break;
+ }
+ }
+ BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+ if (!io_req)
+ goto ret_warn_rqe;
+
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
+ printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
+ goto ret_warn_rqe;
+ }
+
+ memset(&io_req->err_entry, 0,
+ sizeof(struct fcoe_err_report_entry));
+ memcpy(&io_req->err_entry, err_entry,
+ sizeof(struct fcoe_err_report_entry));
+
+ if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
+ /* REC_TOV is not a warning code */
+ BUG_ON(1);
+ else
+ BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
+ret_warn_rqe:
bnx2fc_return_rqe(tgt, 1);
spin_unlock_bh(&tgt->tgt_lock);
break;
@@ -770,7 +870,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
struct fcoe_task_ctx_entry *task;
struct fcoe_task_ctx_entry *task_page;
struct fcoe_port *port = tgt->port;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_cmd *io_req;
int task_idx, index;
u16 xid;
@@ -781,7 +882,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
spin_lock_bh(&tgt->tgt_lock);
xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
if (xid >= BNX2FC_MAX_TASKS) {
- printk(KERN_ALERT PFX "ERROR:xid out of range\n");
+ printk(KERN_ERR PFX "ERROR:xid out of range\n");
spin_unlock_bh(&tgt->tgt_lock);
return;
}
@@ -861,6 +962,13 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
kref_put(&io_req->refcount, bnx2fc_cmd_release);
break;
+ case BNX2FC_SEQ_CLEANUP:
+ BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
+ io_req->xid);
+ bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
default:
printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
break;
@@ -962,8 +1070,10 @@ unlock:
1 - tgt->cq_curr_toggle_bit;
}
}
- bnx2fc_arm_cq(tgt);
- atomic_add(num_free_sqes, &tgt->free_sqes);
+ if (num_free_sqes) {
+ bnx2fc_arm_cq(tgt);
+ atomic_add(num_free_sqes, &tgt->free_sqes);
+ }
spin_unlock_bh(&tgt->cq_lock);
return 0;
}
@@ -983,7 +1093,7 @@ static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
if (!tgt) {
- printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
+ printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
return;
}
@@ -1004,6 +1114,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
{
struct bnx2fc_rport *tgt;
struct fcoe_port *port;
+ struct bnx2fc_interface *interface;
u32 conn_id;
u32 context_id;
int rc;
@@ -1018,8 +1129,9 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
ofld_kcqe->fcoe_conn_context_id);
port = tgt->port;
- if (hba != tgt->port->priv) {
- printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ interface = tgt->port->priv;
+ if (hba != interface->hba) {
+ printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
goto ofld_cmpl_err;
}
/*
@@ -1040,7 +1152,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
/* now enable the session */
rc = bnx2fc_send_session_enable_req(port, tgt);
if (rc) {
- printk(KERN_ALERT PFX "enable session failed\n");
+ printk(KERN_ERR PFX "enable session failed\n");
goto ofld_cmpl_err;
}
}
@@ -1063,6 +1175,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
struct fcoe_kcqe *ofld_kcqe)
{
struct bnx2fc_rport *tgt;
+ struct bnx2fc_interface *interface;
u32 conn_id;
u32 context_id;
@@ -1070,7 +1183,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
conn_id = ofld_kcqe->fcoe_conn_id;
tgt = hba->tgt_ofld_list[conn_id];
if (!tgt) {
- printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
+ printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
return;
}
@@ -1082,16 +1195,17 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
* and enable
*/
if (tgt->context_id != context_id) {
- printk(KERN_ALERT PFX "context id mis-match\n");
+ printk(KERN_ERR PFX "context id mis-match\n");
return;
}
- if (hba != tgt->port->priv) {
- printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ interface = tgt->port->priv;
+ if (hba != interface->hba) {
+ printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
goto enbl_cmpl_err;
}
- if (ofld_kcqe->completion_status) {
+ if (ofld_kcqe->completion_status)
goto enbl_cmpl_err;
- } else {
+ else {
/* enable successful - rport ready for issuing IOs */
set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
@@ -1114,14 +1228,14 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
conn_id = disable_kcqe->fcoe_conn_id;
tgt = hba->tgt_ofld_list[conn_id];
if (!tgt) {
- printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
+ printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
return;
}
BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
if (disable_kcqe->completion_status) {
- printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
+ printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
disable_kcqe->completion_status);
return;
} else {
@@ -1143,14 +1257,14 @@ static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
conn_id = destroy_kcqe->fcoe_conn_id;
tgt = hba->tgt_ofld_list[conn_id];
if (!tgt) {
- printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
+ printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
return;
}
BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
if (destroy_kcqe->completion_status) {
- printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
+ printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
destroy_kcqe->completion_status);
return;
} else {
@@ -1182,6 +1296,7 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
break;
case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
+ break;
default:
printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
}
@@ -1240,7 +1355,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
} else {
printk(KERN_ERR PFX "DESTROY success\n");
}
- hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
wake_up_interruptible(&hba->destroy_wait);
break;
@@ -1262,7 +1377,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
case FCOE_KCQE_OPCODE_FCOE_ERROR:
/* fall thru */
default:
- printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
+ printk(KERN_ERR PFX "unknown opcode 0x%x\n",
kcqe->op_code);
}
}
@@ -1305,7 +1420,8 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
struct fcoe_port *port = tgt->port;
u32 reg_off;
resource_size_t reg_base;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
reg_base = pci_resource_start(hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
@@ -1344,6 +1460,96 @@ void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}
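+/*
+ * Build a SEQUENCE_CLEANUP task context for the original I/O, positioned
+ * at the given relative offset within its data transfer.
+ */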
+void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
+ struct fcoe_task_ctx_entry *task,
+ struct bnx2fc_cmd *orig_io_req,
+ u32 offset)
+{
+ struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
+ struct bnx2fc_interface *interface = tgt->port->priv;
+ struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
+ struct fcoe_task_ctx_entry *orig_task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_ext_mul_sges_ctx *sgl;
+ u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
+ u8 orig_task_type;
+ u16 orig_xid = orig_io_req->xid;
+ u32 context_id = tgt->context_id;
+ u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
+ u32 orig_offset = offset;
+ int bd_count;
+ int orig_task_idx, index;
+ int i;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ orig_task_type = FCOE_TASK_TYPE_WRITE;
+ else
+ orig_task_type = FCOE_TASK_TYPE_READ;
+
+ /* Tx flags */
+ task->txwr_rxrd.const_ctx.tx_flags =
+ FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
+ /* init flags */
+ task->txwr_rxrd.const_ctx.init_flags = task_type <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+ task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
+ task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
+
+ task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
+ task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
+
+ bd_count = orig_io_req->bd_tbl->bd_valid;
+
+ /* obtain the appropriate bd entry from relative offset */
+ for (i = 0; i < bd_count; i++) {
+ if (offset < bd[i].buf_len)
+ break;
+ offset -= bd[i].buf_len;
+ }
+ phys_addr += (i * sizeof(struct fcoe_bd_ctx));
+
+ if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
+ (u32)phys_addr;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)phys_addr >> 32);
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
+ bd_count;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
+ offset; /* adjusted offset */
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
+ } else {
+ orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
+ index = orig_xid % BNX2FC_TASKS_PER_PAGE;
+
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[orig_task_idx];
+ orig_task = &(task_page[index]);
+
+ /* Multiple SGEs were used for this IO */
+ sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
+ sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
+ sgl->mul_sgl.sgl_size = bd_count;
+		sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
+ sgl->mul_sgl.cur_sge_idx = i;
+
+ memset(&task->rxwr_only.rx_seq_ctx, 0,
+ sizeof(struct fcoe_rx_seq_ctx));
+ task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
+ task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
+ }
+}
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u16 orig_xid)
@@ -1360,7 +1566,12 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
- task->txwr_rxrd.const_ctx.init_flags |=
+ if (tgt->dev_type == TYPE_TAPE)
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_TAPE <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ else
+ task->txwr_rxrd.const_ctx.init_flags |=
FCOE_TASK_DEV_TYPE_DISK <<
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
@@ -1420,7 +1631,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
/* init flags */
task->txwr_rxrd.const_ctx.init_flags = task_type <<
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
- task->txwr_rxrd.const_ctx.init_flags |=
+ if (tgt->dev_type == TYPE_TAPE)
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_TAPE <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ else
+ task->txwr_rxrd.const_ctx.init_flags |=
FCOE_TASK_DEV_TYPE_DISK <<
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
@@ -1477,6 +1693,7 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
struct bnx2fc_rport *tgt = io_req->tgt;
struct fcoe_cached_sge_ctx *cached_sge;
struct fcoe_ext_mul_sges_ctx *sgl;
+ int dev_type = tgt->dev_type;
u64 *fcp_cmnd;
u64 tmp_fcp_cmnd[4];
u32 context_id;
@@ -1494,20 +1711,40 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
task_type = FCOE_TASK_TYPE_READ;
/* Tx only */
+ bd_count = bd_tbl->bd_valid;
if (task_type == FCOE_TASK_TYPE_WRITE) {
- task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
- (u32)bd_tbl->bd_tbl_dma;
- task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
- (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
- task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
- bd_tbl->bd_valid;
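+		/* Single-buffer disk writes can use the cached SGE directly */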
+ if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
+ struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
+ fcoe_bd_tbl->buf_addr_lo;
+ task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
+ fcoe_bd_tbl->buf_addr_hi;
+ task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
+ fcoe_bd_tbl->buf_len;
+
+ task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
+ } else {
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
+ bd_tbl->bd_valid;
+ }
}
/*Tx Write Rx Read */
/* Init state to NORMAL */
- task->txwr_rxrd.const_ctx.init_flags = task_type <<
+ task->txwr_rxrd.const_ctx.init_flags |= task_type <<
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
- task->txwr_rxrd.const_ctx.init_flags |=
+ if (dev_type == TYPE_TAPE)
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_TAPE <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ else
+ task->txwr_rxrd.const_ctx.init_flags |=
FCOE_TASK_DEV_TYPE_DISK <<
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
@@ -1550,7 +1787,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
bd_count = bd_tbl->bd_valid;
- if (task_type == FCOE_TASK_TYPE_READ) {
+ if (task_type == FCOE_TASK_TYPE_READ &&
+ dev_type == TYPE_DISK) {
if (bd_count == 1) {
struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
@@ -1582,6 +1820,11 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
sgl->mul_sgl.sgl_size = bd_count;
}
+ } else {
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
+ sgl->mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ sgl->mul_sgl.sgl_size = bd_count;
}
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 45eba6d..6cc3789 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1,7 +1,7 @@
/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
* IO manager and SCSI IO processing.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,8 +18,6 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
-static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
- struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
@@ -29,10 +27,11 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
unsigned int timer_msec)
{
- struct bnx2fc_hba *hba = io_req->port->priv;
+ struct bnx2fc_interface *interface = io_req->port->priv;
- if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
- msecs_to_jiffies(timer_msec)))
+ if (queue_delayed_work(interface->timer_work_queue,
+ &io_req->timeout_work,
+ msecs_to_jiffies(timer_msec)))
kref_get(&io_req->refcount);
}
@@ -217,6 +216,11 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
return;
BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
+ if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
+ /* Do not call scsi done for this IO */
+ return;
+ }
+
bnx2fc_unmap_sg_list(io_req);
io_req->sc_cmd = NULL;
if (!sc_cmd) {
@@ -419,8 +423,8 @@ free_cmgr:
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
struct fcoe_port *port = tgt->port;
- struct bnx2fc_hba *hba = port->priv;
- struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
struct bnx2fc_cmd *io_req;
struct list_head *listp;
struct io_bdt *bd_tbl;
@@ -485,11 +489,12 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
kref_init(&io_req->refcount);
return io_req;
}
-static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
+
+struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
struct fcoe_port *port = tgt->port;
- struct bnx2fc_hba *hba = port->priv;
- struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
struct bnx2fc_cmd *io_req;
struct list_head *listp;
struct io_bdt *bd_tbl;
@@ -570,7 +575,8 @@ void bnx2fc_cmd_release(struct kref *ref)
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
- struct bnx2fc_hba *hba = io_req->port->priv;
+ struct bnx2fc_interface *interface = io_req->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
size_t sz = sizeof(struct fcoe_bd_ctx);
/* clear tm flags */
@@ -606,7 +612,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
struct bnx2fc_mp_req *mp_req;
struct fcoe_bd_ctx *mp_req_bd;
struct fcoe_bd_ctx *mp_resp_bd;
- struct bnx2fc_hba *hba = io_req->port->priv;
+ struct bnx2fc_interface *interface = io_req->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
dma_addr_t addr;
size_t sz;
@@ -682,7 +689,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct fcoe_port *port;
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct bnx2fc_rport *tgt;
struct bnx2fc_cmd *io_req;
struct bnx2fc_mp_req *tm_req;
@@ -699,10 +706,10 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
lport = shost_priv(host);
port = lport_priv(lport);
- hba = port->priv;
+ interface = port->priv;
if (rport == NULL) {
- printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
+ printk(KERN_ERR PFX "device_reset: rport is NULL\n");
rc = FAILED;
goto tmf_err;
}
@@ -745,7 +752,9 @@ retry_tmf:
rc = bnx2fc_init_mp_req(io_req);
if (rc == FAILED) {
printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
+ spin_lock_bh(&tgt->tgt_lock);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
goto tmf_err;
}
@@ -774,7 +783,8 @@ retry_tmf:
index = xid % BNX2FC_TASKS_PER_PAGE;
/* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
task = &(task_page[index]);
bnx2fc_init_mp_task(io_req, task);
@@ -806,10 +816,10 @@ retry_tmf:
spin_unlock_bh(&tgt->tgt_lock);
if (!rc) {
- printk(KERN_ERR PFX "task mgmt command failed...\n");
+ BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
rc = FAILED;
} else {
- printk(KERN_ERR PFX "task mgmt command success...\n");
+ BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
rc = SUCCESS;
}
tmf_err:
@@ -822,7 +832,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
struct bnx2fc_rport *tgt = io_req->tgt;
struct fc_rport *rport = tgt->rport;
struct fc_rport_priv *rdata = tgt->rdata;
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct fcoe_port *port;
struct bnx2fc_cmd *abts_io_req;
struct fcoe_task_ctx_entry *task;
@@ -839,7 +849,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
port = io_req->port;
- hba = port->priv;
+ interface = port->priv;
lport = port->lport;
if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
@@ -849,7 +859,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
}
if (rport == NULL) {
- printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
+ printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
rc = FAILED;
goto abts_err;
}
@@ -896,7 +906,8 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
index = xid % BNX2FC_TASKS_PER_PAGE;
/* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
task = &(task_page[index]);
bnx2fc_init_mp_task(abts_io_req, task);
@@ -924,11 +935,81 @@ abts_err:
return rc;
}
+int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
+ enum fc_rctl r_ctl)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ struct bnx2fc_interface *interface;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *seq_clnp_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ int task_idx, index;
+ u16 xid;
+ int rc = 0;
+
+ BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
+ orig_io_req->xid);
+ kref_get(&orig_io_req->refcount);
+
+ port = orig_io_req->port;
+ interface = port->priv;
+ lport = port->lport;
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
+ rc = -ENOMEM;
+ goto cleanup_err;
+ }
+
+ seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
+ if (!seq_clnp_req) {
+		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
+ rc = -ENOMEM;
+ kfree(cb_arg);
+ goto cleanup_err;
+ }
+ /* Initialize rest of io_req fields */
+ seq_clnp_req->sc_cmd = NULL;
+ seq_clnp_req->port = port;
+ seq_clnp_req->tgt = tgt;
+ seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+ xid = seq_clnp_req->xid;
+
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ cb_arg->aborted_io_req = orig_io_req;
+ cb_arg->io_req = seq_clnp_req;
+ cb_arg->r_ctl = r_ctl;
+ cb_arg->offset = offset;
+ seq_clnp_req->cb_arg = cb_arg;
+
+ printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
+ bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+cleanup_err:
+ return rc;
+}
+
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
struct fc_lport *lport;
struct bnx2fc_rport *tgt = io_req->tgt;
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct fcoe_port *port;
struct bnx2fc_cmd *cleanup_io_req;
struct fcoe_task_ctx_entry *task;
@@ -941,7 +1022,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
port = io_req->port;
- hba = port->priv;
+ interface = port->priv;
lport = port->lport;
cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
@@ -963,7 +1044,8 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
index = xid % BNX2FC_TASKS_PER_PAGE;
/* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
task = &(task_page[index]);
orig_xid = io_req->xid;
@@ -1031,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
lport = shost_priv(sc_cmd->device->host);
if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
- printk(KERN_ALERT PFX "eh_abort: link not ready\n");
+ printk(KERN_ERR PFX "eh_abort: link not ready\n");
return rc;
}
@@ -1062,7 +1144,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
* io_req is no longer in the active_q.
*/
if (tgt->flush_in_prog) {
- printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"flush in progress\n", io_req->xid);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
@@ -1070,7 +1152,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
}
if (io_req->on_active_queue == 0) {
- printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"not on active_q\n", io_req->xid);
/*
* This condition can happen only due to the FW bug,
@@ -1108,7 +1190,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
rc = bnx2fc_initiate_abts(io_req);
} else {
- printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"already in abts processing\n", io_req->xid);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
@@ -1149,6 +1231,42 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
return rc;
}
+void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 rx_state)
+{
+ struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
+ struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
+ u32 offset = cb_arg->offset;
+ enum fc_rctl r_ctl = cb_arg->r_ctl;
+ int rc = 0;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+
+ BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
+		       " cmd_type = %d\n",
+ seq_clnp_req->xid, seq_clnp_req->cmd_type);
+
+ if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
+ printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
+ seq_clnp_req->xid);
+ goto free_cb_arg;
+ }
+ kref_get(&orig_io_req->refcount);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (rc)
+ printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
+ " IO will abort\n");
+ seq_clnp_req->cb_arg = NULL;
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+free_cb_arg:
+ kfree(cb_arg);
+ return;
+}
+
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq)
@@ -1378,7 +1496,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
fc_hdr->fh_r_ctl);
}
if (!sc_cmd->SCp.ptr) {
- printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n");
+ printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
return;
}
switch (io_req->fcp_status) {
@@ -1410,7 +1528,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
io_req->on_tmf_queue = 0;
} else {
- printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n");
+ printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
return;
}
@@ -1597,7 +1715,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
		/* Invalid sense length. */
- printk(KERN_ALERT PFX "invalid sns length %d\n",
+ printk(KERN_ERR PFX "invalid sns length %d\n",
rq_buff_len);
/* reset rq_buff_len */
rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
@@ -1780,7 +1898,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
scsi_set_resid(sc_cmd, io_req->fcp_resid);
break;
default:
- printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n",
+ printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
io_req->fcp_status);
break;
}
@@ -1789,14 +1907,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
-static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
struct bnx2fc_cmd *io_req)
{
struct fcoe_task_ctx_entry *task;
struct fcoe_task_ctx_entry *task_page;
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct fcoe_port *port = tgt->port;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct fc_lport *lport = port->lport;
struct fcoe_dev_stats *stats;
int task_idx, index;
@@ -1854,7 +1973,8 @@ static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
}
/* Time IO req */
- bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
+ if (tgt->io_timeout)
+ bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
/* Obtain free SQ entry */
bnx2fc_add_2_sq(tgt, xid);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 3e892bd..d5311b5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -2,7 +2,7 @@
* Handles operations such as session offload/upload etc, and manages
* session resources such as connection id and qp resources.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -65,7 +65,8 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
{
struct fc_lport *lport = rdata->local_port;
struct fc_rport *rport = rdata->rport;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
int rval;
int i = 0;
@@ -237,7 +238,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
static void bnx2fc_upload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
tgt->num_active_ios.counter);
@@ -316,7 +318,8 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
{
struct fc_rport *rport = rdata->rport;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
@@ -350,6 +353,14 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt->rq_cons_idx = 0;
atomic_set(&tgt->num_active_ios, 0);
+ if (rdata->flags & FC_RP_FLAGS_RETRY) {
+ tgt->dev_type = TYPE_TAPE;
+ tgt->io_timeout = 0; /* use default ULP timeout */
+ } else {
+ tgt->dev_type = TYPE_DISK;
+ tgt->io_timeout = BNX2FC_IO_TIMEOUT;
+ }
+
/* initialize sq doorbell */
sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
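
Taken together with the earlier hunk that arms the command timer only when tgt->io_timeout is non-zero, the policy introduced above is: rports advertising FC_RP_FLAGS_RETRY are treated as tape and left to the upper-layer protocol's timeout, everything else gets the driver timeout. A one-function restatement (the value and names below are placeholders, not the driver's constants):

#include <stdbool.h>

#define DRIVER_IO_TIMEOUT 20	/* placeholder for BNX2FC_IO_TIMEOUT */

/* 0 means "do not arm the driver timer; let the ULP time the command". */
static unsigned int pick_io_timeout(bool rport_supports_retry)
{
	return rport_supports_retry ? 0 : DRIVER_IO_TIMEOUT;
}
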
@@ -392,7 +403,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
enum fc_rport_event event)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct fc_rport *rport = rdata->rport;
struct fc_rport_libfc_priv *rp;
struct bnx2fc_rport *tgt;
@@ -403,7 +415,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
switch (event) {
case RPORT_EV_READY:
if (!rport) {
- printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
+ printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
break;
}
@@ -415,7 +427,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
* We should not come here, as lport will
* take care of fabric login
*/
- printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
+ printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
rdata->ids.port_id);
break;
}
@@ -483,7 +495,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
break;
if (!rport) {
- printk(KERN_ALERT PFX "%x - rport not created Yet!!\n",
+ printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
port_id);
break;
}
@@ -537,7 +549,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
u32 port_id)
{
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_rport *tgt;
struct fc_rport_priv *rdata;
int i;
@@ -552,7 +565,7 @@ struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
"obtained\n");
return tgt;
} else {
- printk(KERN_ERR PFX "rport 0x%x "
+ BNX2FC_TGT_DBG(tgt, "rport 0x%x "
"is in DELETED state\n",
rdata->ids.port_id);
return NULL;
@@ -633,7 +646,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
&tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
- printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n",
+ printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
goto mem_alloc_failure;
}
@@ -646,7 +659,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
&tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
- printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n",
+ printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
goto mem_alloc_failure;
}
@@ -659,7 +672,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
&tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
- printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n",
+ printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
goto mem_alloc_failure;
}
@@ -671,7 +684,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
&tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
- printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n",
+ printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
goto mem_alloc_failure;
}
@@ -697,7 +710,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
&tgt->xferq_dma, GFP_KERNEL);
if (!tgt->xferq) {
- printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n",
+ printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
goto mem_alloc_failure;
}
@@ -711,7 +724,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
&tgt->confq_dma, GFP_KERNEL);
if (!tgt->confq) {
- printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
+ printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
goto mem_alloc_failure;
}
@@ -726,7 +739,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->confq_pbl_size,
&tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
- printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
+ printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
goto mem_alloc_failure;
}
@@ -751,7 +764,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->conn_db_mem_size,
&tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
- printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
+ printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
goto mem_alloc_failure;
}
@@ -767,7 +780,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
&tgt->lcq_dma, GFP_KERNEL);
if (!tgt->lcq) {
- printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
+ printk(KERN_ERR PFX "unable to allocate lcq %d\n",
tgt->lcq_mem_size);
goto mem_alloc_failure;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 030a96c..dba72a4 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -332,11 +332,11 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
{
struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_login_request *login_wqe;
- struct iscsi_login *login_hdr;
+ struct iscsi_login_req *login_hdr;
u32 dword;
bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
- login_hdr = (struct iscsi_login *)task->hdr;
+ login_hdr = (struct iscsi_login_req *)task->hdr;
login_wqe = (struct bnx2i_login_request *)
bnx2i_conn->ep->qp.sq_prod_qe;
@@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
nopout_wqe->itt = ((u16)task->itt |
(ISCSI_TASK_TYPE_MPATH <<
ISCSI_TMF_REQUEST_TYPE_SHIFT));
- nopout_wqe->ttt = nopout_hdr->ttt;
+ nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt);
nopout_wqe->flags = 0;
if (!unsol)
nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
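
The ttt fix above is an endianness conversion: the iSCSI nop-out header carries the target transfer tag as a big-endian (network order) field, while the work-queue element apparently expects it in CPU order, hence be32_to_cpu(). A standalone illustration using the libc equivalents:

#include <arpa/inet.h>	/* htonl()/ntohl() as stand-ins for cpu_to_be32()/be32_to_cpu() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wire_ttt = htonl(0x12345678u);	/* as carried in the PDU header */
	uint32_t host_ttt = ntohl(wire_ttt);	/* what the WQE field expects */
	printf("wire=0x%08x host=0x%08x\n", (unsigned)wire_ttt, (unsigned)host_ttt);
	return 0;
}
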
@@ -1349,7 +1349,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
struct bnx2i_cmd_response *resp_cqe;
struct bnx2i_cmd *bnx2i_cmd;
struct iscsi_task *task;
- struct iscsi_cmd_rsp *hdr;
+ struct iscsi_scsi_rsp *hdr;
u32 datalen = 0;
resp_cqe = (struct bnx2i_cmd_response *)cqe;
@@ -1376,7 +1376,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
}
bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
- hdr = (struct iscsi_cmd_rsp *)task->hdr;
+ hdr = (struct iscsi_scsi_rsp *)task->hdr;
resp_cqe = (struct bnx2i_cmd_response *)cqe;
hdr->opcode = resp_cqe->op_code;
hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 5c55a75..cffd4d7 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1213,7 +1213,7 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
struct bnx2i_conn *bnx2i_conn = conn->dd_data;
struct scsi_cmnd *sc = task->sc;
struct bnx2i_cmd *cmd = task->dd_data;
- struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
hba->max_sqes)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 2e7c136..27c9d65 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -128,25 +128,7 @@ struct c4_inquiry {
u8 reserved[2];
};
-struct rdac_controller {
- u8 subsys_id[SUBSYS_ID_LEN];
- u8 slot_id[SLOT_ID_LEN];
- int use_ms10;
- struct kref kref;
- struct list_head node; /* list of all controllers */
- union {
- struct rdac_pg_legacy legacy;
- struct rdac_pg_expanded expanded;
- } mode_select;
- u8 index;
- u8 array_name[ARRAY_LABEL_LEN];
- spinlock_t ms_lock;
- int ms_queued;
- struct work_struct ms_work;
- struct scsi_device *ms_sdev;
- struct list_head ms_head;
-};
-
+#define UNIQUE_ID_LEN 16
struct c8_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC8 */
@@ -159,12 +141,31 @@ struct c8_inquiry {
u8 vol_user_label_len;
u8 vol_user_label[60];
u8 array_uniq_id_len;
- u8 array_unique_id[16];
+ u8 array_unique_id[UNIQUE_ID_LEN];
u8 array_user_label_len;
u8 array_user_label[60];
u8 lun[8];
};
+struct rdac_controller {
+ u8 array_id[UNIQUE_ID_LEN];
+ int use_ms10;
+ struct kref kref;
+ struct list_head node; /* list of all controllers */
+ union {
+ struct rdac_pg_legacy legacy;
+ struct rdac_pg_expanded expanded;
+ } mode_select;
+ u8 index;
+ u8 array_name[ARRAY_LABEL_LEN];
+ struct Scsi_Host *host;
+ spinlock_t ms_lock;
+ int ms_queued;
+ struct work_struct ms_work;
+ struct scsi_device *ms_sdev;
+ struct list_head ms_head;
+};
+
struct c2_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC2 */
@@ -369,16 +370,17 @@ static void release_controller(struct kref *kref)
kfree(ctlr);
}
-static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
- char *array_name)
+static struct rdac_controller *get_controller(int index, char *array_name,
+ u8 *array_id, struct scsi_device *sdev)
{
struct rdac_controller *ctlr, *tmp;
spin_lock(&list_lock);
list_for_each_entry(tmp, &ctlr_list, node) {
- if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
- (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
+ if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
+ (tmp->index == index) &&
+ (tmp->host == sdev->host)) {
kref_get(&tmp->kref);
spin_unlock(&list_lock);
return tmp;
@@ -389,16 +391,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
goto done;
/* initialize fields of controller */
- memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
- memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
+ memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
+ ctlr->index = index;
+ ctlr->host = sdev->host;
memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
- /* update the controller index */
- if (slot_id[1] == 0x31)
- ctlr->index = 0;
- else
- ctlr->index = 1;
-
kref_init(&ctlr->kref);
ctlr->use_ms10 = -1;
ctlr->ms_queued = 0;
@@ -444,7 +441,7 @@ done:
}
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
- char *array_name)
+ char *array_name, u8 *array_id)
{
int err, i;
struct c8_inquiry *inqp;
@@ -463,6 +460,8 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
*(array_name+i) = inqp->array_user_label[(2*i)+1];
*(array_name+ARRAY_LABEL_LEN-1) = '\0';
+ memset(array_id, 0, UNIQUE_ID_LEN);
+ memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
}
return err;
}
@@ -504,16 +503,20 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
}
static int initialize_controller(struct scsi_device *sdev,
- struct rdac_dh_data *h, char *array_name)
+ struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
- int err;
+ int err, index;
struct c4_inquiry *inqp;
err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c4;
- h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id,
- array_name);
+ /* get the controller index */
+ if (inqp->slot_id[1] == 0x31)
+ index = 0;
+ else
+ index = 1;
+ h->ctlr = get_controller(index, array_name, array_id, sdev);
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
}
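
The rdac change above replaces the subsys_id/slot_id match with a three-part key: a controller object is shared only when the array's unique ID, the derived controller index (slot_id[1] == '1' selects index 0), and the owning Scsi_Host all match. A compact model of that comparison (types are stand-ins, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define UNIQUE_ID_LEN 16

struct host;				/* opaque stand-in for struct Scsi_Host */

struct ctlr_key {
	const struct host *host;
	int index;			/* 0 or 1, derived from slot_id[1] */
	uint8_t array_id[UNIQUE_ID_LEN];
};

static bool ctlr_matches(const struct ctlr_key *a, const struct ctlr_key *b)
{
	return a->host == b->host &&
	       a->index == b->index &&
	       memcmp(a->array_id, b->array_id, UNIQUE_ID_LEN) == 0;
}
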
@@ -835,6 +838,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int err;
char array_name[ARRAY_LABEL_LEN];
+ char array_id[UNIQUE_ID_LEN];
scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
@@ -849,11 +853,11 @@ static int rdac_bus_attach(struct scsi_device *sdev)
h->lun = UNINITIALIZED_LUN;
h->state = RDAC_STATE_ACTIVE;
- err = get_lun_info(sdev, h, array_name);
+ err = get_lun_info(sdev, h, array_name, array_id);
if (err != SCSI_DH_OK)
goto failed;
- err = initialize_controller(sdev, h, array_name);
+ err = initialize_controller(sdev, h, array_name, array_id);
if (err != SCSI_DH_OK)
goto failed;
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index 179ad77..bd9e31e 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -22,7 +22,7 @@
#include <linux/i2o-dev.h>
#include <linux/notifier.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/*
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 563c25c..34c8d82 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -431,6 +431,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
u8 flogi_maddr[ETH_ALEN];
const struct net_device_ops *ops;
+ rtnl_lock();
+
/*
* Don't listen for Ethernet packets anymore.
* synchronize_net() ensures that the packet handlers are not running
@@ -460,6 +462,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
" specific feature for LLD.\n");
}
+ rtnl_unlock();
+
/* Release the self-reference taken during fcoe_interface_create() */
fcoe_interface_put(fcoe);
}
@@ -486,6 +490,19 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
}
/**
+ * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
+ * @port: The FCoE port
+ * @skb: The FIP/FCoE packet to be sent
+ */
+static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
+{
+ if (port->fcoe_pending_queue.qlen)
+ fcoe_check_wait_queue(port->lport, skb);
+ else if (fcoe_start_io(skb))
+ fcoe_check_wait_queue(port->lport, skb);
+}
+
+/**
* fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
* @fip: The FCoE controller
* @skb: The FIP packet to be sent
@@ -493,7 +510,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
skb->dev = fcoe_from_ctlr(fip)->netdev;
- dev_queue_xmit(skb);
+ fcoe_port_send(lport_priv(fip->lp), skb);
}
/**
@@ -1256,30 +1273,20 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
/**
* fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
* command.
- * @curr_cpu: CPU which received request
*
- * This routine selects next CPU based on cpumask.
+ * This routine selects next CPU based on cpumask to distribute
+ * incoming requests in round robin.
*
- * Returns: int (CPU number). Caller to verify if returned CPU is online or not.
+ * Returns: int CPU number
*/
-static unsigned int fcoe_select_cpu(unsigned int curr_cpu)
+static inline unsigned int fcoe_select_cpu(void)
{
static unsigned int selected_cpu;
- if (num_online_cpus() == 1)
- return curr_cpu;
- /*
- * Doing following check, to skip "curr_cpu (smp_processor_id)"
- * from selection of CPU is intentional. This is to avoid same CPU
- * doing post-processing of command. "curr_cpu" to just receive
- * incoming request in case where rx_id is UNKNOWN and all other
- * CPU to actually process the command(s)
- */
- do {
- selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
- if (selected_cpu >= nr_cpu_ids)
- selected_cpu = cpumask_first(cpu_online_mask);
- } while (selected_cpu == curr_cpu);
+ selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
+ if (selected_cpu >= nr_cpu_ids)
+ selected_cpu = cpumask_first(cpu_online_mask);
+
return selected_cpu;
}
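
The simplified fcoe_select_cpu() above is a plain round-robin over the online CPU mask: advance from the last selection and wrap to the first online CPU when the end of the mask is reached. A userspace model of the same walk (a bitmask instead of cpumask_next()/cpumask_first()):

#include <stdio.h>

#define NR_CPUS 8

static unsigned int next_online(unsigned int after, unsigned int online_mask)
{
	for (unsigned int cpu = after + 1; cpu < NR_CPUS; cpu++)
		if (online_mask & (1u << cpu))
			return cpu;
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		if (online_mask & (1u << cpu))
			return cpu;		/* wrapped around */
	return 0;
}

int main(void)
{
	unsigned int online = 0x2D;		/* CPUs 0, 2, 3, 5 online */
	unsigned int cpu = 0;
	for (int i = 0; i < 6; i++) {
		cpu = next_online(cpu, online);
		printf("%u ", cpu);		/* prints: 2 3 5 0 2 3 */
	}
	printf("\n");
	return 0;
}
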
@@ -1349,30 +1356,26 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;
- fr->ptype = ptype;
/*
* In case the incoming frame's exchange is originated from
* the initiator, then received frame's exchange id is ANDed
* with fc_cpu_mask bits to get the same cpu on which exchange
- * was originated, otherwise just use the current cpu.
+ * was originated, otherwise select cpu using rx exchange id
+ * or fcoe_select_cpu().
*/
if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
else {
- cpu = smp_processor_id();
-
- if ((fh->fh_type == FC_TYPE_FCP) &&
- (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
- do {
- cpu = fcoe_select_cpu(cpu);
- } while (!cpu_online(cpu));
- } else if ((fh->fh_type == FC_TYPE_FCP) &&
- (ntohs(fh->fh_rx_id) != FC_XID_UNKNOWN)) {
+ if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
+ cpu = fcoe_select_cpu();
+ else
cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
- } else
- cpu = smp_processor_id();
}
+
+ if (cpu >= nr_cpu_ids)
+ goto err;
+
fps = &per_cpu(fcoe_percpu, cpu);
spin_lock_bh(&fps->fcoe_rx_list.lock);
if (unlikely(!fps->thread)) {
@@ -1571,11 +1574,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* send down to lld */
fr_dev(fp) = lport;
- if (port->fcoe_pending_queue.qlen)
- fcoe_check_wait_queue(lport, skb);
- else if (fcoe_start_io(skb))
- fcoe_check_wait_queue(lport, skb);
-
+ fcoe_port_send(port, skb);
return 0;
}
@@ -1955,11 +1954,8 @@ static void fcoe_destroy_work(struct work_struct *work)
fcoe_if_destroy(port->lport);
/* Do not tear down the fcoe interface for NPIV port */
- if (!npiv) {
- rtnl_lock();
+ if (!npiv)
fcoe_interface_cleanup(fcoe);
- rtnl_unlock();
- }
mutex_unlock(&fcoe_config_mutex);
}
@@ -2013,8 +2009,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
rc = -EIO;
+ rtnl_unlock();
fcoe_interface_cleanup(fcoe);
- goto out_nodev;
+ goto out_nortnl;
}
/* Make this the "master" N_Port */
@@ -2031,6 +2028,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
out_nodev:
rtnl_unlock();
+out_nortnl:
mutex_unlock(&fcoe_config_mutex);
return rc;
}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6bba23a..b200b73 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -46,7 +46,7 @@
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/kthread.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
@@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
removed[*nremoved] = h->dev[entry];
(*nremoved)++;
+
+ /*
+ * New physical devices won't have target/lun assigned yet
+ * so we need to preserve the values in the slot we are replacing.
+ */
+ if (new_entry->target == -1) {
+ new_entry->target = h->dev[entry]->target;
+ new_entry->lun = h->dev[entry]->lun;
+ }
+
h->dev[entry] = new_entry;
added[*nadded] = new_entry;
(*nadded)++;
@@ -1219,8 +1229,8 @@ static void complete_scsi_command(struct CommandList *cp)
dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
break;
case CMD_UNSOLICITED_ABORT:
- cmd->result = DID_RESET << 16;
- dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited "
+ cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
+ dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
"abort\n", cp);
break;
case CMD_TIMEOUT:
@@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
}
static int hpsa_update_device_info(struct ctlr_info *h,
- unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
+ unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
+ unsigned char *is_OBDR_device)
{
-#define OBDR_TAPE_INQ_SIZE 49
+
+#define OBDR_SIG_OFFSET 43
+#define OBDR_TAPE_SIG "$DR-10"
+#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
+#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
+
unsigned char *inq_buff;
+ unsigned char *obdr_sig;
inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
if (!inq_buff)
@@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,
else
this_device->raid_level = RAID_UNKNOWN;
+ if (is_OBDR_device) {
+ /* See if this is a One-Button-Disaster-Recovery device
+ * by looking for "$DR-10" at offset 43 in inquiry data.
+ */
+ obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
+ *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
+ strncmp(obdr_sig, OBDR_TAPE_SIG,
+ OBDR_SIG_LEN) == 0);
+ }
+
kfree(inq_buff);
return 0;
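
The OBDR test introduced above is a fixed-offset signature check: the inquiry buffer is sized to cover offset 43 plus the six-byte "$DR-10" string, and a TYPE_ROM device is only kept if that signature matches. The core of the check, standalone:

#include <stddef.h>
#include <string.h>

#define OBDR_SIG_OFFSET	43
#define OBDR_TAPE_SIG	"$DR-10"
#define OBDR_SIG_LEN	(sizeof(OBDR_TAPE_SIG) - 1)

static int is_obdr_signature(const unsigned char *inq, size_t len)
{
	if (len < OBDR_SIG_OFFSET + OBDR_SIG_LEN)
		return 0;
	return memcmp(inq + OBDR_SIG_OFFSET, OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0;
}
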
@@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
return 0;
}
- if (hpsa_update_device_info(h, scsi3addr, this_device))
+ if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
return 0;
(*nmsa2xxx_enclosures)++;
hpsa_set_bus_target_lun(this_device, bus, target, 0);
@@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
*/
struct ReportLUNdata *physdev_list = NULL;
struct ReportLUNdata *logdev_list = NULL;
- unsigned char *inq_buff = NULL;
u32 nphysicals = 0;
u32 nlogicals = 0;
u32 ndev_allocated = 0;
@@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
GFP_KERNEL);
physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
- inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
- if (!currentsd || !physdev_list || !logdev_list ||
- !inq_buff || !tmpdevice) {
+ if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
dev_err(&h->pdev->dev, "out of memory\n");
goto out;
}
@@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
/* adjust our table of devices */
nmsa2xxx_enclosures = 0;
for (i = 0; i < nphysicals + nlogicals + 1; i++) {
- u8 *lunaddrbytes;
+ u8 *lunaddrbytes, is_OBDR = 0;
/* Figure out where the LUN ID info is coming from */
lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
@@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
continue;
/* Get device type, vendor, model, device id */
- if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
+ if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
+ &is_OBDR))
continue; /* skip it if we can't talk to it. */
figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
tmpdevice);
@@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
hpsa_set_bus_target_lun(this_device, bus, target, lun);
switch (this_device->devtype) {
- case TYPE_ROM: {
+ case TYPE_ROM:
/* We don't *really* support actual CD-ROM devices,
* just "One Button Disaster Recovery" tape drive
* which temporarily pretends to be a CD-ROM drive.
@@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
* device by checking for "$DR-10" in bytes 43-48 of
* the inquiry data.
*/
- char obdr_sig[7];
-#define OBDR_TAPE_SIG "$DR-10"
- strncpy(obdr_sig, &inq_buff[43], 6);
- obdr_sig[6] = '\0';
- if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
- /* Not OBDR device, ignore it. */
- break;
- }
- ncurrent++;
+ if (is_OBDR)
+ ncurrent++;
break;
case TYPE_DISK:
if (i < nphysicals)
@@ -1947,7 +1965,6 @@ out:
for (i = 0; i < ndev_allocated; i++)
kfree(currentsd[i]);
kfree(currentsd);
- kfree(inq_buff);
kfree(physdev_list);
kfree(logdev_list);
}
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6d8dcd4..7f53cea 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -214,7 +214,7 @@ static void SA5_submit_command(struct ctlr_info *h,
dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
c->Header.Tag.lower);
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
- (void) readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
h->commands_outstanding++;
if (h->commands_outstanding > h->max_outstanding)
h->max_outstanding = h->commands_outstanding;
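
The trailing readl() after the doorbell write is the usual posted-write flush: a read on the same PCI path cannot pass the preceding write, so it forces the command submission out of the posting buffers. The hunk only changes which register is read back (the scratchpad, presumably chosen as a side-effect-free register to read). In outline:

#include <stdint.h>

/* Outline of the write-then-read flush; real kernel code uses
 * writel()/readl() on an ioremap()ed BAR, not raw pointers. */
static inline void submit_and_flush(volatile uint32_t *doorbell,
				    volatile uint32_t *readback_reg,
				    uint32_t cmd_busaddr)
{
	*doorbell = cmd_busaddr;	/* writel(c->busaddr, ...) */
	(void)*readback_reg;		/* readl(...) flushes the posted write */
}
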
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 888086c..8d63630 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -8778,14 +8778,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
if (rc != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev, "Failed to save PCI config space\n");
rc = -EIO;
- goto cleanup_nomem;
+ goto out_msi_disable;
}
if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
- goto cleanup_nomem;
+ goto out_msi_disable;
if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
- goto cleanup_nomem;
+ goto out_msi_disable;
if (ioa_cfg->sis64)
ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
@@ -8800,7 +8800,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
if (rc < 0) {
dev_err(&pdev->dev,
"Couldn't allocate enough memory for device driver!\n");
- goto cleanup_nomem;
+ goto out_msi_disable;
}
/*
@@ -8845,10 +8845,10 @@ out:
cleanup_nolog:
ipr_free_mem(ioa_cfg);
-cleanup_nomem:
- iounmap(ipr_regs);
out_msi_disable:
pci_disable_msi(pdev);
+cleanup_nomem:
+ iounmap(ipr_regs);
out_release_regions:
pci_release_regions(pdev);
out_scsi_host_put:
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 26072f1..6981b77 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost)
break;
case SCU_COMPLETION_TYPE_EVENT:
+ sci_controller_event_completion(ihost, ent);
+ break;
+
case SCU_COMPLETION_TYPE_NOTIFY: {
event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
(SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
@@ -1091,6 +1094,7 @@ static void isci_host_completion_routine(unsigned long data)
struct isci_request *request;
struct isci_request *next_request;
struct sas_task *task;
+ u16 active;
INIT_LIST_HEAD(&completed_request_list);
INIT_LIST_HEAD(&errored_request_list);
@@ -1181,6 +1185,13 @@ static void isci_host_completion_routine(unsigned long data)
}
}
+	/* the coalescence timeout doubles at each encoding step, so
+ * update it based on the ilog2 value of the outstanding requests
+ */
+ active = isci_tci_active(ihost);
+ writel(SMU_ICC_GEN_VAL(NUMBER, active) |
+ SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
+ &ihost->smu_registers->interrupt_coalesce_control);
}
/**
@@ -1471,7 +1482,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
/* set the default interrupt coalescence number and timeout value. */
- sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+ sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}
static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 062101a..9f33831 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -369,6 +369,9 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
+/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
+#define ISCI_COALESCE_BASE 9
+
/* expander attached sata devices require 3 rnc slots */
static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
{
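
To see what the new coalescing update computes: the timer field is an encoding in which each +1 step doubles the delay, and ISCI_COALESCE_BASE (9, roughly 3 to 5us per command) is scaled by ilog2 of the outstanding request count, so the delay grows with queue depth. A worked example:

#include <stdio.h>

#define COALESCE_BASE 9		/* ~3-5us per command at this encoding */

static unsigned int ilog2_u32(unsigned int v)	/* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	for (unsigned int active = 1; active <= 64; active *= 4)
		printf("active=%2u -> timer encoding %u\n",
		       active, COALESCE_BASE + ilog2_u32(active));
	/* active=1 -> 9, 4 -> 11, 16 -> 13, 64 -> 15 */
	return 0;
}
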
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 61e0d09..29aa34e 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -59,10 +59,19 @@
#include <linux/firmware.h>
#include <linux/efi.h>
#include <asm/string.h>
+#include <scsi/scsi_host.h>
#include "isci.h"
#include "task.h"
#include "probe_roms.h"
+#define MAJ 1
+#define MIN 0
+#define BUILD 0
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+ __stringify(BUILD)
+
+MODULE_VERSION(DRV_VERSION);
+
static struct scsi_transport_template *isci_transport_template;
static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
@@ -113,6 +122,22 @@ unsigned char max_concurr_spinup = 1;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+struct device_attribute *isci_host_attrs[] = {
+ &dev_attr_isci_id,
+ NULL
+};
+
static struct scsi_host_template isci_sht = {
.module = THIS_MODULE,
@@ -138,6 +163,7 @@ static struct scsi_host_template isci_sht = {
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+ .shost_attrs = isci_host_attrs,
};
static struct sas_domain_function_template isci_transport_ops = {
@@ -232,17 +258,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
return 0;
}
-static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
- struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
- struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
-}
-
-static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
-
static void isci_unregister(struct isci_host *isci_host)
{
struct Scsi_Host *shost;
@@ -251,7 +266,6 @@ static void isci_unregister(struct isci_host *isci_host)
return;
shost = isci_host->shost;
- device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
sas_unregister_ha(&isci_host->sas_ha);
@@ -415,14 +429,8 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
if (err)
goto err_shost_remove;
- err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
- if (err)
- goto err_unregister_ha;
-
return isci_host;
- err_unregister_ha:
- sas_unregister_ha(&(isci_host->sas_ha));
err_shost_remove:
scsi_remove_host(shost);
err_shost:
@@ -540,7 +548,8 @@ static __init int isci_init(void)
{
int err;
- pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
+ pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
+ DRV_NAME, DRV_VERSION);
isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
if (!isci_transport_template)
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 8d9192d..09e6113 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
u32 parity_count = 0;
u32 llctl, link_rate;
u32 clksm_value = 0;
+ u32 sp_timeouts = 0;
iphy->link_layer_registers = reg;
@@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
writel(llctl, &iphy->link_layer_registers->link_layer_control);
+ sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+
+ /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
+ sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
+
+ /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can
+ * lock with 3Gb drive when SCU max rate is set to 1.5Gb.
+ */
+ sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
+
+ writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+
if (is_a2(ihost->pdev)) {
/* Program the max ARB time for the PHY to 700us so we inter-operate with
* the PMC expander which shuts down PHYs if the expander PHY generates too
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 9b266c7..00afc73 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers {
#define SCU_AFE_XCVRCR_OFFSET 0x00DC
#define SCU_AFE_LUTCR_OFFSET 0x00E0
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL)
+
+#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value)
+
#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
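
The RATE_CHANGE update in the phy hunk is a byte-wide read-modify-write of bits 31:24 of the phy timeouts register: clear the field, then write 0x3B (59us) in place of the 0x36 (54us) default. SCU_GEN_VALUE() is not shown in this patch; the shift-and-mask below is the assumed expansion, used only to make the arithmetic concrete:

#include <stdint.h>
#include <stdio.h>

#define RATE_CHANGE_SHIFT 24u
#define RATE_CHANGE_MASK  0xFF000000u
#define RATE_CHANGE_VAL(v) (((uint32_t)(v) << RATE_CHANGE_SHIFT) & RATE_CHANGE_MASK)

int main(void)
{
	uint32_t sp_timeouts = 0x36010203u;		/* default 0x36 in bits 31:24 */
	sp_timeouts &= ~RATE_CHANGE_VAL(0xFF);		/* clear the field */
	sp_timeouts |= RATE_CHANGE_VAL(0x3B);		/* 59us rate-change timeout */
	printf("0x%08x\n", sp_timeouts);		/* prints 0x3b010203 */
	return 0;
}
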
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index a46e07a..b5d3a8c 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq)
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
return SCI_SUCCESS;
case SCI_REQ_TASK_WAIT_TC_RESP:
+ /* The task frame was already confirmed to have been
+ * sent by the SCU HW. Since the state machine is
+ * now only waiting for the task response itself,
+ * abort the request and complete it immediately
+ * and don't wait for the task response.
+ */
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
return SCI_SUCCESS;
case SCI_REQ_ABORTING:
- sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
- return SCI_SUCCESS;
+ /* If a request has a termination requested twice, return
+ * a failure indication, since HW confirmation of the first
+ * abort is still outstanding.
+ */
case SCI_REQ_COMPLETED:
default:
dev_warn(&ireq->owning_controller->pdev->dev,
@@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion(
}
}
-static void isci_request_process_stp_response(struct sas_task *task,
- void *response_buffer)
+static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
{
- struct dev_to_host_fis *d2h_reg_fis = response_buffer;
struct task_status_struct *ts = &task->task_status;
struct ata_task_resp *resp = (void *)&ts->buf[0];
- resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
- memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
+ resp->frame_len = sizeof(*fis);
+ memcpy(resp->ending_fis, fis, sizeof(*fis));
ts->buf_valid_size = sizeof(*resp);
- /**
- * If the device fault bit is set in the status register, then
+ /* If the device fault bit is set in the status register, then
* set the sense data and return.
*/
- if (d2h_reg_fis->status & ATA_DF)
+ if (fis->status & ATA_DF)
ts->stat = SAS_PROTO_RESPONSE;
else
ts->stat = SAM_STAT_GOOD;
@@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
{
struct sas_task *task = isci_request_access_task(request);
struct ssp_response_iu *resp_iu;
- void *resp_buf;
unsigned long task_flags;
struct isci_remote_device *idev = isci_lookup_device(task->dev);
enum service_response response = SAS_TASK_UNDELIVERED;
@@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
task);
if (sas_protocol_ata(task->task_proto)) {
- resp_buf = &request->stp.rsp;
- isci_request_process_stp_response(task,
- resp_buf);
+ isci_process_stp_response(task, &request->stp.rsp);
} else if (SAS_PROTOCOL_SSP == task->task_proto) {
/* crack the iu response buffer. */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
index e9e1e2a..16f88ab 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.c
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
*/
buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
- size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
+ size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
/*
* The Unsolicited Frame buffers are set at the start of the UF
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
index 31cb950..75d8966 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.h
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -214,7 +214,7 @@ struct sci_uf_address_table_array {
* starting address of the UF address table.
* 64-bit pointers are required by the hardware.
*/
- dma_addr_t *array;
+ u64 *array;
/**
* This field specifies the physical address location for the UF
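
The dma_addr_t to u64 change matters because, per the comment above, the hardware table holds 64-bit pointers, while sizeof(dma_addr_t) is only 4 bytes on 32-bit configurations without 64-bit DMA addressing; sizing the allocation with the element type the hardware expects avoids an undersized table. Concretely:

#include <stdint.h>
#include <stdio.h>

#define MAX_UNSOLICITED_FRAMES 128

typedef uint32_t dma_addr32_t;		/* models dma_addr_t on a 32-bit build */

int main(void)
{
	size_t narrow = MAX_UNSOLICITED_FRAMES * sizeof(dma_addr32_t);
	size_t wide   = MAX_UNSOLICITED_FRAMES * sizeof(uint64_t);
	printf("sized by dma_addr_t: %zu bytes, sized by u64: %zu bytes\n",
	       narrow, wide);
	return 0;
}
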
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index f5a0665..d261e98 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -494,6 +494,9 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
*/
error = lport->tt.frame_send(lport, fp);
+ if (fh->fh_type == FC_TYPE_BLS)
+ return error;
+
/*
* Update the exchange and sequence flags,
* assuming all frames for the sequence have been sent.
@@ -575,42 +578,35 @@ static void fc_seq_set_resp(struct fc_seq *sp,
}
/**
- * fc_seq_exch_abort() - Abort an exchange and sequence
- * @req_sp: The sequence to be aborted
+ * fc_exch_abort_locked() - Abort an exchange
+ * @ep: The exchange to be aborted
* @timer_msec: The period of time to wait before aborting
*
- * Generally called because of a timeout or an abort from the upper layer.
+ * Locking notes: Called with exch lock held
+ *
+ * Return value: 0 on success else error code
*/
-static int fc_seq_exch_abort(const struct fc_seq *req_sp,
- unsigned int timer_msec)
+static int fc_exch_abort_locked(struct fc_exch *ep,
+ unsigned int timer_msec)
{
struct fc_seq *sp;
- struct fc_exch *ep;
struct fc_frame *fp;
int error;
- ep = fc_seq_exch(req_sp);
-
- spin_lock_bh(&ep->ex_lock);
if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
- ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
- spin_unlock_bh(&ep->ex_lock);
+ ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
return -ENXIO;
- }
/*
* Send the abort on a new sequence if possible.
*/
sp = fc_seq_start_next_locked(&ep->seq);
- if (!sp) {
- spin_unlock_bh(&ep->ex_lock);
+ if (!sp)
return -ENOMEM;
- }
ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
if (timer_msec)
fc_exch_timer_set_locked(ep, timer_msec);
- spin_unlock_bh(&ep->ex_lock);
/*
* If not logged into the fabric, don't send ABTS but leave
@@ -633,6 +629,28 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
}
/**
+ * fc_seq_exch_abort() - Abort an exchange and sequence
+ * @req_sp: The sequence to be aborted
+ * @timer_msec: The period of time to wait before aborting
+ *
+ * Generally called because of a timeout or an abort from the upper layer.
+ *
+ * Return value: 0 on success else error code
+ */
+static int fc_seq_exch_abort(const struct fc_seq *req_sp,
+ unsigned int timer_msec)
+{
+ struct fc_exch *ep;
+ int error;
+
+ ep = fc_seq_exch(req_sp);
+ spin_lock_bh(&ep->ex_lock);
+ error = fc_exch_abort_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+ return error;
+}
+
+/**
* fc_exch_timeout() - Handle exchange timer expiration
* @work: The work_struct identifying the exchange that timed out
*/
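
The split into fc_exch_abort_locked()/fc_seq_exch_abort() above is the standard locked/unlocked pairing: the _locked variant assumes the exchange lock is already held, so fc_exch_reset() (which holds ex_lock) can reuse the abort path, while the public wrapper takes and releases the lock itself. The generic shape (names below are not the libfc API):

#include <pthread.h>

struct exch {
	pthread_mutex_t lock;
	int state;
};

static int abort_locked(struct exch *ep)	/* caller holds ep->lock */
{
	ep->state |= 0x1;			/* mark the exchange aborting */
	return 0;
}

static int abort_exch(struct exch *ep)		/* public entry, takes the lock */
{
	pthread_mutex_lock(&ep->lock);
	int rc = abort_locked(ep);
	pthread_mutex_unlock(&ep->lock);
	return rc;
}
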
@@ -802,10 +820,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
- if (ep) {
+ if (ep && ep->xid == xid)
fc_exch_hold(ep);
- WARN_ON(ep->xid != xid);
- }
spin_unlock_bh(&pool->lock);
}
return ep;
@@ -1717,6 +1733,7 @@ static void fc_exch_reset(struct fc_exch *ep)
int rc = 1;
spin_lock_bh(&ep->ex_lock);
+ fc_exch_abort_locked(ep, 0);
ep->state |= FC_EX_RST_CLEANUP;
if (cancel_delayed_work(&ep->timeout_work))
atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
@@ -1964,6 +1981,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
struct fc_exch *ep;
struct fc_seq *sp = NULL;
struct fc_frame_header *fh;
+ struct fc_fcp_pkt *fsp = NULL;
int rc = 1;
ep = fc_exch_alloc(lport, fp);
@@ -1986,8 +2004,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
fc_exch_setup_hdr(ep, fp, ep->f_ctl);
sp->cnt++;
- if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
+ if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
+ fsp = fr_fsp(fp);
fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
+ }
if (unlikely(lport->tt.frame_send(lport, fp)))
goto err;
@@ -2001,7 +2021,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
spin_unlock_bh(&ep->ex_lock);
return sp;
err:
- fc_fcp_ddp_done(fr_fsp(fp));
+ if (fsp)
+ fc_fcp_ddp_done(fsp);
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
@@ -2465,8 +2486,11 @@ int fc_setup_exch_mgr(void)
fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
if (!fc_exch_workqueue)
- return -ENOMEM;
+ goto err;
return 0;
+err:
+ kmem_cache_destroy(fc_em_cachep);
+ return -ENOMEM;
}
/**
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 9cd2149..4c41ee8 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -498,7 +498,7 @@ crc_err:
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->ErrorFrames++;
/* per cpu count, not total count, but OK for limit */
- if (stats->InvalidCRCCount++ < 5)
+ if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
printk(KERN_WARNING "libfc: CRC error on data "
"frame for port (%6.6x)\n",
lport->port_id);
@@ -690,7 +690,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
}
/**
- * fc_fcp_abts_resp() - Send an ABTS response
+ * fc_fcp_abts_resp() - Receive an ABTS response
* @fsp: The FCP packet that is being aborted
* @fp: The response frame
*/
@@ -730,7 +730,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
}
/**
- * fc_fcp_recv() - Reveive an FCP frame
+ * fc_fcp_recv() - Receive an FCP frame
* @seq: The sequence the frame is on
* @fp: The received frame
* @arg: The related FCP packet
@@ -1084,6 +1084,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
if (unlikely(rc)) {
spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ fsp->cmd->SCp.ptr = NULL;
list_del(&fsp->list);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}
@@ -1645,12 +1646,10 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
struct fc_seq *seq;
struct fcp_srr *srr;
struct fc_frame *fp;
- u8 cdb_op;
unsigned int rec_tov;
rport = fsp->rport;
rpriv = rport->dd_data;
- cdb_op = fsp->cdb_cmd.fc_cdb[0];
if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
rpriv->rp_state != RPORT_ST_READY)
@@ -2020,6 +2019,11 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct fc_fcp_internal *si;
int rc = FAILED;
unsigned long flags;
+ int rval;
+
+ rval = fc_block_scsi_eh(sc_cmd);
+ if (rval)
+ return rval;
lport = shost_priv(sc_cmd->device->host);
if (lport->state != LPORT_ST_READY)
@@ -2069,9 +2073,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
int rc = FAILED;
int rval;
- rval = fc_remote_port_chkready(rport);
+ rval = fc_block_scsi_eh(sc_cmd);
if (rval)
- goto out;
+ return rval;
lport = shost_priv(sc_cmd->device->host);
@@ -2117,6 +2121,8 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
FC_SCSI_DBG(lport, "Resetting host\n");
+ fc_block_scsi_eh(sc_cmd);
+
lport->tt.lport_reset(lport);
wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e008b16..628f347 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -88,6 +88,7 @@
*/
#include <linux/timer.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -1029,8 +1030,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
FCH_EVT_LIPRESET, 0);
fc_vports_linkchange(lport);
fc_lport_reset_locked(lport);
- if (lport->link_up)
+ if (lport->link_up) {
+ /*
+		 * Wait up to the resource allocation timeout before
+		 * re-doing the login, since incomplete FIP exchanges
+		 * from the last session may collide with exchanges
+		 * in the new session.
+ */
+ msleep(lport->r_a_tov);
fc_lport_enter_flogi(lport);
+ }
}
/**
@@ -1352,7 +1361,6 @@ static void fc_lport_timeout(struct work_struct *work)
WARN_ON(1);
break;
case LPORT_ST_READY:
- WARN_ON(1);
break;
case LPORT_ST_RESET:
break;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d7a4120..256a999 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -84,22 +84,6 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
__func__, ##arg); \
} while (0);
-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
-#define SNA32_CHECK 2147483648UL
-
-static int iscsi_sna_lt(u32 n1, u32 n2)
-{
- return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
-}
-
-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
-static int iscsi_sna_lte(u32 n1, u32 n2)
-{
- return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
-}
-
inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
{
struct Scsi_Host *shost = conn->session->host;
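
The helpers deleted above implement RFC 1982 serial number arithmetic for 32-bit iSCSI sequence numbers (presumably relocated to a shared header; this patch does not show the destination). The comparison must treat a counter that has wrapped as still "ahead", which ordinary unsigned comparison gets wrong. The same logic, standalone, with a wraparound example:

#include <stdint.h>
#include <stdio.h>

#define SNA32_CHECK 2147483648UL

/* RFC 1982 "less than" for 32-bit serial numbers (wraparound-safe). */
static int sna_lt(uint32_t n1, uint32_t n2)
{
	return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
}

int main(void)
{
	/* After the counter wraps, 0xFFFFFFF0 still precedes 5. */
	printf("%d %d\n", sna_lt(0xFFFFFFF0u, 5u), sna_lt(5u, 0xFFFFFFF0u));	/* 1 0 */
	return 0;
}
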
@@ -360,7 +344,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
struct iscsi_conn *conn = task->conn;
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = task->sc;
- struct iscsi_cmd *hdr;
+ struct iscsi_scsi_req *hdr;
unsigned hdrlength, cmd_len;
itt_t itt;
int rc;
@@ -374,7 +358,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
if (rc)
return rc;
}
- hdr = (struct iscsi_cmd *) task->hdr;
+ hdr = (struct iscsi_scsi_req *)task->hdr;
itt = hdr->itt;
memset(hdr, 0, sizeof(*hdr));
@@ -830,7 +814,7 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_task *task, char *data,
int datalen)
{
- struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = task->sc;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 874e29d..f84084b 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -849,6 +849,9 @@ static struct domain_device *sas_ex_discover_expander(
res = sas_discover_expander(child);
if (res) {
+ spin_lock_irq(&parent->port->dev_list_lock);
+ list_del(&child->dev_list_node);
+ spin_unlock_irq(&parent->port->dev_list_lock);
kfree(child);
return NULL;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8ec2c86..c088a36 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -20,6 +20,11 @@
*******************************************************************/
#include <scsi/scsi_host.h>
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+#define CONFIG_SCSI_LPFC_DEBUG_FS
+#endif
+
struct lpfc_sli2_slim;
#define LPFC_PCI_DEV_LP 0x1
@@ -465,9 +470,10 @@ enum intr_type_t {
struct unsol_rcv_ct_ctx {
uint32_t ctxt_id;
uint32_t SID;
- uint32_t oxid;
uint32_t flags;
#define UNSOL_VALID 0x00000001
+ uint16_t oxid;
+ uint16_t rxid;
};
#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/
@@ -674,6 +680,9 @@ struct lpfc_hba {
uint32_t cfg_enable_rrq;
uint32_t cfg_topology;
uint32_t cfg_link_speed;
+#define LPFC_FCF_FOV 1 /* Fast fcf failover */
+#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
+ uint32_t cfg_fcf_failover_policy;
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support;
@@ -845,9 +854,13 @@ struct lpfc_hba {
/* iDiag debugfs sub-directory */
struct dentry *idiag_root;
struct dentry *idiag_pci_cfg;
+ struct dentry *idiag_bar_acc;
struct dentry *idiag_que_info;
struct dentry *idiag_que_acc;
struct dentry *idiag_drb_acc;
+ struct dentry *idiag_ctl_acc;
+ struct dentry *idiag_mbx_acc;
+ struct dentry *idiag_ext_acc;
#endif
/* Used for deferred freeing of ELS data buffers */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 135a53b..2542f1f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,47 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readiness
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * SLI4 interface type-2 device to wait on the sliport status register for
+ * the readiness after performing a firmware reset.
+ *
+ * Returns:
+ * zero for success
+ **/
+static int
+lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
+{
+ struct lpfc_register portstat_reg;
+ int i;
+
+
+ lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0);
+
+ /* wait for the SLI port firmware ready after firmware reset */
+ for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
+ msleep(10);
+ lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0);
+ if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
+ continue;
+ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
+ continue;
+ if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
+ continue;
+ break;
+ }
+
+ if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
+ return 0;
+ else
+ return -EIO;
+}
+
+/**
* lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
* @phba: lpfc_hba pointer.
*
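
lpfc_sli4_pdev_status_reg_wait() above is a bounded poll: re-read the SLI port status register every 10ms and report -EIO if the expected post-reset indication is not seen within LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT iterations (that constant's value is not shown in this hunk). The general shape, with the register checks stubbed out:

#include <stdbool.h>
#include <stdio.h>

#define MAX_POLLS 100	/* placeholder; the real bound is LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT */
#define EIO 5

static bool port_state_reached(void)	/* stand-in for the sliport status bit tests */
{
	static int calls;
	return ++calls >= 7;		/* pretend the state shows up after ~70ms */
}

static int wait_for_port_state(void)
{
	for (int i = 0; i < MAX_POLLS; i++) {
		/* msleep(10) in the driver; omitted in this model */
		if (port_state_reached())
			return 0;
	}
	return -EIO;
}

int main(void)
{
	printf("rc=%d\n", wait_for_port_state());
	return 0;
}
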
@@ -769,6 +810,7 @@ static ssize_t
lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
{
struct completion online_compl;
+ struct pci_dev *pdev = phba->pcidev;
uint32_t reg_val;
int status = 0;
int rc;
@@ -781,6 +823,14 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
LPFC_SLI_INTF_IF_TYPE_2))
return -EPERM;
+ if (!pdev->is_physfn)
+ return -EPERM;
+
+ /* Disable SR-IOV virtual functions if enabled */
+ if (phba->cfg_sriov_nr_virtfn) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0)
@@ -805,7 +855,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
/* delay driver action following IF_TYPE_2 reset */
- msleep(100);
+ rc = lpfc_sli4_pdev_status_reg_wait(phba);
+
+ if (rc)
+ return -EIO;
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -895,6 +948,10 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
if (!phba->cfg_enable_hba_reset)
return -EACCES;
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3050 lpfc_board_mode set to %s\n", buf);
+
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
@@ -1290,6 +1347,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
if (phba->sli_rev == LPFC_SLI_REV4)
val = 0;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3051 lpfc_poll changed from %d to %d\n",
+ phba->cfg_poll, val);
+
spin_lock_irq(&phba->hbalock);
old_val = phba->cfg_poll;
@@ -1414,80 +1475,10 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct pci_dev *pdev = phba->pcidev;
- union lpfc_sli4_cfg_shdr *shdr;
- uint32_t shdr_status, shdr_add_status;
- LPFC_MBOXQ_t *mboxq;
- struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
- struct lpfc_rsrc_desc_pcie *desc;
- uint32_t max_nr_virtfn;
- uint32_t desc_count;
- int length, rc, i;
-
- if ((phba->sli_rev < LPFC_SLI_REV4) ||
- (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_2))
- return -EPERM;
-
- if (!pdev->is_physfn)
- return snprintf(buf, PAGE_SIZE, "%d\n", 0);
-
- mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mboxq)
- return -ENOMEM;
-
- /* get the maximum number of virtfn support by physfn */
- length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
- sizeof(struct lpfc_sli4_cfg_mhdr));
- lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
- length, LPFC_SLI4_MBX_EMBED);
- shdr = (union lpfc_sli4_cfg_shdr *)
- &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
- bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
- phba->sli4_hba.iov.pf_number + 1);
-
- get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
- bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
- LPFC_CFG_TYPE_CURRENT_ACTIVE);
-
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
- lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
-
- if (rc != MBX_TIMEOUT) {
- /* check return status */
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
- &shdr->response);
- if (shdr_status || shdr_add_status || rc)
- goto error_out;
-
- } else
- goto error_out;
-
- desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
-
- for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
- desc = (struct lpfc_rsrc_desc_pcie *)
- &get_prof_cfg->u.response.prof_cfg.desc[i];
- if (LPFC_RSRC_DESC_TYPE_PCIE ==
- bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
- max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
- desc);
- break;
- }
- }
-
- if (i < LPFC_RSRC_DESC_MAX_NUM) {
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
- return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
- }
+ uint16_t max_nr_virtfn;
-error_out:
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
- return -EIO;
+ max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
}
/**
@@ -1605,6 +1596,9 @@ static int \
lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
{ \
if (val >= minval && val <= maxval) {\
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+ "3052 lpfc_" #attr " changed from %d to %d\n", \
+ phba->cfg_##attr, val); \
phba->cfg_##attr = val;\
return 0;\
}\
@@ -1762,6 +1756,9 @@ static int \
lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
{ \
if (val >= minval && val <= maxval) {\
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+ "3053 lpfc_" #attr " changed from %d to %d\n", \
+ vport->cfg_##attr, val); \
vport->cfg_##attr = val;\
return 0;\
}\
@@ -2196,6 +2193,9 @@ lpfc_param_show(enable_npiv);
lpfc_param_init(enable_npiv, 1, 0, 1);
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
+LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
+ "FCF Fast failover=1 Priority failover=2");
+
int lpfc_enable_rrq;
module_param(lpfc_enable_rrq, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
@@ -2678,6 +2678,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
if (nolip)
return strlen(buf);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3054 lpfc_topology changed from %d to %d\n",
+ prev_val, val);
err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
if (err) {
phba->cfg_topology = prev_val;
@@ -3101,6 +3104,10 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
if (sscanf(val_buf, "%i", &val) != 1)
return -EINVAL;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3055 lpfc_link_speed changed from %d to %d %s\n",
+ phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
+
if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
@@ -3678,7 +3685,9 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
# - Default will result in registering capabilities for all profiles.
#
*/
-unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
+unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION;
module_param(lpfc_prot_mask, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
@@ -3769,6 +3778,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fdmi_on,
&dev_attr_lpfc_max_luns,
&dev_attr_lpfc_enable_npiv,
+ &dev_attr_lpfc_fcf_failover_policy,
&dev_attr_lpfc_enable_rrq,
&dev_attr_nport_evt_cnt,
&dev_attr_board_mode,
@@ -4989,6 +4999,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_link_speed_init(phba, lpfc_link_speed);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
+ lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 7fb0ba4..6760c69 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -42,6 +42,7 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
@@ -960,8 +961,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
evt_dat->immed_dat].oxid,
phba->ct_ctx[
evt_dat->immed_dat].SID);
+ phba->ct_ctx[evt_dat->immed_dat].rxid =
+ piocbq->iocb.ulpContext;
phba->ct_ctx[evt_dat->immed_dat].oxid =
- piocbq->iocb.ulpContext;
+ piocbq->iocb.unsli3.rcvsli3.ox_id;
phba->ct_ctx[evt_dat->immed_dat].SID =
piocbq->iocb.un.rcvels.remoteID;
phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
@@ -1312,7 +1315,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
- icmd->ulpContext = phba->ct_ctx[tag].oxid;
+ icmd->ulpContext = phba->ct_ctx[tag].rxid;
+ icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
if (!ndlp) {
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -1337,9 +1341,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
goto issue_ct_rsp_exit;
}
- icmd->un.ulpWord[3] = ndlp->nlp_rpi;
- if (phba->sli_rev == LPFC_SLI_REV4)
- icmd->ulpContext =
+ icmd->un.ulpWord[3] =
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
/* The exchange is done, mark the entry as invalid */
@@ -1351,8 +1353,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
/* Xmit CT response on exchange <xid> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
- icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
+ "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
+ icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
ctiocb->iocb_cmpl = NULL;
ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
@@ -1471,13 +1473,12 @@ send_mgmt_rsp_exit:
/**
* lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
* @phba: Pointer to HBA context object.
- * @job: LPFC_BSG_VENDOR_DIAG_MODE
*
* This function is responsible for preparing driver for diag loopback
* on device.
*/
static int
-lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
struct Scsi_Host *shost;
@@ -1521,7 +1522,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
/**
* lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
* @phba: Pointer to HBA context object.
- * @job: LPFC_BSG_VENDOR_DIAG_MODE
*
* This function is responsible for driver exit processing of setting up
* diag loopback mode on device.
@@ -1567,7 +1567,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
uint32_t link_flags;
uint32_t timeout;
LPFC_MBOXQ_t *pmboxq;
- int mbxstatus;
+ int mbxstatus = MBX_SUCCESS;
int i = 0;
int rc = 0;
@@ -1586,7 +1586,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
goto job_error;
}
- rc = lpfc_bsg_diag_mode_enter(phba, job);
+ rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
goto job_error;
@@ -1741,7 +1741,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
uint32_t link_flags, timeout, req_len, alloc_len;
struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
LPFC_MBOXQ_t *pmboxq = NULL;
- int mbxstatus, i, rc = 0;
+ int mbxstatus = MBX_SUCCESS, i, rc = 0;
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
@@ -1758,7 +1758,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
goto job_error;
}
- rc = lpfc_bsg_diag_mode_enter(phba, job);
+ rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
goto job_error;
@@ -1982,7 +1982,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
goto job_error;
}
- rc = lpfc_bsg_diag_mode_enter(phba, job);
+ rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
goto job_error;
@@ -3178,6 +3178,11 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
"(x%x/x%x) complete bsg job done, bsize:%d\n",
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType, size);
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType,
+ dma_ebuf, sta_pos_addr,
+ phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
} else
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -3430,6 +3435,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
"ext_buf_cnt:%d\n", ext_buf_cnt);
}
+ /* before dma descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
+ sta_pre_addr, dmabuf, ext_buf_cnt);
+
/* reject non-embedded mailbox command with none external buffer */
if (ext_buf_cnt == 0) {
rc = -EPERM;
@@ -3477,6 +3486,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
}
+ /* after dma descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
+ sta_pos_addr, dmabuf, ext_buf_cnt);
+
/* construct base driver mbox command */
pmb = &pmboxq->u.mb;
pmbx = (uint8_t *)dmabuf->virt;
@@ -3511,7 +3524,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2947 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
- return 1;
+ return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2948 Failed to issue SLI_CONFIG ext-buffer "
@@ -3549,7 +3562,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
uint8_t *mbx;
- int rc = 0, i;
+ int rc = SLI_CONFIG_NOT_HANDLED, i;
mbox_req =
(struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
@@ -3591,12 +3604,20 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
"ext_buf_cnt:%d\n", ext_buf_cnt);
}
+ /* before dma buffer descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
+ sta_pre_addr, dmabuf, ext_buf_cnt);
+
if (ext_buf_cnt == 0)
return -EPERM;
/* for the first external buffer */
lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+ /* after dma descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
+ sta_pos_addr, dmabuf, ext_buf_cnt);
+
/* log for looking forward */
for (i = 1; i < ext_buf_cnt; i++) {
if (nemb_tp == nemb_mse)
@@ -3660,7 +3681,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2955 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
- return 1;
+ return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2956 Failed to issue SLI_CONFIG ext-buffer "
@@ -3668,6 +3689,11 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -EPIPE;
}
+ /* wait for additional external buffers */
+ job->reply->result = 0;
+ job->job_done(job);
+ return SLI_CONFIG_HANDLED;
+
job_error:
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -3840,6 +3866,12 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
struct lpfc_dmabuf, list);
list_del_init(&dmabuf->list);
+
+ /* after dma buffer descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
+ mbox_rd, dma_ebuf, sta_pos_addr,
+ dmabuf, index);
+
pbuf = (uint8_t *)dmabuf->virt;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
@@ -3922,6 +3954,11 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
dmabuf);
list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ /* after write dma buffer */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
+ mbox_wr, dma_ebuf, sta_pos_addr,
+ dmabuf, index);
+
if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2968 SLI_CONFIG ext-buffer wr all %d "
@@ -3959,7 +3996,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2969 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
- return 1;
+ return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2970 Failed to issue SLI_CONFIG ext-buffer "
@@ -4039,14 +4076,14 @@ lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
struct dfc_mbox_req *mbox_req;
- int rc;
+ int rc = SLI_CONFIG_NOT_HANDLED;
mbox_req =
(struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
/* mbox command with/without single external buffer */
if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
- return SLI_CONFIG_NOT_HANDLED;
+ return rc;
/* mbox command and first external buffer */
if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
@@ -4249,7 +4286,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
* mailbox extension size
*/
if ((transmit_length > receive_length) ||
- (transmit_length > MAILBOX_EXT_SIZE)) {
+ (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
rc = -ERANGE;
goto job_done;
}
@@ -4272,7 +4309,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* receive length cannot be greater than mailbox
* extension size
*/
- if (receive_length > MAILBOX_EXT_SIZE) {
+ if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
rc = -ERANGE;
goto job_done;
}
@@ -4306,7 +4343,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
/* bde size cannot be greater than mailbox ext size */
- if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
+ if (bde->tus.f.bdeSize >
+ BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
rc = -ERANGE;
goto job_done;
}
@@ -4332,7 +4370,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
* mailbox extension size
*/
if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
+ (receive_length >
+ BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
rc = -ERANGE;
goto job_done;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fc20c24..a6db6ae 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -235,9 +235,11 @@ int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
+void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
@@ -371,6 +373,10 @@ extern struct lpfc_hbq_init *lpfc_hbq_defs[];
/* SLI4 if_type 2 externs. */
int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
+ uint16_t *, uint16_t *);
+int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
+ uint16_t *, uint16_t *);
/* externs BlockGuard */
extern char *_dump_buf_data;
@@ -432,10 +438,16 @@ void lpfc_handle_rrq_active(struct lpfc_hba *);
int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
uint16_t, uint16_t, uint16_t);
+uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
+void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type,
+ enum mbox_type, enum dma_type, enum sta_type,
+ struct lpfc_dmabuf *, uint32_t);
+void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *);
int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
/* functions to support SR-IOV */
int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
+uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 30b25c5..a0424dd 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -48,6 +48,7 @@
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
+#include "lpfc_bsg.h"
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/*
@@ -135,7 +136,11 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
int i, index, len, enable;
uint32_t ms;
struct lpfc_debugfs_trc *dtp;
- char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];
+ char *buffer;
+
+ buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return 0;
enable = lpfc_debugfs_enable;
lpfc_debugfs_enable = 0;
@@ -167,6 +172,8 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
}
lpfc_debugfs_enable = enable;
+ kfree(buffer);
+
return len;
}
@@ -195,8 +202,11 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
int i, index, len, enable;
uint32_t ms;
struct lpfc_debugfs_trc *dtp;
- char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];
+ char *buffer;
+ buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return 0;
enable = lpfc_debugfs_enable;
lpfc_debugfs_enable = 0;
@@ -228,6 +238,8 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
}
lpfc_debugfs_enable = enable;
+ kfree(buffer);
+
return len;
}
@@ -378,7 +390,11 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
int len = 0;
int i, off;
uint32_t *ptr;
- char buffer[1024];
+ char *buffer;
+
+ buffer = kmalloc(1024, GFP_KERNEL);
+ if (!buffer)
+ return 0;
off = 0;
spin_lock_irq(&phba->hbalock);
@@ -407,6 +423,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
}
spin_unlock_irq(&phba->hbalock);
+ kfree(buffer);
+
return len;
}
@@ -1327,8 +1345,8 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
return 0;
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
- where = idiag.cmd.data[0];
- count = idiag.cmd.data[1];
+ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+ count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
} else
return 0;
@@ -1373,6 +1391,11 @@ pcicfg_browse:
len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
"%08x ", u32val);
offset += sizeof(uint32_t);
+ if (offset >= LPFC_PCI_CFG_SIZE) {
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_CFG_SIZE-len, "\n");
+ break;
+ }
index -= sizeof(uint32_t);
if (!index)
len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
@@ -1385,8 +1408,11 @@ pcicfg_browse:
}
/* Set up the offset for next portion of pci cfg read */
- idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
- if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+ if (index == 0) {
+ idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
+ if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+ idiag.offset.last_rd = 0;
+ } else
idiag.offset.last_rd = 0;
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
@@ -1439,8 +1465,8 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
if (rc != LPFC_PCI_CFG_RD_CMD_ARG)
goto error_out;
/* Read command from PCI config space, set up command fields */
- where = idiag.cmd.data[0];
- count = idiag.cmd.data[1];
+ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+ count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
if (count == LPFC_PCI_CFG_BROWSE) {
if (where % sizeof(uint32_t))
goto error_out;
@@ -1475,9 +1501,9 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
if (rc != LPFC_PCI_CFG_WR_CMD_ARG)
goto error_out;
/* Write command to PCI config space, read-modify-write */
- where = idiag.cmd.data[0];
- count = idiag.cmd.data[1];
- value = idiag.cmd.data[2];
+ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+ count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+ value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX];
/* Sanity checks */
if ((count != sizeof(uint8_t)) &&
(count != sizeof(uint16_t)) &&
@@ -1570,6 +1596,292 @@ error_out:
}
/**
+ * lpfc_idiag_baracc_read - idiag debugfs pci bar access read
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci bar memory mapped space
+ * according to the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int offset_label, offset, offset_run, len = 0, index;
+ int bar_num, acc_range, bar_size;
+ char *pbuffer;
+ void __iomem *mem_mapped_bar;
+ uint32_t if_type;
+ struct pci_dev *pdev;
+ uint32_t u32val;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
+ bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
+ offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
+ acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
+ bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
+ } else
+ return 0;
+
+ if (acc_range == 0)
+ return 0;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (bar_num == IDIAG_BARACC_BAR_0)
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ else if (bar_num == IDIAG_BARACC_BAR_1)
+ mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
+ else if (bar_num == IDIAG_BARACC_BAR_2)
+ mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
+ else
+ return 0;
+ } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (bar_num == IDIAG_BARACC_BAR_0)
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ else
+ return 0;
+ } else
+ return 0;
+
+ /* Read single PCI bar space register */
+ if (acc_range == SINGLE_WORD) {
+ offset_run = offset;
+ u32val = readl(mem_mapped_bar + offset_run);
+ len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "%05x: %08x\n", offset_run, u32val);
+ } else
+ goto baracc_browse;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+baracc_browse:
+
+ /* Browse all PCI bar space registers */
+ offset_label = idiag.offset.last_rd;
+ offset_run = offset_label;
+
+ /* Read PCI bar memory mapped space */
+ len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "%05x: ", offset_label);
+ index = LPFC_PCI_BAR_RD_SIZE;
+ while (index > 0) {
+ u32val = readl(mem_mapped_bar + offset_run);
+ len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "%08x ", u32val);
+ offset_run += sizeof(uint32_t);
+ if (acc_range == LPFC_PCI_BAR_BROWSE) {
+ if (offset_run >= bar_size) {
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+ break;
+ }
+ } else {
+ if (offset_run >= offset +
+ (acc_range * sizeof(uint32_t))) {
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+ break;
+ }
+ }
+ index -= sizeof(uint32_t);
+ if (!index)
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+ else if (!(index % (8 * sizeof(uint32_t)))) {
+ offset_label += (8 * sizeof(uint32_t));
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "\n%05x: ", offset_label);
+ }
+ }
+
+ /* Set up the offset for next portion of pci bar read */
+ if (index == 0) {
+ idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE;
+ if (acc_range == LPFC_PCI_BAR_BROWSE) {
+ if (idiag.offset.last_rd >= bar_size)
+ idiag.offset.last_rd = 0;
+ } else {
+ if (offset_run >= offset +
+ (acc_range * sizeof(uint32_t)))
+ idiag.offset.last_rd = offset;
+ }
+ } else {
+ if (acc_range == LPFC_PCI_BAR_BROWSE)
+ idiag.offset.last_rd = 0;
+ else
+ idiag.offset.last_rd = offset;
+ }
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for the PCI bar memory mapped space read
+ * or write command accordingly. In the case of a PCI bar memory mapped
+ * space read command, it sets up the command in the idiag command struct
+ * for the debugfs read operation. In the case of a PCI bar memory mapped
+ * space write operation, it executes the write operation into the PCI bar
+ * memory mapped space accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to
+ * user space.
+ */
+static ssize_t
+lpfc_idiag_baracc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t bar_num, bar_size, offset, value, acc_range;
+ struct pci_dev *pdev;
+ void __iomem *mem_mapped_bar;
+ uint32_t if_type;
+ uint32_t u32val;
+ int rc;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return -EFAULT;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
+
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if ((bar_num != IDIAG_BARACC_BAR_0) &&
+ (bar_num != IDIAG_BARACC_BAR_1) &&
+ (bar_num != IDIAG_BARACC_BAR_2))
+ goto error_out;
+ } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (bar_num != IDIAG_BARACC_BAR_0)
+ goto error_out;
+ } else
+ goto error_out;
+
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (bar_num == IDIAG_BARACC_BAR_0) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF0_BAR0_SIZE;
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ } else if (bar_num == IDIAG_BARACC_BAR_1) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF0_BAR1_SIZE;
+ mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
+ } else if (bar_num == IDIAG_BARACC_BAR_2) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF0_BAR2_SIZE;
+ mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
+ } else
+ goto error_out;
+ } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (bar_num == IDIAG_BARACC_BAR_0) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF2_BAR0_SIZE;
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ } else
+ goto error_out;
+ } else
+ goto error_out;
+
+ offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
+ if (offset % sizeof(uint32_t))
+ goto error_out;
+
+ bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
+ /* Sanity check on PCI config read command line arguments */
+ if (rc != LPFC_PCI_BAR_RD_CMD_ARG)
+ goto error_out;
+ acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
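+ /* acc_range selects the access mode: LPFC_PCI_BAR_BROWSE walks the
+ * entire BAR, SINGLE_WORD reads one register, and any larger value is
+ * the number of 32-bit words to read starting at offset */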
+ if (acc_range == LPFC_PCI_BAR_BROWSE) {
+ if (offset > bar_size - sizeof(uint32_t))
+ goto error_out;
+ /* Starting offset to browse */
+ idiag.offset.last_rd = offset;
+ } else if (acc_range > SINGLE_WORD) {
+ if (offset + acc_range * sizeof(uint32_t) > bar_size)
+ goto error_out;
+ /* Starting offset to browse */
+ idiag.offset.last_rd = offset;
+ } else if (acc_range != SINGLE_WORD)
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
+ /* Sanity check on PCI bar write command line arguments */
+ if (rc != LPFC_PCI_BAR_WR_CMD_ARG)
+ goto error_out;
+ /* Write command to PCI bar space, read-modify-write */
+ acc_range = SINGLE_WORD;
+ value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX];
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) {
+ writel(value, mem_mapped_bar + offset);
+ readl(mem_mapped_bar + offset);
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) {
+ u32val = readl(mem_mapped_bar + offset);
+ u32val |= value;
+ writel(u32val, mem_mapped_bar + offset);
+ readl(mem_mapped_bar + offset);
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
+ u32val = readl(mem_mapped_bar + offset);
+ u32val &= ~value;
+ writel(u32val, mem_mapped_bar + offset);
+ readl(mem_mapped_bar + offset);
+ }
+ } else
+ /* All other opcodes are illegal for now */
+ goto error_out;
+
+ return nbytes;
+error_out:
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
* lpfc_idiag_queinfo_read - idiag debugfs read queue information
* @file: The file pointer to read from.
* @buf: The buffer to copy the data to.
@@ -1871,8 +2183,8 @@ lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
return 0;
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
- index = idiag.cmd.data[2];
- count = idiag.cmd.data[3];
+ index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
+ count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
pque = (struct lpfc_queue *)idiag.ptr_private;
} else
return 0;
@@ -1944,12 +2256,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
return rc;
/* Get and sanity check on command feilds */
- quetp = idiag.cmd.data[0];
- queid = idiag.cmd.data[1];
- index = idiag.cmd.data[2];
- count = idiag.cmd.data[3];
- offset = idiag.cmd.data[4];
- value = idiag.cmd.data[5];
+ quetp = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX];
+ queid = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX];
+ index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
+ count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
+ offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX];
+ value = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX];
/* Sanity check on command line arguments */
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
@@ -2218,7 +2530,7 @@ lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
return 0;
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
- drb_reg_id = idiag.cmd.data[0];
+ drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
else
return 0;
@@ -2257,7 +2569,7 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
- uint32_t drb_reg_id, value, reg_val;
+ uint32_t drb_reg_id, value, reg_val = 0;
void __iomem *drb_reg;
int rc;
@@ -2269,8 +2581,8 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
return rc;
/* Sanity check on command line arguments */
- drb_reg_id = idiag.cmd.data[0];
- value = idiag.cmd.data[1];
+ drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
+ value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX];
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
@@ -2330,6 +2642,679 @@ error_out:
return -EINVAL;
}
+/**
+ * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control registers
+ * @phba: The pointer to hba structure.
+ * @pbuffer: The pointer to the buffer to copy the data to.
+ * @len: The lenght of bytes to copied.
+ * @drbregid: The id to doorbell registers.
+ *
+ * Description:
+ * This routine reads a control register and copies its content to the
+ * user buffer pointed to by @pbuffer.
+ *
+ * Returns:
+ * This function returns the amount of data that was copied into @pbuffer.
+ **/
+static int
+lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
+ int len, uint32_t ctlregid)
+{
+
+ if (!pbuffer)
+ return 0;
+
+ switch (ctlregid) {
+ case LPFC_CTL_PORT_SEM:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port SemReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_SEM_OFFSET));
+ break;
+ case LPFC_CTL_PORT_STA:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port StaReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_STA_OFFSET));
+ break;
+ case LPFC_CTL_PORT_CTL:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port CtlReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_CTL_OFFSET));
+ break;
+ case LPFC_CTL_PORT_ER1:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port Er1Reg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER1_OFFSET));
+ break;
+ case LPFC_CTL_PORT_ER2:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port Er2Reg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER2_OFFSET));
+ break;
+ case LPFC_CTL_PDEV_CTL:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "PDev CtlReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET));
+ break;
+ default:
+ break;
+ }
+ return len;
+}
+
+/**
+ * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba port and device registers according
+ * to the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t ctl_reg_id, i;
+ char *pbuffer;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD)
+ ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
+ else
+ return 0;
+
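+ /* LPFC_CTL_ACC_ALL dumps every control register from 1 to LPFC_CTL_MAX */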
+ if (ctl_reg_id == LPFC_CTL_ACC_ALL)
+ for (i = 1; i <= LPFC_CTL_MAX; i++)
+ len = lpfc_idiag_ctlacc_read_reg(phba,
+ pbuffer, len, i);
+ else
+ len = lpfc_idiag_ctlacc_read_reg(phba,
+ pbuffer, len, ctl_reg_id);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for port and device control register read (dump)
+ * or write (set) command accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t ctl_reg_id, value, reg_val = 0;
+ void __iomem *ctl_reg;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ /* Sanity check on command line arguments */
+ ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
+ value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX];
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+ if (rc != LPFC_CTL_ACC_WR_CMD_ARG)
+ goto error_out;
+ if (ctl_reg_id > LPFC_CTL_MAX)
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) {
+ if (rc != LPFC_CTL_ACC_RD_CMD_ARG)
+ goto error_out;
+ if ((ctl_reg_id > LPFC_CTL_MAX) &&
+ (ctl_reg_id != LPFC_CTL_ACC_ALL))
+ goto error_out;
+ } else
+ goto error_out;
+
+ /* Perform the write access operation */
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+ switch (ctl_reg_id) {
+ case LPFC_CTL_PORT_SEM:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_SEM_OFFSET;
+ break;
+ case LPFC_CTL_PORT_STA:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_STA_OFFSET;
+ break;
+ case LPFC_CTL_PORT_CTL:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_CTL_OFFSET;
+ break;
+ case LPFC_CTL_PORT_ER1:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER1_OFFSET;
+ break;
+ case LPFC_CTL_PORT_ER2:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER2_OFFSET;
+ break;
+ case LPFC_CTL_PDEV_CTL:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET;
+ break;
+ default:
+ goto error_out;
+ }
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR)
+ reg_val = value;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) {
+ reg_val = readl(ctl_reg);
+ reg_val |= value;
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+ reg_val = readl(ctl_reg);
+ reg_val &= ~value;
+ }
+ writel(reg_val, ctl_reg);
+ readl(ctl_reg); /* flush */
+ }
+ return nbytes;
+
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup
+ * @phba: Pointer to HBA context object.
+ * @pbuffer: Pointer to data buffer.
+ *
+ * Description:
+ * This routine gets the driver mailbox access debugfs setup information.
+ *
+ * Returns:
+ * This function returns the length of the setup information copied into
+ * @pbuffer.
+ **/
+static int
+lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
+{
+ uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
+ int len = 0;
+
+ mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_dump_map: 0x%08x\n", mbx_dump_map);
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_dump_cnt: %04d\n", mbx_dump_cnt);
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_word_cnt: %04d\n", mbx_word_cnt);
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the @phba driver mailbox access debugfs setup
+ * information and copies it to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *pbuffer;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) &&
+ (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP))
+ return 0;
+
+ len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for driver mailbox command (dump) and sets up the
+ * necessary states in the idiag command struct accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ /* Sanity check on command line arguments */
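+ /* mbx_mbox_cmd filters by mailbox command, mbx_dump_map selects what
+ * to dump, mbx_dump_cnt is the number of commands to dump, and
+ * mbx_word_cnt is the number of 32-bit words dumped per command */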
+ mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) {
+ if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL))
+ goto error_out;
+ if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) &&
+ (mbx_dump_map != LPFC_MBX_DMP_ALL))
+ goto error_out;
+ if (mbx_word_cnt > sizeof(MAILBOX_t))
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) {
+ if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL))
+ goto error_out;
+ if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) &&
+ (mbx_dump_map != LPFC_MBX_DMP_ALL))
+ goto error_out;
+ if (mbx_word_cnt > (BSG_MBOX_SIZE)/4)
+ goto error_out;
+ if (mbx_mbox_cmd != 0x9b)
+ goto error_out;
+ } else
+ goto error_out;
+
+ if (mbx_word_cnt == 0)
+ goto error_out;
+ if (rc != LPFC_MBX_DMP_ARG)
+ goto error_out;
+ if (mbx_mbox_cmd & ~0xff)
+ goto error_out;
+
+ /* condition for stop mailbox dump */
+ if (mbx_dump_cnt == 0)
+ goto reset_out;
+
+ return nbytes;
+
+reset_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return nbytes;
+
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_extacc_avail_get - get the available extents information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length into the internal buffer data has been copied.
+ *
+ * Description:
+ * This routine is to get the available extent information.
+ *
+ * Returns:
+ * overall length of the data read into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+ uint16_t ext_cnt, ext_size;
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\nAvailable Extents Information:\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available VPI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available VFI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available RPI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available XRI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_extacc_alloc_get - get the allocated extents information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length into the internal buffer data has been copied.
+ *
+ * Description:
+ * This routine is to get the allocated extent information.
+ *
+ * Returns:
+ * overall length of the data read into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+ uint16_t ext_cnt, ext_size;
+ int rc;
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\nAllocated Extents Information:\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated VPI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated VFI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated RPI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated XRI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_extacc_drivr_get - get driver extent information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length into the internal buffer data has been copied.
+ *
+ * Description:
+ * This routine is to get the driver extent information.
+ *
+ * Returns:
+ * overall length of the data read into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+ struct lpfc_rsrc_blks *rsrc_blks;
+ int index;
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\nDriver Extents Information:\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tVPI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tVFI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
+ list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tRPI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
+ list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tXRI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for extent information access commands and sets
+ * up the necessary states in the idiag command struct accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_extacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ uint32_t ext_map;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
+
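+ /* ext_map bits select available, allocated, and/or driver extent info */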
+ if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
+ goto error_out;
+ if (rc != LPFC_EXT_ACC_CMD_ARG)
+ goto error_out;
+ if (!(ext_map & LPFC_EXT_ACC_ALL))
+ goto error_out;
+
+ return nbytes;
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_extacc_read - idiag debugfs read access to extent information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the proper extent information according to
+ * the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *pbuffer;
+ uint32_t ext_map;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+ if (*ppos)
+ return 0;
+ if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
+ return 0;
+
+ ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
+ if (ext_map & LPFC_EXT_ACC_AVAIL)
+ len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len);
+ if (ext_map & LPFC_EXT_ACC_ALLOC)
+ len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len);
+ if (ext_map & LPFC_EXT_ACC_DRIVR)
+ len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
#undef lpfc_debugfs_op_disc_trc
static const struct file_operations lpfc_debugfs_op_disc_trc = {
.owner = THIS_MODULE,
@@ -2420,6 +3405,16 @@ static const struct file_operations lpfc_idiag_op_pciCfg = {
.release = lpfc_idiag_cmd_release,
};
+#undef lpfc_idiag_op_barAcc
+static const struct file_operations lpfc_idiag_op_barAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_baracc_read,
+ .write = lpfc_idiag_baracc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
#undef lpfc_idiag_op_queInfo
static const struct file_operations lpfc_idiag_op_queInfo = {
.owner = THIS_MODULE,
@@ -2428,7 +3423,7 @@ static const struct file_operations lpfc_idiag_op_queInfo = {
.release = lpfc_idiag_release,
};
-#undef lpfc_idiag_op_queacc
+#undef lpfc_idiag_op_queAcc
static const struct file_operations lpfc_idiag_op_queAcc = {
.owner = THIS_MODULE,
.open = lpfc_idiag_open,
@@ -2438,7 +3433,7 @@ static const struct file_operations lpfc_idiag_op_queAcc = {
.release = lpfc_idiag_cmd_release,
};
-#undef lpfc_idiag_op_drbacc
+#undef lpfc_idiag_op_drbAcc
static const struct file_operations lpfc_idiag_op_drbAcc = {
.owner = THIS_MODULE,
.open = lpfc_idiag_open,
@@ -2448,8 +3443,234 @@ static const struct file_operations lpfc_idiag_op_drbAcc = {
.release = lpfc_idiag_cmd_release,
};
+#undef lpfc_idiag_op_ctlAcc
+static const struct file_operations lpfc_idiag_op_ctlAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_ctlacc_read,
+ .write = lpfc_idiag_ctlacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_mbxAcc
+static const struct file_operations lpfc_idiag_op_mbxAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_mbxacc_read,
+ .write = lpfc_idiag_mbxacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_extAcc
+static const struct file_operations lpfc_idiag_op_extAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_extacc_read,
+ .write = lpfc_idiag_extacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
#endif
+/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * Description:
+ * This routine dumps a bsg pass-through non-embedded mailbox command with
+ * an external buffer.
+ **/
+void
+lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+ enum mbox_type mbox_tp, enum dma_type dma_tp,
+ enum sta_type sta_tp,
+ struct lpfc_dmabuf *dmabuf, uint32_t ext_buf)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt;
+ char line_buf[LPFC_MBX_ACC_LBUF_SZ];
+ int len = 0;
+ uint32_t do_dump = 0;
+ uint32_t *pword;
+ uint32_t i;
+
+ if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)
+ return;
+
+ mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) ||
+ (*mbx_dump_cnt == 0) ||
+ (*mbx_word_cnt == 0))
+ return;
+
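+ /* only the non-embedded SLI_CONFIG (0x9b) mailbox command is dumped */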
+ if (*mbx_mbox_cmd != 0x9B)
+ return;
+
+ if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) {
+ do_dump |= LPFC_BSG_DMP_MBX_RD_MBX;
+ printk(KERN_ERR "\nRead mbox command (x%x), "
+ "nemb:0x%x, extbuf_cnt:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+ if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) {
+ do_dump |= LPFC_BSG_DMP_MBX_RD_BUF;
+ printk(KERN_ERR "\nRead mbox buffer (x%x), "
+ "nemb:0x%x, extbuf_seq:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+ if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) {
+ do_dump |= LPFC_BSG_DMP_MBX_WR_MBX;
+ printk(KERN_ERR "\nWrite mbox command (x%x), "
+ "nemb:0x%x, extbuf_cnt:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+ if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) {
+ do_dump |= LPFC_BSG_DMP_MBX_WR_BUF;
+ printk(KERN_ERR "\nWrite mbox buffer (x%x), "
+ "nemb:0x%x, extbuf_seq:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+
+ /* dump buffer content */
+ if (do_dump) {
+ pword = (uint32_t *)dmabuf->virt;
+ for (i = 0; i < *mbx_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ len = 0;
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+ "%08x ", (uint32_t)*pword);
+ pword++;
+ }
+ if ((i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+ (*mbx_dump_cnt)--;
+ }
+
+ /* Clean out command structure on reaching dump count */
+ if (*mbx_dump_cnt == 0)
+ memset(&idiag, 0, sizeof(idiag));
+ return;
+#endif
+}
+
+/* lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * Description:
+ * This routine dumps a pass-through non-embedded mailbox command from the
+ * issue mailbox command path.
+ **/
+void
+lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd;
+ char line_buf[LPFC_MBX_ACC_LBUF_SZ];
+ int len = 0;
+ uint32_t *pword;
+ uint8_t *pbyte;
+ uint32_t i, j;
+
+ if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP)
+ return;
+
+ mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) ||
+ (*mbx_dump_cnt == 0) ||
+ (*mbx_word_cnt == 0))
+ return;
+
+ if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) &&
+ (*mbx_mbox_cmd != pmbox->mbxCommand))
+ return;
+
+ /* dump buffer content */
+ if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) {
+ printk(KERN_ERR "Mailbox command:0x%x dump by word:\n",
+ pmbox->mbxCommand);
+ pword = (uint32_t *)pmbox;
+ for (i = 0; i < *mbx_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ len = 0;
+ memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+ "%08x ",
+ ((uint32_t)*pword) & 0xffffffff);
+ pword++;
+ }
+ if ((i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+ printk(KERN_ERR "\n");
+ }
+ if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) {
+ printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n",
+ pmbox->mbxCommand);
+ pbyte = (uint8_t *)pmbox;
+ for (i = 0; i < *mbx_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ len = 0;
+ memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ for (j = 0; j < 4; j++) {
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%02x",
+ ((uint8_t)*pbyte) & 0xff);
+ pbyte++;
+ }
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len, " ");
+ }
+ if ((i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+ printk(KERN_ERR "\n");
+ }
+ (*mbx_dump_cnt)--;
+
+ /* Clean out command structure on reaching dump count */
+ if (*mbx_dump_cnt == 0)
+ memset(&idiag, 0, sizeof(idiag));
+ return;
+#endif
+}
+
/**
* lpfc_debugfs_initialize - Initialize debugfs for a vport
* @vport: The vport pointer to initialize.
@@ -2673,7 +3894,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
vport, &lpfc_debugfs_op_nodelist);
if (!vport->debug_nodelist) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Can't create debugfs nodelist\n");
+ "2985 Can't create debugfs nodelist\n");
goto debug_failed;
}
@@ -2710,6 +3931,20 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
idiag.offset.last_rd = 0;
}
+ /* iDiag PCI BAR access */
+ snprintf(name, sizeof(name), "barAcc");
+ if (!phba->idiag_bar_acc) {
+ phba->idiag_bar_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
+ if (!phba->idiag_bar_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3056 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ idiag.offset.last_rd = 0;
+ }
+
/* iDiag get PCI function queue information */
snprintf(name, sizeof(name), "queInfo");
if (!phba->idiag_que_info) {
@@ -2749,6 +3984,50 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
}
+ /* iDiag access PCI function control registers */
+ snprintf(name, sizeof(name), "ctlAcc");
+ if (!phba->idiag_ctl_acc) {
+ phba->idiag_ctl_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
+ if (!phba->idiag_ctl_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2981 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
+ /* iDiag access mbox commands */
+ snprintf(name, sizeof(name), "mbxAcc");
+ if (!phba->idiag_mbx_acc) {
+ phba->idiag_mbx_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
+ if (!phba->idiag_mbx_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2980 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
+ /* iDiag extents access commands */
+ if (phba->sli4_hba.extents_in_use) {
+ snprintf(name, sizeof(name), "extAcc");
+ if (!phba->idiag_ext_acc) {
+ phba->idiag_ext_acc =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba,
+ &lpfc_idiag_op_extAcc);
+ if (!phba->idiag_ext_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2986 Cant create "
+ "idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+ }
+
debug_failed:
return;
#endif
@@ -2783,7 +4062,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(vport->debug_nodelist); /* nodelist */
vport->debug_nodelist = NULL;
}
-
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */
vport->vport_debugfs_root = NULL;
@@ -2827,6 +4105,21 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
* iDiag release
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (phba->idiag_ext_acc) {
+ /* iDiag extAcc */
+ debugfs_remove(phba->idiag_ext_acc);
+ phba->idiag_ext_acc = NULL;
+ }
+ if (phba->idiag_mbx_acc) {
+ /* iDiag mbxAcc */
+ debugfs_remove(phba->idiag_mbx_acc);
+ phba->idiag_mbx_acc = NULL;
+ }
+ if (phba->idiag_ctl_acc) {
+ /* iDiag ctlAcc */
+ debugfs_remove(phba->idiag_ctl_acc);
+ phba->idiag_ctl_acc = NULL;
+ }
if (phba->idiag_drb_acc) {
/* iDiag drbAcc */
debugfs_remove(phba->idiag_drb_acc);
@@ -2842,6 +4135,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->idiag_que_info);
phba->idiag_que_info = NULL;
}
+ if (phba->idiag_bar_acc) {
+ /* iDiag barAcc */
+ debugfs_remove(phba->idiag_bar_acc);
+ phba->idiag_bar_acc = NULL;
+ }
if (phba->idiag_pci_cfg) {
/* iDiag pciCfg */
debugfs_remove(phba->idiag_pci_cfg);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 6525a5e..f83bd94 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -39,14 +39,51 @@
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
+/*
+ * For SLI4 iDiag debugfs diagnostics tool
+ */
+
/* pciConf */
#define LPFC_PCI_CFG_BROWSE 0xffff
#define LPFC_PCI_CFG_RD_CMD_ARG 2
#define LPFC_PCI_CFG_WR_CMD_ARG 3
#define LPFC_PCI_CFG_SIZE 4096
-#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
+#define IDIAG_PCICFG_WHERE_INDX 0
+#define IDIAG_PCICFG_COUNT_INDX 1
+#define IDIAG_PCICFG_VALUE_INDX 2
+
+/* barAcc */
+#define LPFC_PCI_BAR_BROWSE 0xffff
+#define LPFC_PCI_BAR_RD_CMD_ARG 3
+#define LPFC_PCI_BAR_WR_CMD_ARG 3
+
+#define LPFC_PCI_IF0_BAR0_SIZE (1024 * 16)
+#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128)
+#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128)
+#define LPFC_PCI_IF2_BAR0_SIZE (1024 * 32)
+
+#define LPFC_PCI_BAR_RD_BUF_SIZE 4096
+#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4)
+
+#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4)
+#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4)
+#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4)
+#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4)
+
+#define IDIAG_BARACC_BAR_NUM_INDX 0
+#define IDIAG_BARACC_OFF_SET_INDX 1
+#define IDIAG_BARACC_ACC_MOD_INDX 2
+#define IDIAG_BARACC_REG_VAL_INDX 2
+#define IDIAG_BARACC_BAR_SZE_INDX 3
+
+#define IDIAG_BARACC_BAR_0 0
+#define IDIAG_BARACC_BAR_1 1
+#define IDIAG_BARACC_BAR_2 2
+
+#define SINGLE_WORD 1
+
/* queue info */
#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
@@ -63,7 +100,14 @@
#define LPFC_IDIAG_WQ 4
#define LPFC_IDIAG_RQ 5
-/* doorbell acc */
+#define IDIAG_QUEACC_QUETP_INDX 0
+#define IDIAG_QUEACC_QUEID_INDX 1
+#define IDIAG_QUEACC_INDEX_INDX 2
+#define IDIAG_QUEACC_COUNT_INDX 3
+#define IDIAG_QUEACC_OFFST_INDX 4
+#define IDIAG_QUEACC_VALUE_INDX 5
+
+/* doorbell register acc */
#define LPFC_DRB_ACC_ALL 0xffff
#define LPFC_DRB_ACC_RD_CMD_ARG 1
#define LPFC_DRB_ACC_WR_CMD_ARG 2
@@ -76,6 +120,67 @@
#define LPFC_DRB_MAX 4
+#define IDIAG_DRBACC_REGID_INDX 0
+#define IDIAG_DRBACC_VALUE_INDX 1
+
+/* control register acc */
+#define LPFC_CTL_ACC_ALL 0xffff
+#define LPFC_CTL_ACC_RD_CMD_ARG 1
+#define LPFC_CTL_ACC_WR_CMD_ARG 2
+#define LPFC_CTL_ACC_BUF_SIZE 256
+
+#define LPFC_CTL_PORT_SEM 1
+#define LPFC_CTL_PORT_STA 2
+#define LPFC_CTL_PORT_CTL 3
+#define LPFC_CTL_PORT_ER1 4
+#define LPFC_CTL_PORT_ER2 5
+#define LPFC_CTL_PDEV_CTL 6
+
+#define LPFC_CTL_MAX 6
+
+#define IDIAG_CTLACC_REGID_INDX 0
+#define IDIAG_CTLACC_VALUE_INDX 1
+
+/* mailbox access */
+#define LPFC_MBX_DMP_ARG 4
+
+#define LPFC_MBX_ACC_BUF_SIZE 512
+#define LPFC_MBX_ACC_LBUF_SZ 128
+
+#define LPFC_MBX_DMP_MBX_WORD 0x00000001
+#define LPFC_MBX_DMP_MBX_BYTE 0x00000002
+#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE)
+
+#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001
+#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002
+#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004
+#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008
+#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \
+ LPFC_BSG_DMP_MBX_RD_BUF | \
+ LPFC_BSG_DMP_MBX_WR_MBX | \
+ LPFC_BSG_DMP_MBX_WR_BUF)
+
+#define LPFC_MBX_DMP_ALL 0xffff
+#define LPFC_MBX_ALL_CMD 0xff
+
+#define IDIAG_MBXACC_MBCMD_INDX 0
+#define IDIAG_MBXACC_DPMAP_INDX 1
+#define IDIAG_MBXACC_DPCNT_INDX 2
+#define IDIAG_MBXACC_WDCNT_INDX 3
+
+/* extents access */
+#define LPFC_EXT_ACC_CMD_ARG 1
+#define LPFC_EXT_ACC_BUF_SIZE 4096
+
+#define LPFC_EXT_ACC_AVAIL 0x1
+#define LPFC_EXT_ACC_ALLOC 0x2
+#define LPFC_EXT_ACC_DRIVR 0x4
+#define LPFC_EXT_ACC_ALL (LPFC_EXT_ACC_DRIVR | \
+ LPFC_EXT_ACC_AVAIL | \
+ LPFC_EXT_ACC_ALLOC)
+
+#define IDIAG_EXTACC_EXMAP_INDX 0
+
#define SIZE_U8 sizeof(uint8_t)
#define SIZE_U16 sizeof(uint16_t)
#define SIZE_U32 sizeof(uint32_t)
@@ -110,6 +215,11 @@ struct lpfc_idiag_cmd {
#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
+#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008
+#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009
+#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a
+#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b
+
#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
@@ -119,6 +229,17 @@ struct lpfc_idiag_cmd {
#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
+
+#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031
+#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032
+#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033
+#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034
+
+#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041
+#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042
+
+#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051
+
uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
};
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 32a0845..023da0e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -647,21 +647,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
lpfc_cleanup_pending_mbox(vport);
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli4_unreg_all_rpis(vport);
-
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
- }
- /*
- * If VPI is unreged, driver need to do INIT_VPI
- * before re-registering
- */
- if (phba->sli_rev == LPFC_SLI_REV4) {
- spin_lock_irq(shost->host_lock);
+ /*
+ * If VPI is unreged, driver need to do INIT_VPI
+ * before re-registering
+ */
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
@@ -880,6 +874,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
phba->fcf.current_rec.fcf_indx,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
+ lpfc_sli4_set_fcf_flogi_fail(phba,
+ phba->fcf.current_rec.fcf_indx);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
if (rc)
@@ -1096,11 +1092,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Set the fcfi to the fcfi we registered with */
elsiocb->iocb.ulpContext = phba->fcf.fcfi;
}
- } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
- sp->cmn.request_multiple_Nport = 1;
- /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
- icmd->ulpCt_h = 1;
- icmd->ulpCt_l = 0;
+ } else {
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ sp->cmn.request_multiple_Nport = 1;
+ /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
+ icmd->ulpCt_h = 1;
+ icmd->ulpCt_l = 0;
+ } else
+ sp->cmn.request_multiple_Nport = 0;
}
if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
@@ -3656,7 +3655,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
}
icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
@@ -3673,7 +3673,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
return 1;
icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
if (mbox)
@@ -3695,7 +3696,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
return 1;
icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
@@ -3781,7 +3783,8 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
icmd = &elsiocb->iocb;
oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
@@ -3853,7 +3856,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
icmd = &elsiocb->iocb;
oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
/* Xmit ADISC ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -3931,7 +3935,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
icmd = &elsiocb->iocb;
oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
/* Xmit PRLI ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0131 Xmit PRLI ACC response tag x%x xri x%x, "
@@ -4035,7 +4041,9 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
icmd = &elsiocb->iocb;
oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
/* Xmit RNID ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0132 Xmit RNID ACC response tag x%x xri x%x\n",
@@ -4163,7 +4171,9 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
if (!elsiocb)
return 1;
- elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */
+ elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
+ elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
+
/* Xmit ECHO ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2876 Xmit ECHO ACC response tag x%x xri x%x\n",
@@ -5054,13 +5064,15 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
- uint16_t xri;
+ uint16_t oxid;
+ uint16_t rxid;
uint32_t cmdsize;
mb = &pmb->u.mb;
ndlp = (struct lpfc_nodelist *) pmb->context2;
- xri = (uint16_t) ((unsigned long)(pmb->context1));
+ rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
+ oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
pmb->context1 = NULL;
pmb->context2 = NULL;
@@ -5082,7 +5094,8 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
icmd = &elsiocb->iocb;
- icmd->ulpContext = xri;
+ icmd->ulpContext = rxid;
+ icmd->unsli3.rcvsli3.ox_id = oxid;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5137,13 +5150,16 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
- uint16_t xri, status;
+ uint16_t status;
+ uint16_t oxid;
+ uint16_t rxid;
uint32_t cmdsize;
mb = &pmb->u.mb;
ndlp = (struct lpfc_nodelist *) pmb->context2;
- xri = (uint16_t) ((unsigned long)(pmb->context1));
+ rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
+ oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
pmb->context1 = NULL;
pmb->context2 = NULL;
@@ -5165,7 +5181,8 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
icmd = &elsiocb->iocb;
- icmd->ulpContext = xri;
+ icmd->ulpContext = rxid;
+ icmd->unsli3.rcvsli3.ox_id = oxid;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5238,8 +5255,9 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
- mbox->context1 =
- (void *)((unsigned long) cmdiocb->iocb.ulpContext);
+ mbox->context1 = (void *)((unsigned long)
+ ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
+ cmdiocb->iocb.ulpContext)); /* rx_id */
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
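The RLS/RPS receive paths above pack both exchange IDs into the single mbox->context1 pointer (ox_id in the upper 16 bits, rx_id in the lower 16), and the ACC completion handlers unpack them. A standalone sketch of that packing, with hypothetical helper names pack_xids/unpack_xids:

#include <assert.h>
#include <stdint.h>

/* Pack ox_id into the upper 16 bits and rx_id into the lower 16 bits. */
static void *pack_xids(uint16_t ox_id, uint16_t rx_id)
{
	return (void *)(((unsigned long)ox_id << 16) | rx_id);
}

/* Recover both IDs from the packed context pointer. */
static void unpack_xids(void *ctx, uint16_t *ox_id, uint16_t *rx_id)
{
	*rx_id = (uint16_t)((unsigned long)ctx & 0xffff);
	*ox_id = (uint16_t)(((unsigned long)ctx >> 16) & 0xffff);
}

int main(void)
{
	uint16_t ox, rx;

	unpack_xids(pack_xids(0x1234, 0xabcd), &ox, &rx);
	assert(ox == 0x1234 && rx == 0xabcd);
	return 0;
}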
@@ -5314,7 +5332,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
pcmd += sizeof(uint32_t); /* Skip past command */
/* use the command's xri in the response */
- elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
+ elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
+ elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
rtv_rsp = (struct RTV_RSP *)pcmd;
@@ -5399,8 +5418,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
- mbox->context1 =
- (void *)((unsigned long) cmdiocb->iocb.ulpContext);
+ mbox->context1 = (void *)((unsigned long)
+ ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
+ cmdiocb->iocb.ulpContext)); /* rx_id */
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
@@ -5554,7 +5574,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
icmd = &elsiocb->iocb;
oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -6586,7 +6607,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
struct lpfc_vport *vport;
unsigned long flags;
- int i;
+ int i = 0;
/* The physical ports are always vpi 0 - translate is unnecessary. */
if (vpi > 0) {
@@ -6609,7 +6630,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
- if (vport->vpi == vpi) {
+ if (vport->vpi == i) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return vport;
}
@@ -7787,6 +7808,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ uint16_t lxri = 0;
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
unsigned long iflag = 0;
@@ -7815,7 +7837,12 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
}
}
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
- sglq_entry = __lpfc_get_active_sglq(phba, xri);
+ lxri = lpfc_sli4_xri_inrange(phba, xri);
+ if (lxri == NO_XRI) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+ }
+ sglq_entry = __lpfc_get_active_sglq(phba, lxri);
if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 18d0dbf..0b47adf 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1109,6 +1109,28 @@ out:
return;
}
+/**
+ * lpfc_sli4_clear_fcf_rr_bmask
+ * @phba: pointer to the struct lpfc_hba for this port.
+ * This function resets the round robin bit mask and clears the
+ * fcf priority list. The list deletions are done while holding the
+ * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
+ * from the lpfc_fcf_pri record.
+ **/
+void
+lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
+{
+ struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *next_fcf_pri;
+ memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+ &phba->fcf.fcf_pri_list, list) {
+ list_del_init(&fcf_pri->list);
+ fcf_pri->fcf_rec.flag = 0;
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
@@ -1130,7 +1152,8 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_unlock_irq(&phba->hbalock);
/* If there is a pending FCoE event, restart FCF table scan. */
- if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+ if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
+ lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
goto fail_out;
/* Mark successful completion of FCF table scan */
@@ -1250,6 +1273,30 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
}
/**
+ * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: Index for the lpfc_fcf_record.
+ * @new_fcf_record: pointer to hba fcf record.
+ *
+ * This routine updates the driver FCF priority record from the new HBA FCF
+ * record. This routine is called with the host lock held.
+ **/
+static void
+__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
+ struct fcf_record *new_fcf_record
+ )
+{
+ struct lpfc_fcf_pri *fcf_pri;
+
+ fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ fcf_pri->fcf_rec.fcf_index = fcf_index;
+ /* FCF record priority */
+ fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+
+}
+
+/**
* lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
* @fcf: pointer to driver fcf record.
* @new_fcf_record: pointer to fcf record.
@@ -1332,6 +1379,9 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
fcf_rec->addr_mode = addr_mode;
fcf_rec->vlan_id = vlan_id;
fcf_rec->flag |= (flag | RECORD_VALID);
+ __lpfc_update_fcf_record_pri(phba,
+ bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
+ new_fcf_record);
}
/**
@@ -1834,6 +1884,8 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
return false;
if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
return false;
+ if (fcf_rec->priority != new_fcf_record->fip_priority)
+ return false;
return true;
}
@@ -1897,6 +1949,152 @@ stop_flogi_current_fcf:
}
/**
+ * lpfc_sli4_fcf_pri_list_del
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to delete
+ * This routine checks the on-list flag of the fcf_index to be deleted.
+ * If it is on the list, it is removed from the list and the flag is
+ * cleared. This routine grabs the hbalock before removing the fcf
+ * record from the list.
+ **/
+static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
+ uint16_t fcf_index)
+{
+ struct lpfc_fcf_pri *new_fcf_pri;
+
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3058 deleting idx x%x pri x%x flg x%x\n",
+ fcf_index, new_fcf_pri->fcf_rec.priority,
+ new_fcf_pri->fcf_rec.flag);
+ spin_lock_irq(&phba->hbalock);
+ if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
+ if (phba->fcf.current_rec.priority ==
+ new_fcf_pri->fcf_rec.priority)
+ phba->fcf.eligible_fcf_cnt--;
+ list_del_init(&new_fcf_pri->list);
+ new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_set_fcf_flogi_fail
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to update
+ * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
+ * flag so that the round robin selection for the particular priority level
+ * will try a different fcf record that does not have this bit set.
+ * If the fcf record is re-read for any reason this flag is cleared before
+ * adding it to the priority list.
+ **/
+void
+lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ struct lpfc_fcf_pri *new_fcf_pri;
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ spin_lock_irq(&phba->hbalock);
+ new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_pri_list_add
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to add
+ * This routine checks the priority of the fcf_index to be added.
+ * If it is a lower priority than the current head of the fcf_pri list
+ * then it is added to the list in the right order.
+ * If it is the same priority as the current head of the list then it
+ * is added to the head of the list and its bit in the rr_bmask is set.
+ * If the fcf_index to be added is of a higher priority than the current
+ * head of the list then the rr_bmask is cleared, its bit is set in the
+ * rr_bmask and it is added to the head of the list.
+ * returns:
+ * 0=success 1=failure
+ **/
+int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
+ struct fcf_record *new_fcf_record)
+{
+ uint16_t current_fcf_pri;
+ uint16_t last_index;
+ struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *next_fcf_pri;
+ struct lpfc_fcf_pri *new_fcf_pri;
+ int ret;
+
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3059 adding idx x%x pri x%x flg x%x\n",
+ fcf_index, new_fcf_record->fip_priority,
+ new_fcf_pri->fcf_rec.flag);
+ spin_lock_irq(&phba->hbalock);
+ if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
+ list_del_init(&new_fcf_pri->list);
+ new_fcf_pri->fcf_rec.fcf_index = fcf_index;
+ new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+ if (list_empty(&phba->fcf.fcf_pri_list)) {
+ list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+ ret = lpfc_sli4_fcf_rr_index_set(phba,
+ new_fcf_pri->fcf_rec.fcf_index);
+ goto out;
+ }
+
+ last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
+ if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ ret = 0; /* Empty rr list */
+ goto out;
+ }
+ current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
+ if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
+ list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+ if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
+ /* fcfs_at_this_priority_level = 1; */
+ phba->fcf.eligible_fcf_cnt = 1;
+ } else
+ /* fcfs_at_this_priority_level++; */
+ phba->fcf.eligible_fcf_cnt++;
+ ret = lpfc_sli4_fcf_rr_index_set(phba,
+ new_fcf_pri->fcf_rec.fcf_index);
+ goto out;
+ }
+
+ list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+ &phba->fcf.fcf_pri_list, list) {
+ if (new_fcf_pri->fcf_rec.priority <=
+ fcf_pri->fcf_rec.priority) {
+ if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
+ list_add(&new_fcf_pri->list,
+ &phba->fcf.fcf_pri_list);
+ else
+ list_add(&new_fcf_pri->list,
+ &((struct lpfc_fcf_pri *)
+ fcf_pri->list.prev)->list);
+ ret = 0;
+ goto out;
+ } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
+ || new_fcf_pri->fcf_rec.priority <
+ next_fcf_pri->fcf_rec.priority) {
+ list_add(&new_fcf_pri->list, &fcf_pri->list);
+ ret = 0;
+ goto out;
+ }
+ if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
+ continue;
+
+ }
+ ret = 1;
+out:
+ /* we use = instead of |= to clear the FLOGI_FAILED flag. */
+ new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
+ spin_unlock_irq(&phba->hbalock);
+ return ret;
+}
+
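A simplified, standalone illustration of the ordered insert that lpfc_sli4_fcf_pri_list_add performs: entries are kept sorted by ascending priority value (a lower value is the preferred FCF). The node type, insert_by_prio helper, and sample priorities are hypothetical, and the sketch omits the rr_bmask and eligible_fcf_cnt bookkeeping as well as the kernel list_head API.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int prio;
	struct node *next;
};

/* Insert a new node so the list stays sorted by ascending priority value. */
static void insert_by_prio(struct node **head, struct node *new)
{
	struct node **pos = head;

	while (*pos && (*pos)->prio <= new->prio)
		pos = &(*pos)->next;
	new->next = *pos;
	*pos = new;
}

int main(void)
{
	static const int prios[] = { 3, 1, 2, 1 };
	struct node *head = NULL, *n;
	unsigned int i;

	for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		n->prio = prios[i];
		insert_by_prio(&head, n);
	}
	for (n = head; n; n = n->next)
		printf("prio %d\n", n->prio); /* prints 1 1 2 3 */
	while (head) {
		n = head;
		head = head->next;
		free(n);
	}
	return 0;
}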
+/**
* lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
@@ -1958,6 +2156,9 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* record for roundrobin FCF failover.
*/
if (!rc) {
+ lpfc_sli4_fcf_pri_list_del(phba,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2781 FCF (x%x) failed connection "
"list check: (x%x/x%x)\n",
@@ -2005,7 +2206,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
goto read_next_fcf;
} else {
fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
- rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+ rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
+ new_fcf_record);
if (rc)
goto read_next_fcf;
}
@@ -2018,7 +2220,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
spin_lock_irq(&phba->hbalock);
if (phba->fcf.fcf_flag & FCF_IN_USE) {
- if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+ if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+ lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
new_fcf_record, vlan_id)) {
if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
phba->fcf.current_rec.fcf_indx) {
@@ -2232,7 +2435,8 @@ read_next_fcf:
(phba->fcf.fcf_flag & FCF_REDISC_PEND))
return;
- if (phba->fcf.fcf_flag & FCF_IN_USE) {
+ if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+ phba->fcf.fcf_flag & FCF_IN_USE) {
/*
* In case the current in-use FCF record no
* longer existed during FCF discovery that
@@ -2247,7 +2451,6 @@ read_next_fcf:
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
return;
@@ -2424,7 +2627,8 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
/* Update the eligible FCF record index bmask */
fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
- rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+
+ rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
out:
lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -2645,6 +2849,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
vport->vpi_state |= LPFC_VPI_REGISTERED;
vport->fc_flag |= FC_VFI_REGISTERED;
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
@@ -2893,8 +3098,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
goto out;
}
/* Reset FCF roundrobin bmask for new discovery */
- memset(phba->fcf.fcf_rr_bmask, 0,
- sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
}
return;
@@ -5592,7 +5796,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
/* Reset FCF roundrobin bmask for new discovery */
- memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index ab4c4d6..046edc4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -3470,11 +3470,16 @@ typedef struct {
or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
struct rcv_sli3 {
- uint32_t word8Rsvd;
#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t ox_id;
+ uint16_t seq_cnt;
+
uint16_t vpi;
uint16_t word9Rsvd;
#else /* __LITTLE_ENDIAN */
+ uint16_t seq_cnt;
+ uint16_t ox_id;
+
uint16_t word9Rsvd;
uint16_t vpi;
#endif
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 11e26a2..7f8003b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,15 +170,8 @@ struct lpfc_sli_intf {
#define LPFC_PCI_FUNC3 3
#define LPFC_PCI_FUNC4 4
-/* SLI4 interface type-2 control register offsets */
-#define LPFC_CTL_PORT_SEM_OFFSET 0x400
-#define LPFC_CTL_PORT_STA_OFFSET 0x404
-#define LPFC_CTL_PORT_CTL_OFFSET 0x408
-#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
-#define LPFC_CTL_PORT_ER2_OFFSET 0x410
+/* SLI4 interface type-2 PDEV_CTL register */
#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
-
-/* Some SLI4 interface type-2 PDEV_CTL register bits */
#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
#define LPFC_CTL_PDEV_CTL_DD 0x00000004
@@ -337,6 +330,7 @@ struct lpfc_cqe {
#define CQE_CODE_RELEASE_WQE 0x2
#define CQE_CODE_RECEIVE 0x4
#define CQE_CODE_XRI_ABORTED 0x5
+#define CQE_CODE_RECEIVE_V1 0x9
/* completion queue entry for wqe completions */
struct lpfc_wcqe_complete {
@@ -440,7 +434,10 @@ struct lpfc_rcqe {
#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
- uint32_t reserved1;
+ uint32_t word1;
+#define lpfc_rcqe_fcf_id_v1_SHIFT 0
+#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
+#define lpfc_rcqe_fcf_id_v1_WORD word1
uint32_t word2;
#define lpfc_rcqe_length_SHIFT 16
#define lpfc_rcqe_length_MASK 0x0000FFFF
@@ -451,6 +448,9 @@ struct lpfc_rcqe {
#define lpfc_rcqe_fcf_id_SHIFT 0
#define lpfc_rcqe_fcf_id_MASK 0x0000003F
#define lpfc_rcqe_fcf_id_WORD word2
+#define lpfc_rcqe_rq_id_v1_SHIFT 0
+#define lpfc_rcqe_rq_id_v1_MASK 0x0000FFFF
+#define lpfc_rcqe_rq_id_v1_WORD word2
uint32_t word3;
#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
@@ -515,7 +515,7 @@ struct lpfc_register {
/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
#define LPFC_SLI_INTF 0x0058
-#define LPFC_SLIPORT_IF2_SMPHR 0x0400
+#define LPFC_CTL_PORT_SEM_OFFSET 0x400
#define lpfc_port_smphr_perr_SHIFT 31
#define lpfc_port_smphr_perr_MASK 0x1
#define lpfc_port_smphr_perr_WORD word0
@@ -575,7 +575,7 @@ struct lpfc_register {
#define LPFC_POST_STAGE_PORT_READY 0xC000
#define LPFC_POST_STAGE_PORT_UE 0xF000
-#define LPFC_SLIPORT_STATUS 0x0404
+#define LPFC_CTL_PORT_STA_OFFSET 0x404
#define lpfc_sliport_status_err_SHIFT 31
#define lpfc_sliport_status_err_MASK 0x1
#define lpfc_sliport_status_err_WORD word0
@@ -593,7 +593,7 @@ struct lpfc_register {
#define lpfc_sliport_status_rdy_WORD word0
#define MAX_IF_TYPE_2_RESETS 1000
-#define LPFC_SLIPORT_CNTRL 0x0408
+#define LPFC_CTL_PORT_CTL_OFFSET 0x408
#define lpfc_sliport_ctrl_end_SHIFT 30
#define lpfc_sliport_ctrl_end_MASK 0x1
#define lpfc_sliport_ctrl_end_WORD word0
@@ -604,8 +604,8 @@ struct lpfc_register {
#define lpfc_sliport_ctrl_ip_WORD word0
#define LPFC_SLIPORT_INIT_PORT 1
-#define LPFC_SLIPORT_ERR_1 0x040C
-#define LPFC_SLIPORT_ERR_2 0x0410
+#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET 0x410
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
* reside in BAR 2.
@@ -3198,6 +3198,8 @@ struct lpfc_grp_hdr {
#define lpfc_grp_hdr_id_MASK 0x000000FF
#define lpfc_grp_hdr_id_WORD word2
uint8_t rev_name[128];
+ uint8_t date[12];
+ uint8_t revision[32];
};
#define FCP_COMMAND 0x0
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 148b98d..a3c8200 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2927,6 +2927,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
sizeof fc_host_symbolic_name(shost));
fc_host_supported_speeds(shost) = 0;
+ if (phba->lmt & LMT_16Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
if (phba->lmt & LMT_10Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
if (phba->lmt & LMT_8Gb)
@@ -3632,8 +3634,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_sli4_fcf_dead_failthrough(phba);
} else {
/* Reset FCF roundrobin bmask for new discovery */
- memset(phba->fcf.fcf_rr_bmask, 0,
- sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
/*
* Handling fast FCF failover to a DEAD FCF event is
* considered equivalent to receiving CVL to all vports.
@@ -3647,7 +3648,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
vport = lpfc_find_vport_by_vpid(phba,
- acqe_fip->index - phba->vpi_base);
+ acqe_fip->index);
ndlp = lpfc_sli4_perform_vport_cvl(vport);
if (!ndlp)
break;
@@ -3719,8 +3720,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* Reset FCF roundrobin bmask for new
* discovery.
*/
- memset(phba->fcf.fcf_rr_bmask, 0,
- sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
}
break;
default:
@@ -4035,6 +4035,34 @@ lpfc_reset_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads the PCI SR-IOV extended capability of the physical
+ * function and returns the maximum number of virtual functions (TotalVFs)
+ * the device supports. It returns 0 if the device is not a physical
+ * function or does not advertise the SR-IOV capability.
+ **/
+uint16_t
+lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ uint16_t nr_virtfn;
+ int pos;
+
+ if (!pdev->is_physfn)
+ return 0;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos == 0)
+ return 0;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
+ return nr_virtfn;
+}
+
+/**
* lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
* @phba: pointer to lpfc hba data structure.
* @nr_vfn: number of virtual functions to be enabled.
@@ -4049,8 +4077,17 @@ int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
struct pci_dev *pdev = phba->pcidev;
+ uint16_t max_nr_vfn;
int rc;
+ max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+ if (nr_vfn > max_nr_vfn) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3057 Requested vfs (%d) greater than "
+ "supported vfs (%d)", nr_vfn, max_nr_vfn);
+ return -EINVAL;
+ }
+
rc = pci_enable_sriov(pdev, nr_vfn);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -4516,7 +4553,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
}
- return rc;
+ return 0;
out_free_fcp_eq_hdl:
kfree(phba->sli4_hba.fcp_eq_hdl);
@@ -4966,17 +5003,14 @@ out_free_mem:
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post rpi header templates to the
- * HBA consistent with the SLI-4 interface spec. This routine
+ * port for those SLI4 ports that do not support extents. This routine
* posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
- * No locks are held here because this is an initialization routine
- * called only from probe or lpfc_online when interrupts are not
- * enabled and the driver is reinitializing the device.
+ * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
+ * and should be called only when interrupts are disabled.
*
* Return codes
* 0 - successful
- * -ENOMEM - No available memory
- * -EIO - The mailbox failed to complete successfully.
+ * -ERROR - otherwise.
**/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
@@ -5687,17 +5721,22 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
break;
case LPFC_SLI_INTF_IF_TYPE_2:
phba->sli4_hba.u.if_type2.ERR1regaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER1_OFFSET;
phba->sli4_hba.u.if_type2.ERR2regaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER2_OFFSET;
phba->sli4_hba.u.if_type2.CTRLregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_CTL_OFFSET;
phba->sli4_hba.u.if_type2.STATUSregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_STA_OFFSET;
phba->sli4_hba.SLIINTFregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
phba->sli4_hba.PSMPHRregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_SEM_OFFSET;
phba->sli4_hba.RQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
phba->sli4_hba.WQDBregaddr =
@@ -8859,11 +8898,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
return -EINVAL;
}
lpfc_decode_firmware_rev(phba, fwrev, 1);
- if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
+ if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3023 Updating Firmware. Current Version:%s "
"New Version:%s\n",
- fwrev, image->rev_name);
+ fwrev, image->revision);
for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
GFP_KERNEL);
@@ -8892,9 +8931,9 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
fw->size - offset);
break;
}
- temp_offset += SLI4_PAGE_SIZE;
memcpy(dmabuf->virt, fw->data + temp_offset,
SLI4_PAGE_SIZE);
+ temp_offset += SLI4_PAGE_SIZE;
}
rc = lpfc_wr_object(phba, &dma_buffer_list,
(fw->size - offset), &offset);
@@ -9005,6 +9044,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
}
INIT_LIST_HEAD(&phba->active_rrq_list);
+ INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
/* Set up common device driver resources */
error = lpfc_setup_driver_resource_phase2(phba);
@@ -9112,7 +9152,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
-
return 0;
out_disable_intr:
@@ -9483,6 +9522,13 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
}
pci_restore_state(pdev);
+
+ /*
+ * As the new kernel behavior of pci_restore_state() API call clears
+ * device saved_state flag, need to save the restored state again.
+ */
+ pci_save_state(pdev);
+
if (pdev->is_busmaster)
pci_set_master(pdev);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 5567670..83450cc 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2031,7 +2031,7 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
bf_set(lpfc_init_vfi_vp, init_vfi, 1);
bf_set(lpfc_init_vfi_vfi, init_vfi,
vport->phba->sli4_hba.vfi_ids[vport->vfi]);
- bf_set(lpfc_init_vpi_vpi, init_vfi,
+ bf_set(lpfc_init_vfi_vpi, init_vfi,
vport->phba->vpi_ids[vport->vpi]);
bf_set(lpfc_init_vfi_fcfi, init_vfi,
vport->phba->fcf.fcfi);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3ccc974..eadd241 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1302,13 +1302,13 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
case SCSI_PROT_NORMAL:
default:
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9063 BLKGRD: Bad op/guard:%d/%d combination\n",
- scsi_get_prot_op(sc), guard_type);
+ "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
+ scsi_get_prot_op(sc));
ret = 1;
break;
}
- } else if (guard_type == SHOST_DIX_GUARD_CRC) {
+ } else {
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
@@ -1324,17 +1324,18 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
+ *txop = BG_OP_IN_CRC_OUT_NODIF;
+ *rxop = BG_OP_IN_NODIF_OUT_CRC;
+ break;
+
case SCSI_PROT_NORMAL:
default:
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9075 BLKGRD: Bad op/guard:%d/%d combination\n",
- scsi_get_prot_op(sc), guard_type);
+ "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
+ scsi_get_prot_op(sc));
ret = 1;
break;
}
- } else {
- /* unsupported format */
- BUG();
}
return ret;
@@ -1352,45 +1353,6 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
return sc->device->sector_size;
}
-/**
- * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
- * @sc: in: SCSI command
- * @apptagmask: out: app tag mask
- * @apptagval: out: app tag value
- * @reftag: out: ref tag (reference tag)
- *
- * Description:
- * Extract DIF parameters from the command if possible. Otherwise,
- * use default parameters.
- *
- **/
-static inline void
-lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
- uint16_t *apptagval, uint32_t *reftag)
-{
- struct scsi_dif_tuple *spt;
- unsigned char op = scsi_get_prot_op(sc);
- unsigned int protcnt = scsi_prot_sg_count(sc);
- static int cnt;
-
- if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
- op == SCSI_PROT_WRITE_PASS)) {
-
- cnt++;
- spt = page_address(sg_page(scsi_prot_sglist(sc))) +
- scsi_prot_sglist(sc)[0].offset;
- *apptagmask = 0;
- *apptagval = 0;
- *reftag = cpu_to_be32(spt->ref_tag);
-
- } else {
- /* SBC defines ref tag to be lower 32bits of LBA */
- *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
- *apptagmask = 0;
- *apptagval = 0;
- }
-}
-
/*
* This function sets up buffer list for protection groups of
* type LPFC_PG_TYPE_NO_DIF
@@ -1427,9 +1389,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
dma_addr_t physaddr;
int i = 0, num_bde = 0, status;
int datadir = sc->sc_data_direction;
- unsigned blksize;
uint32_t reftag;
- uint16_t apptagmask, apptagval;
+ unsigned blksize;
uint8_t txop, rxop;
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
@@ -1438,17 +1399,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command for pde*/
blksize = lpfc_cmd_blksize(sc);
- lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+ reftag = scsi_get_lba(sc) & 0xffffffff;
/* setup PDE5 with what we have */
pde5 = (struct lpfc_pde5 *) bpl;
memset(pde5, 0, sizeof(struct lpfc_pde5));
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
- pde5->reftag = reftag;
/* Endianness conversion if necessary for PDE5 */
pde5->word0 = cpu_to_le32(pde5->word0);
- pde5->reftag = cpu_to_le32(pde5->reftag);
+ pde5->reftag = cpu_to_le32(reftag);
/* advance bpl and increment bde count */
num_bde++;
@@ -1463,10 +1423,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (datadir == DMA_FROM_DEVICE) {
bf_set(pde6_ce, pde6, 1);
bf_set(pde6_re, pde6, 1);
- bf_set(pde6_ae, pde6, 1);
}
bf_set(pde6_ai, pde6, 1);
- bf_set(pde6_apptagval, pde6, apptagval);
+ bf_set(pde6_ae, pde6, 0);
+ bf_set(pde6_apptagval, pde6, 0);
/* Endianness conversion if necessary for PDE6 */
pde6->word0 = cpu_to_le32(pde6->word0);
@@ -1551,7 +1511,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
unsigned char pgdone = 0, alldone = 0;
unsigned blksize;
uint32_t reftag;
- uint16_t apptagmask, apptagval;
uint8_t txop, rxop;
int num_bde = 0;
@@ -1571,7 +1530,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command */
blksize = lpfc_cmd_blksize(sc);
- lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+ reftag = scsi_get_lba(sc) & 0xffffffff;
split_offset = 0;
do {
@@ -1579,11 +1538,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
pde5 = (struct lpfc_pde5 *) bpl;
memset(pde5, 0, sizeof(struct lpfc_pde5));
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
- pde5->reftag = reftag;
/* Endianness conversion if necessary for PDE5 */
pde5->word0 = cpu_to_le32(pde5->word0);
- pde5->reftag = cpu_to_le32(pde5->reftag);
+ pde5->reftag = cpu_to_le32(reftag);
/* advance bpl and increment bde count */
num_bde++;
@@ -1597,9 +1555,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde6_oprx, pde6, rxop);
bf_set(pde6_ce, pde6, 1);
bf_set(pde6_re, pde6, 1);
- bf_set(pde6_ae, pde6, 1);
bf_set(pde6_ai, pde6, 1);
- bf_set(pde6_apptagval, pde6, apptagval);
+ bf_set(pde6_ae, pde6, 0);
+ bf_set(pde6_apptagval, pde6, 0);
/* Endianness conversion if necessary for PDE6 */
pde6->word0 = cpu_to_le32(pde6->word0);
@@ -1621,8 +1579,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
memset(pde7, 0, sizeof(struct lpfc_pde7));
bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
- pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
- pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
+ pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+ pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
protgrp_blks = protgroup_len / 8;
protgrp_bytes = protgrp_blks * blksize;
@@ -1632,7 +1590,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
protgroup_offset += protgroup_remainder;
protgrp_blks = protgroup_remainder / 8;
- protgrp_bytes = protgroup_remainder * blksize;
+ protgrp_bytes = protgrp_blks * blksize;
} else {
protgroup_offset = 0;
curr_prot++;
@@ -2006,16 +1964,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
/*
* setup sense data descriptor 0 per SPC-4 as an information
- * field, and put the failing LBA in it
+ * field, and put the failing LBA in it.
+ * This code assumes there was also a guard/app/ref tag error
+ * indication.
*/
- cmd->sense_buffer[8] = 0; /* Information */
- cmd->sense_buffer[9] = 0xa; /* Add. length */
+ cmd->sense_buffer[7] = 0xc; /* Additional sense length */
+ cmd->sense_buffer[8] = 0; /* Information descriptor type */
+ cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
+ cmd->sense_buffer[10] = 0x80; /* Validity bit */
bghm /= cmd->device->sector_size;
failing_sector = scsi_get_lba(cmd);
failing_sector += bghm;
- put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
+ /* Descriptor Information */
+ put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
}
if (!ret) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 98999bb..8b799f0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -560,7 +560,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
- rrq->xritag = phba->sli4_hba.xri_ids[xritag];
+ rrq->xritag = xritag;
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
@@ -2452,7 +2452,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* search continue save q for same XRI */
list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
- if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
+ if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
+ saveq->iocb.unsli3.rcvsli3.ox_id) {
list_add_tail(&saveq->list, &iocbq->list);
found = 1;
break;
@@ -3355,6 +3356,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
irspiocbq);
break;
case CQE_CODE_RECEIVE:
+ case CQE_CODE_RECEIVE_V1:
dmabuf = container_of(cq_event, struct hbq_dmabuf,
cq_event);
lpfc_sli4_handle_received_buffer(phba, dmabuf);
@@ -4712,10 +4714,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
* lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
* @phba: Pointer to HBA context object.
* @type: The resource extent type.
+ * @extnt_count: buffer to hold port available extent count.
+ * @extnt_size: buffer to hold element count per extent.
*
- * This function allocates all SLI4 resource identifiers.
+ * This function calls the port and retrieves the number of available
+ * extents and their size for a particular extent type.
+ *
+ * Returns: 0 if successful. Nonzero otherwise.
**/
-static int
+int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
uint16_t *extnt_count, uint16_t *extnt_size)
{
@@ -4892,7 +4899,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
req_len, *emb);
if (alloc_len < req_len) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "9000 Allocated DMA memory size (x%x) is "
+ "2982 Allocated DMA memory size (x%x) is "
"less than the requested DMA memory "
"size (x%x)\n", alloc_len, req_len);
return -ENOMEM;
@@ -5506,6 +5513,154 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_cnt: buffer to hold port extent count response
+ * @extnt_size: buffer to hold port extent size response.
+ *
+ * This function calls the port to read the host allocated extents
+ * for a particular type.
+ **/
+int
+lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
+ uint16_t *extnt_cnt, uint16_t *extnt_size)
+{
+ bool emb;
+ int rc = 0;
+ uint16_t curr_blks = 0;
+ uint32_t req_len, emb_len;
+ uint32_t alloc_len, mbox_tmo;
+ struct list_head *blk_list_head;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ LPFC_MBOXQ_t *mbox;
+ void *virtaddr = NULL;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+ struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ blk_list_head = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ default:
+ return -EIO;
+ }
+
+ /* Count the number of extents currently allocated for this type. */
+ list_for_each_entry(rsrc_blk, blk_list_head, list) {
+ if (curr_blks == 0) {
+ /*
+ * The GET_ALLOCATED mailbox does not return the size,
+ * just the count. The size should be just the size
+ * stored in the current allocated block and all sizes
+ * for an extent type are the same so set the return
+ * value now.
+ */
+ *extnt_size = rsrc_blk->rsrc_size;
+ }
+ curr_blks++;
+ }
+
+ /* Calculate the total requested length of the dma memory. */
+ req_len = curr_blks * sizeof(uint16_t);
+
+ /*
+ * Calculate the size of an embedded mailbox. The uint32_t
+ * accounts for extents-specific word.
+ */
+ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+ sizeof(uint32_t);
+
+ /*
+ * Presume the allocation and response will fit into an embedded
+ * mailbox. If not true, reconfigure to a non-embedded mailbox.
+ */
+ emb = LPFC_SLI4_MBX_EMBED;
+ req_len = emb_len;
+ if (req_len > emb_len) {
+ req_len = curr_blks * sizeof(uint16_t) +
+ sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+ emb = LPFC_SLI4_MBX_NEMBED;
+ }
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
+
+ alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
+ req_len, emb);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2983 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ /*
+ * Figure out where the response is located. Then get local pointers
+ * to the response data. The port does not guarantee a response to
+ * every extent count request, so update the local variable with the
+ * allocated count from the port.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED) {
+ rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+ shdr = &rsrc_ext->header.cfg_shdr;
+ *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+ } else {
+ virtaddr = mbox->sge_array->addr[0];
+ n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ shdr = &n_rsrc->cfg_shdr;
+ *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+ }
+
+ if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2984 Failed to read allocated resources "
+ "for type %d - Status 0x%x Add'l Status 0x%x.\n",
+ type,
+ bf_get(lpfc_mbox_hdr_status, &shdr->response),
+ bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
+ rc = -EIO;
+ goto err_exit;
+ }
+ err_exit:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return rc;
+}
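
For illustration only (not part of the patch): a minimal sketch of how a caller could query the port's allocated extents with the routine above. The loop, the helper name and the log message number are assumptions; only the resource-type defines and lpfc_printf_log() used elsewhere in this file are taken from the source.

	/* Illustrative sketch: read the allocated extent count/size for each
	 * FCoE resource type handled by lpfc_sli4_get_allocated_extnts(). */
	static int example_read_allocated_extents(struct lpfc_hba *phba)
	{
		uint16_t types[] = { LPFC_RSC_TYPE_FCOE_VPI, LPFC_RSC_TYPE_FCOE_XRI,
				     LPFC_RSC_TYPE_FCOE_VFI, LPFC_RSC_TYPE_FCOE_RPI };
		uint16_t cnt, size;
		int i, rc;

		for (i = 0; i < 4; i++) {
			rc = lpfc_sli4_get_allocated_extnts(phba, types[i],
							    &cnt, &size);
			if (rc)
				return rc;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"xxxx type %d: %d extents of %d resources\n",
					types[i], cnt, size);
		}
		return 0;
	}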
+
+/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -5837,6 +5992,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"Advanced Error Reporting (AER)\n");
phba->cfg_aer_support = 0;
}
+ rc = 0;
}
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -6634,6 +6790,9 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
unsigned long iflags;
int rc;
+ /* dump the mailbox command from the issue path if idiag dump is set up */
+ lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
+
rc = lpfc_mbox_dev_check(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7318,12 +7477,12 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
- break;
+ break;
case CMD_XMIT_SEQUENCE64_CX:
bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
iocbq->iocb.un.ulpWord[3]);
bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
- iocbq->iocb.ulpContext);
+ iocbq->iocb.unsli3.rcvsli3.ox_id);
/* The entire sequence is transmitted for this IOCB */
xmit_len = total_len;
cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -7341,7 +7500,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
wqe->xmit_sequence.xmit_len = xmit_len;
command_type = OTHER_COMMAND;
- break;
+ break;
case CMD_XMIT_BCAST64_CN:
/* word3 iocb=iotag32 wqe=seq_payload_len */
wqe->xmit_bcast64.seq_payload_len = xmit_len;
@@ -7355,7 +7514,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
LPFC_WQE_LENLOC_WORD3);
bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
- break;
+ break;
case CMD_FCP_IWRITE64_CR:
command_type = FCP_COMMAND_DATA_OUT;
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -7375,7 +7534,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
- break;
+ break;
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
@@ -7394,7 +7553,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
- break;
+ break;
case CMD_FCP_ICMND64_CR:
/* word3 iocb=IO_TAG wqe=reserved */
wqe->fcp_icmd.rsrvd3 = 0;
@@ -7407,7 +7566,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
- break;
+ break;
case CMD_GEN_REQUEST64_CR:
/* For this command calculate the xmit length of the
* request bde.
@@ -7442,7 +7601,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
command_type = OTHER_COMMAND;
- break;
+ break;
case CMD_XMIT_ELS_RSP64_CX:
ndlp = (struct lpfc_nodelist *)iocbq->context1;
/* words0-2 BDE memcpy */
@@ -7457,7 +7616,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
- iocbq->iocb.ulpContext);
+ iocbq->iocb.unsli3.rcvsli3.ox_id);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
phba->vpi_ids[iocbq->vport->vpi]);
@@ -7470,7 +7629,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
command_type = OTHER_COMMAND;
- break;
+ break;
case CMD_CLOSE_XRI_CN:
case CMD_ABORT_XRI_CN:
case CMD_ABORT_XRI_CX:
@@ -7509,7 +7668,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
cmnd = CMD_ABORT_XRI_CX;
command_type = OTHER_COMMAND;
xritag = 0;
- break;
+ break;
case CMD_XMIT_BLS_RSP64_CX:
/* As BLS ABTS RSP WQE is very different from other WQEs,
* we re-construct this WQE here based on information in
@@ -7553,7 +7712,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
}
- break;
+ break;
case CMD_XRI_ABORTED_CX:
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -7565,7 +7724,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
"2014 Invalid command 0x%x\n",
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
- break;
+ break;
}
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
@@ -10481,10 +10640,14 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
struct hbq_dmabuf *dma_buf;
- uint32_t status;
+ uint32_t status, rq_id;
unsigned long iflags;
- if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
+ if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
+ rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
+ else
+ rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
+ if (rq_id != hrq->queue_id)
goto out;
status = bf_get(lpfc_rcqe_status, rcqe);
@@ -10563,6 +10726,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
(struct sli4_wcqe_xri_aborted *)&cqevt);
break;
case CQE_CODE_RECEIVE:
+ case CQE_CODE_RECEIVE_V1:
/* Process the RQ event */
phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_rcqe(phba,
@@ -12345,19 +12509,18 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
+ * lpfc_sli4_alloc_xri - Get an available xri in the device's range
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post rpi header templates to the
- * port for those SLI4 ports that do not support extents. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
- * and should be called only when interrupts are disabled.
+ * HBA consistent with the SLI-4 interface spec. This routine
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
*
- * Return codes
- * 0 - successful
- * -ERROR - otherwise.
- */
+ * Returns
+ * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
+ * LPFC_RPI_ALLOC_ERROR if no rpis are available.
+ **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
@@ -13406,7 +13569,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
 * This function validates that the xri maps to the known range of XRIs allocated and
* used by the driver.
**/
-static uint16_t
+uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
uint16_t xri)
{
@@ -13643,10 +13806,12 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
+ struct hbq_dmabuf *hbq_buf;
struct lpfc_dmabuf *d_buf, *n_buf;
struct lpfc_iocbq *first_iocbq, *iocbq;
struct fc_frame_header *fc_hdr;
uint32_t sid;
+ uint32_t len, tot_len;
struct ulp_bde64 *pbde;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
@@ -13655,6 +13820,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
lpfc_update_rcv_time_stamp(vport);
/* get the Remote Port's SID */
sid = sli4_sid_from_fc_hdr(fc_hdr);
+ tot_len = 0;
/* Get an iocbq struct to fill in. */
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
if (first_iocbq) {
@@ -13662,9 +13828,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
- first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
- /* iocbq is prepped for internal consumption. Logical vpi. */
- first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
+ first_iocbq->iocb.ulpContext = NO_XRI;
+ first_iocbq->iocb.unsli3.rcvsli3.ox_id =
+ be16_to_cpu(fc_hdr->fh_ox_id);
+ /* iocbq is prepped for internal consumption. Physical vpi. */
+ first_iocbq->iocb.unsli3.rcvsli3.vpi =
+ vport->phba->vpi_ids[vport->vpi];
/* put the first buffer into the first IOCBq */
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
@@ -13672,9 +13841,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.un.rcvels.remoteID = sid;
- first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
- bf_get(lpfc_rcqe_length,
+ tot_len = bf_get(lpfc_rcqe_length,
&seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
}
iocbq = first_iocbq;
/*
@@ -13692,9 +13861,13 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
pbde = (struct ulp_bde64 *)
&iocbq->iocb.unsli3.sli3Words[4];
pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
- first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
- bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+ /* We need to get the size out of the right CQE */
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ len = bf_get(lpfc_rcqe_length,
+ &hbq_buf->cq_event.cqe.rcqe_cmpl);
+ iocbq->iocb.unsli3.rcvsli3.acc_len += len;
+ tot_len += len;
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
@@ -13712,9 +13885,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
iocbq->iocb.ulpBdeCount = 1;
iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
- first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
- bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+ /* We need to get the size out of the right CQE */
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ len = bf_get(lpfc_rcqe_length,
+ &hbq_buf->cq_event.cqe.rcqe_cmpl);
+ tot_len += len;
+ iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+
iocbq->iocb.un.rcvels.remoteID = sid;
list_add_tail(&iocbq->list, &first_iocbq->list);
}
@@ -13787,7 +13965,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
lpfc_in_buf_free(phba, &dmabuf->dbuf);
return;
}
- fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
+ if ((bf_get(lpfc_cqe_code,
+ &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
+ fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
+ &dmabuf->cq_event.cqe.rcqe_cmpl);
+ else
+ fcfi = bf_get(lpfc_rcqe_fcf_id,
+ &dmabuf->cq_event.cqe.rcqe_cmpl);
vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
/* throw out the frame */
@@ -14451,6 +14635,92 @@ fail_fcf_read:
}
/**
+ * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask from the next priority level
+ * @phba: pointer to the lpfc_hba struct for this port.
+ *
+ * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
+ * routine when the rr_bmask is empty. The FCF indices are put into the
+ * rr_bmask based on their priority level, starting from the highest
+ * priority down to the lowest, so the most likely FCF candidate will be
+ * in the highest priority group. When this routine is called it searches
+ * the fcf_pri list for the next lowest priority group and repopulates the
+ * rr_bmask with only those fcf indexes.
+ *
+ * Returns:
+ * 1 = success, 0 = failure.
+ **/
+int
+lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
+{
+ uint16_t next_fcf_pri;
+ uint16_t last_index;
+ struct lpfc_fcf_pri *fcf_pri;
+ int rc;
+ int ret = 0;
+
+ last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3060 Last IDX %d\n", last_index);
+ if (list_empty(&phba->fcf.fcf_pri_list)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "3061 Last IDX %d\n", last_index);
+ return 0; /* Empty rr list */
+ }
+ next_fcf_pri = 0;
+ /*
+ * Clear the rr_bmask and set all of the bits that are at this
+ * priority.
+ */
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
+ continue;
+ /*
+ * the first priority that has not failed FLOGI
+ * will be the highest.
+ */
+ if (!next_fcf_pri)
+ next_fcf_pri = fcf_pri->fcf_rec.priority;
+ spin_unlock_irq(&phba->hbalock);
+ if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+ rc = lpfc_sli4_fcf_rr_index_set(phba,
+ fcf_pri->fcf_rec.fcf_index);
+ if (rc)
+ return 0;
+ }
+ spin_lock_irq(&phba->hbalock);
+ }
+ /*
+ * If next_fcf_pri was not set above and the list is not empty, then
+ * we have failed FLOGIs on all of them. So reset the FLOGI-failed
+ * flags and start at the beginning.
+ */
+ if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
+ /*
+ * the first priority that has not failed FLOGI
+ * will be the highest.
+ */
+ if (!next_fcf_pri)
+ next_fcf_pri = fcf_pri->fcf_rec.priority;
+ spin_unlock_irq(&phba->hbalock);
+ if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+ rc = lpfc_sli4_fcf_rr_index_set(phba,
+ fcf_pri->fcf_rec.fcf_index);
+ if (rc)
+ return 0;
+ }
+ spin_lock_irq(&phba->hbalock);
+ }
+ } else
+ ret = 1;
+ spin_unlock_irq(&phba->hbalock);
+
+ return ret;
+}
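
For illustration only (not part of the patch): a self-contained sketch of the grouping rule the routine above implements — take the priority of the first list entry that has not failed FLOGI and mark every entry at that priority as eligible; if all entries have failed, clear the failed flags and repeat. The types and names below are illustrative; only set_bit() from the kernel bitops is assumed.

	struct pri_ent { unsigned int index, prio; bool flogi_failed; };

	static void repopulate_eligible(struct pri_ent *list, int n,
					unsigned long *bmask)
	{
		bool found = false;
		unsigned int next_pri = 0;
		int i, pass;

		for (pass = 0; pass < 2 && !found; pass++) {
			for (i = 0; i < n; i++) {
				if (pass)	/* second pass: forget old FLOGI failures */
					list[i].flogi_failed = false;
				if (list[i].flogi_failed)
					continue;
				if (!found) {	/* first eligible entry defines the level */
					found = true;
					next_pri = list[i].prio;
				}
				if (list[i].prio == next_pri)
					set_bit(list[i].index, bmask);
			}
		}
	}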
+/**
* lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
* @phba: pointer to lpfc hba data structure.
*
@@ -14466,6 +14736,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
uint16_t next_fcf_index;
/* Search start from next bit of currently registered FCF index */
+next_priority:
next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
LPFC_SLI4_FCF_TBL_INDX_MAX;
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
@@ -14473,17 +14744,46 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
next_fcf_index);
/* Wrap around condition on phba->fcf.fcf_rr_bmask */
- if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ /*
+ * If we have wrapped then we need to clear the bits that
+ * have been tested so that we can detect when we should
+ * change the priority level.
+ */
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ }
+
/* Check roundrobin failover list empty condition */
- if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
+ next_fcf_index == phba->fcf.current_rec.fcf_indx) {
+ /*
+ * If the next fcf index is not found, check whether there are
+ * lower priority level fcf's in the fcf_priority list.
+ * Set up the rr_bmask with all of the available fcf bits
+ * at that level and continue the selection process.
+ */
+ if (lpfc_check_next_fcf_pri_level(phba))
+ goto next_priority;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2844 No roundrobin failover FCF available\n");
- return LPFC_FCOE_FCF_NEXT_NONE;
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+ return LPFC_FCOE_FCF_NEXT_NONE;
+ else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "3063 Only FCF available idx %d, flag %x\n",
+ next_fcf_index,
+ phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
+ return next_fcf_index;
+ }
}
+ if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
+ phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
+ LPFC_FCF_FLOGI_FAILED)
+ goto next_priority;
+
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2845 Get next roundrobin failover FCF (x%x)\n",
next_fcf_index);
@@ -14535,6 +14835,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
+ struct lpfc_fcf_pri *fcf_pri;
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2762 FCF (x%x) reached driver's book "
@@ -14543,6 +14844,14 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
return;
}
/* Clear the eligible FCF record index bmask */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
+ list_del_init(&fcf_pri->list);
+ break;
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 4b17035..19bb87a 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -81,6 +81,8 @@
(fc_hdr)->fh_f_ctl[1] << 8 | \
(fc_hdr)->fh_f_ctl[2])
+#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
+
enum lpfc_sli4_queue_type {
LPFC_EQ,
LPFC_GCQ,
@@ -157,6 +159,25 @@ struct lpfc_fcf_rec {
#define RECORD_VALID 0x02
};
+struct lpfc_fcf_pri_rec {
+ uint16_t fcf_index;
+#define LPFC_FCF_ON_PRI_LIST 0x0001
+#define LPFC_FCF_FLOGI_FAILED 0x0002
+ uint16_t flag;
+ uint32_t priority;
+};
+
+struct lpfc_fcf_pri {
+ struct list_head list;
+ struct lpfc_fcf_pri_rec fcf_rec;
+};
+
+/*
+ * Maximum FCF table index; it is for driver-internal bookkeeping and
+ * just needs to be no less than the supported HBA's FCF table size.
+ */
+#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
+
struct lpfc_fcf {
uint16_t fcfi;
uint32_t fcf_flag;
@@ -176,15 +197,13 @@ struct lpfc_fcf {
uint32_t eligible_fcf_cnt;
struct lpfc_fcf_rec current_rec;
struct lpfc_fcf_rec failover_rec;
+ struct list_head fcf_pri_list;
+ struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
+ uint32_t current_fcf_scan_pri;
struct timer_list redisc_wait;
unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
};
-/*
- * Maximum FCF table index, it is for driver internal book keeping, it
- * just needs to be no less than the supported HBA's FCF table size.
- */
-#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
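
For illustration only (not part of the patch): the list management for these new priority fields lives in the discovery code, outside this hunk. Purely to show how the pieces fit together, a hypothetical helper that books a discovered FCF on the priority list walked by lpfc_check_next_fcf_pri_level() might look like the sketch below; the helper name and call site are assumptions, the fields are the ones defined above.

	static void example_fcf_pri_record(struct lpfc_hba *phba,
					   uint16_t fcf_index, uint32_t priority)
	{
		struct lpfc_fcf_pri *fcf_pri = &phba->fcf.fcf_pri[fcf_index];

		fcf_pri->fcf_rec.fcf_index = fcf_index;
		fcf_pri->fcf_rec.priority = priority;
		fcf_pri->fcf_rec.flag |= LPFC_FCF_ON_PRI_LIST;
		list_add_tail(&fcf_pri->list, &phba->fcf.fcf_pri_list);
	}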
#define LPFC_REGION23_SIGNATURE "RG23"
#define LPFC_REGION23_VERSION 1
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c03921b..c1e0ae9 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.23"
+#define LPFC_DRIVER_VERSION "8.3.25"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 7370c08..3948a00 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.38-rc1"
-#define MEGASAS_RELDATE "May. 11, 2011"
-#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.05.40-rc1"
+#define MEGASAS_RELDATE "Jul. 26, 2011"
+#define MEGASAS_EXT_VERSION "Tue. Jul. 26 17:00:00 PDT 2011"
/*
* Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2d8cdce..776d019 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.38-rc1
+ * Version : v00.00.05.40-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -54,6 +54,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
@@ -2057,6 +2058,20 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
}
}
+static int megasas_change_queue_depth(struct scsi_device *sdev,
+ int queue_depth, int reason)
+{
+ if (reason != SCSI_QDEPTH_DEFAULT)
+ return -EOPNOTSUPP;
+
+ if (queue_depth > sdev->host->can_queue)
+ queue_depth = sdev->host->can_queue;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
+ queue_depth);
+
+ return queue_depth;
+}
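
For illustration only (not part of the patch): the hook added above is reached through the host template, typically when userspace rewrites the device's queue_depth sysfs attribute. A midlayer-style caller boils down to the sketch below; the wrapper name is an assumption, while the template member and SCSI_QDEPTH_DEFAULT are the standard SCSI midlayer interfaces of this era.

	static int example_set_queue_depth(struct scsi_device *sdev, int depth)
	{
		struct scsi_host_template *sht = sdev->host->hostt;

		if (!sht->change_queue_depth)
			return -EINVAL;
		return sht->change_queue_depth(sdev, depth, SCSI_QDEPTH_DEFAULT);
	}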
+
/*
* Scsi host template for megaraid_sas driver
*/
@@ -2074,6 +2089,7 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.bios_param = megasas_bios_param,
.use_clustering = ENABLE_CLUSTERING,
+ .change_queue_depth = megasas_change_queue_depth,
};
/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 8fe3a45..5a5af1f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -288,7 +288,6 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
/* Get dev handle from Pd */
*pDevHandle = MR_PdDevHandleGet(pd, map);
}
- retval = FALSE;
}
*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 3e86bcc..5202de3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -4257,6 +4257,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
u32 log_info;
struct MPT2SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
+ unsigned long flags;
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4281,6 +4282,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* the failed direct I/O should be redirected to volume
*/
if (_scsih_scsi_direct_io_get(ioc, smid)) {
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->scsi_lookup[smid - 1].scmd = scmd;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
_scsih_scsi_direct_io_set(ioc, smid, 0);
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
index c82b012..78f7e20 100644
--- a/drivers/scsi/mvsas/Kconfig
+++ b/drivers/scsi/mvsas/Kconfig
@@ -3,7 +3,7 @@
#
# Copyright 2007 Red Hat, Inc.
# Copyright 2008 Marvell. <kewei@marvell.com>
-# Copyright 2009-20011 Marvell. <yuxiangl@marvell.com>
+# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
#
# This file is licensed under GPLv2.
#
@@ -41,3 +41,10 @@ config SCSI_MVSAS_DEBUG
help
Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
the driver prints some messages to the console.
+config SCSI_MVSAS_TASKLET
+ bool "Support for interrupt tasklet"
+ default n
+ depends on SCSI_MVSAS
+ help
+ Compiles the 88SE64xx/88SE94xx driver in interrupt tasklet mode. In this
+ mode, the interrupt handler will schedule a tasklet.
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 13c9604..8ba4722 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -33,7 +33,6 @@ static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
u32 reg;
struct mvs_phy *phy = &mvi->phy[i];
- /* TODO check & save device type */
reg = mr32(MVS_GBL_PORT_TYPE);
phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
if (reg & MODE_SAS_SATA & (1 << i))
@@ -48,7 +47,7 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
u32 tmp;
tmp = mr32(MVS_PCS);
- if (mvi->chip->n_phy <= 4)
+ if (mvi->chip->n_phy <= MVS_SOC_PORTS)
tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
else
tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
@@ -58,24 +57,16 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
+ int i;
mvs_phy_hacks(mvi);
if (!(mvi->flags & MVF_FLAG_SOC)) {
- /* TEST - for phy decoding error, adjust voltage levels */
- mw32(MVS_P0_VSR_ADDR + 0, 0x8);
- mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
-
- mw32(MVS_P0_VSR_ADDR + 8, 0x8);
- mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
-
- mw32(MVS_P0_VSR_ADDR + 16, 0x8);
- mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
-
- mw32(MVS_P0_VSR_ADDR + 24, 0x8);
- mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
+ for (i = 0; i < MVS_SOC_PORTS; i++) {
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8);
+ mvs_write_port_vsr_data(mvi, i, 0x2F0);
+ }
} else {
- int i;
/* disable auto port detection */
mw32(MVS_GBL_PORT_TYPE, 0);
for (i = 0; i < mvi->chip->n_phy; i++) {
@@ -95,7 +86,7 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
u32 reg, tmp;
if (!(mvi->flags & MVF_FLAG_SOC)) {
- if (phy_id < 4)
+ if (phy_id < MVS_SOC_PORTS)
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
else
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
@@ -104,13 +95,13 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
reg = mr32(MVS_PHY_CTL);
tmp = reg;
- if (phy_id < 4)
+ if (phy_id < MVS_SOC_PORTS)
tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
else
- tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
+ tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS;
if (!(mvi->flags & MVF_FLAG_SOC)) {
- if (phy_id < 4) {
+ if (phy_id < MVS_SOC_PORTS) {
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
mdelay(10);
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
@@ -133,9 +124,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
tmp &= ~PHYEV_RDY_CH;
mvs_write_port_irq_stat(mvi, phy_id, tmp);
tmp = mvs_read_phy_ctl(mvi, phy_id);
- if (hard == 1)
+ if (hard == MVS_HARD_RESET)
tmp |= PHY_RST_HARD;
- else if (hard == 0)
+ else if (hard == MVS_SOFT_RESET)
tmp |= PHY_RST;
mvs_write_phy_ctl(mvi, phy_id, tmp);
if (hard) {
@@ -321,6 +312,11 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
/* init phys */
mvs_64xx_phy_hacks(mvi);
+ tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
+ tmp &= 0x0000ffff;
+ tmp |= 0x00fa0000;
+ mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
+
/* enable auto port detection */
mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
@@ -346,7 +342,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
mvs_64xx_enable_xmt(mvi, i);
- mvs_64xx_phy_reset(mvi, i, 1);
+ mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
msleep(500);
mvs_64xx_detect_porttype(mvi, i);
}
@@ -377,13 +373,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
mvs_update_phyinfo(mvi, i, 1);
}
- /* FIXME: update wide port bitmaps */
-
/* little endian for open address and command table, etc. */
- /*
- * it seems that ( from the spec ) turning on big-endian won't
- * do us any good on big-endian machines, need further confirmation
- */
cctl = mr32(MVS_CTL);
cctl |= CCTL_ENDIAN_CMD;
cctl |= CCTL_ENDIAN_DATA;
@@ -394,15 +384,19 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
/* reset CMD queue */
tmp = mr32(MVS_PCS);
tmp |= PCS_CMD_RST;
+ tmp &= ~PCS_SELF_CLEAR;
mw32(MVS_PCS, tmp);
- /* interrupt coalescing may cause missing HW interrput in some case,
- * and the max count is 0x1ff, while our max slot is 0x200,
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
* it will make count 0.
*/
tmp = 0;
- mw32(MVS_INT_COAL, tmp);
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
- tmp = 0x100;
+ tmp = 0x10000 | interrupt_coalescing;
mw32(MVS_INT_COAL_TMOUT, tmp);
/* ladies and gentlemen, start your engines */
@@ -477,13 +471,11 @@ static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
/* clear CMD_CMPLT ASAP */
mw32_f(MVS_INT_STAT, CINT_DONE);
-#ifndef MVS_USE_TASKLET
+
spin_lock(&mvi->lock);
-#endif
mvs_int_full(mvi);
-#ifndef MVS_USE_TASKLET
spin_unlock(&mvi->lock);
-#endif
+
return IRQ_HANDLED;
}
@@ -630,7 +622,6 @@ static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
{
u32 tmp;
struct mvs_phy *phy = &mvi->phy[i];
- /* workaround for HW phy decoding error on 1.5g disk drive */
mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
tmp = mvs_read_port_vsr_data(mvi, i);
if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
@@ -661,7 +652,7 @@ void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
tmp |= lrmax;
}
mvs_write_phy_ctl(mvi, phy_id, tmp);
- mvs_64xx_phy_reset(mvi, phy_id, 1);
+ mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET);
}
static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
@@ -744,11 +735,13 @@ int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
return -1;
}
-#ifndef DISABLE_HOTPLUG_DMA_FIX
-void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
+ int buf_len, int from, void *prd)
{
int i;
struct mvs_prd *buf_prd = prd;
+ dma_addr_t buf_dma = mvi->bulk_buffer_dma;
+
buf_prd += from;
for (i = 0; i < MAX_SG_ENTRY - from; i++) {
buf_prd->addr = cpu_to_le64(buf_dma);
@@ -756,7 +749,28 @@ void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
++buf_prd;
}
}
-#endif
+
+static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp = 0;
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
+ * it will make count 0.
+ */
+ if (time == 0) {
+ mw32(MVS_INT_COAL, 0);
+ mw32(MVS_INT_COAL_TMOUT, 0x10000);
+ } else {
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
+
+ tmp = 0x10000 | time;
+ mw32(MVS_INT_COAL_TMOUT, tmp);
+ }
+}
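
For illustration only (not part of the patch): a sketch of how the new coalescing hook could be driven at runtime, for example from a host attribute. Writing 0 disables coalescing; a non-zero value reprograms the timeout (the module default is 0x80, see interrupt_coalescing in mv_init.c). The dispatch member name below is an assumption based on the table entry added at the end of this file; the per-chip routines themselves are static.

	static void example_set_coalescing(struct mvs_info *mvi, u32 time)
	{
		/* mvs_64xx_tune_interrupt()/mvs_94xx_tune_interrupt() are
		 * static, so callers go through the per-chip dispatch table. */
		MVS_CHIP_DISP->tune_interrupt(mvi, time);
	}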
const struct mvs_dispatch mvs_64xx_dispatch = {
"mv64xx",
@@ -780,7 +794,6 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
mvs_write_port_irq_stat,
mvs_read_port_irq_mask,
mvs_write_port_irq_mask,
- mvs_get_sas_addr,
mvs_64xx_command_active,
mvs_64xx_clear_srs_irq,
mvs_64xx_issue_stop,
@@ -808,8 +821,8 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
mvs_64xx_spi_buildcmd,
mvs_64xx_spi_issuecmd,
mvs_64xx_spi_waitdataready,
-#ifndef DISABLE_HOTPLUG_DMA_FIX
mvs_64xx_fix_dma,
-#endif
+ mvs_64xx_tune_interrupt,
+ NULL,
};
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 78162c3..3501291 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -48,6 +48,216 @@ static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
}
}
+void set_phy_tuning(struct mvs_info *mvi, int phy_id,
+ struct phy_tuning phy_tuning)
+{
+ u32 tmp, setting_0 = 0, setting_1 = 0;
+ u8 i;
+
+ /* Remap information for B0 chip:
+ *
+ * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
+ * R0Dh -> R118h[31:16] (Generation 1 Setting 0)
+ * R0Eh -> R11Ch[15:0] (Generation 1 Setting 1)
+ * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
+ * R10h -> R120h[15:0] (Generation 2 Setting 1)
+ * R11h -> R120h[31:16] (Generation 3 Setting 0)
+ * R12h -> R124h[15:0] (Generation 3 Setting 1)
+ * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
+ */
+
+ /* A0 has a different set of registers */
+ if (mvi->pdev->revision == VANIR_A0_REV)
+ return;
+
+ for (i = 0; i < 3; i++) {
+ /* loop 3 times, set Gen 1, Gen 2, Gen 3 */
+ switch (i) {
+ case 0:
+ setting_0 = GENERATION_1_SETTING;
+ setting_1 = GENERATION_1_2_SETTING;
+ break;
+ case 1:
+ setting_0 = GENERATION_1_2_SETTING;
+ setting_1 = GENERATION_2_3_SETTING;
+ break;
+ case 2:
+ setting_0 = GENERATION_2_3_SETTING;
+ setting_1 = GENERATION_3_4_SETTING;
+ break;
+ }
+
+ /* Set:
+ *
+ * Transmitter Emphasis Enable
+ * Transmitter Emphasis Amplitude
+ * Transmitter Amplitude
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~(0xFBE << 16);
+ tmp |= (((phy_tuning.trans_emp_en << 11) |
+ (phy_tuning.trans_emp_amp << 7) |
+ (phy_tuning.trans_amp << 1)) << 16);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* Set Transmitter Amplitude Adjust */
+ mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~(0xC000);
+ tmp |= (phy_tuning.trans_amp_adj << 14);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+ }
+}
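
For illustration only (not part of the patch): a compact restatement of the field packing performed in the loop above — the emphasis enable, emphasis amplitude and amplitude fields land in bits [11], [10:7] and [5:1] of the upper halfword of each generation's setting_0 register. The helper name and parameter types are illustrative.

	static u32 example_pack_setting0(u32 old, u8 emp_en, u8 emp_amp, u8 amp)
	{
		u32 tmp = old & ~(0xFBE << 16);	/* clear the tuning fields */

		tmp |= ((u32)((emp_en << 11) | (emp_amp << 7) | (amp << 1))) << 16;
		return tmp;
	}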
+
+void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
+ struct ffe_control ffe)
+{
+ u32 tmp;
+
+ /* Don't run this if A0/B0 */
+ if ((mvi->pdev->revision == VANIR_A0_REV)
+ || (mvi->pdev->revision == VANIR_B0_REV))
+ return;
+
+ /* FFE Resistor and Capacitor */
+ /* R10Ch DFE Resolution Control/Squelch and FFE Setting
+ *
+ * FFE_FORCE [7]
+ * FFE_RES_SEL [6:4]
+ * FFE_CAP_SEL [3:0]
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0xFF;
+
+ /* Read from HBA_Info_Page */
+ tmp |= ((0x1 << 7) |
+ (ffe.ffe_rss_sel << 4) |
+ (ffe.ffe_cap_sel << 0));
+
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* R064h PHY Mode Register 1
+ *
+ * DFE_DIS 18
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0x40001;
+ /* Hard coding */
+ /* No defines in HBA_Info_Page */
+ tmp |= (0 << 18);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* R110h DFE F0-F1 Coefficient Control/DFE Update Control
+ *
+ * DFE_UPDATE_EN [11:6]
+ * DFE_FX_FORCE [5:0]
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0xFFF;
+ /* Hard coding */
+ /* No defines in HBA_Info_Page */
+ tmp |= ((0x3F << 6) | (0x0 << 0));
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
+ *
+ * FFE_TRAIN_EN 3
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0x8;
+ /* Hard coding */
+ /* No defines in HBA_Info_Page */
+ tmp |= (0 << 3);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+}
+
+/* Notice: this function must be called when phy is disabled */
+void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
+{
+ union reg_phy_cfg phy_cfg, phy_cfg_tmp;
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+ phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
+ phy_cfg.v = 0;
+ phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
+ phy_cfg.u.sas_support = 1;
+ phy_cfg.u.sata_support = 1;
+ phy_cfg.u.sata_host_mode = 1;
+
+ switch (rate) {
+ case 0x0:
+ /* support 1.5 Gbps */
+ phy_cfg.u.speed_support = 1;
+ phy_cfg.u.snw_3_support = 0;
+ phy_cfg.u.tx_lnk_parity = 1;
+ phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
+ break;
+ case 0x1:
+
+ /* support 1.5, 3.0 Gbps */
+ phy_cfg.u.speed_support = 3;
+ phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
+ phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
+ break;
+ case 0x2:
+ default:
+ /* support 1.5, 3.0, 6.0 Gbps */
+ phy_cfg.u.speed_support = 7;
+ phy_cfg.u.snw_3_support = 1;
+ phy_cfg.u.tx_lnk_parity = 1;
+ phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
+ phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
+ break;
+ }
+ mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
+}
+
+static void __devinit
+mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
+{
+ u32 temp;
+ temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
+ if (temp == 0xFFFFFFFFL) {
+ mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
+ mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
+ mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
+ }
+
+ temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
+ if (temp == 0xFFL) {
+ switch (mvi->pdev->revision) {
+ case VANIR_A0_REV:
+ case VANIR_B0_REV:
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
+ break;
+ case VANIR_C0_REV:
+ case VANIR_C1_REV:
+ case VANIR_C2_REV:
+ default:
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
+ break;
+ }
+ }
+
+ temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
+ if (temp == 0xFFL)
+ /*set default phy_rate = 6Gbps*/
+ mvi->hba_info_param.phy_rate[phy_id] = 0x2;
+
+ set_phy_tuning(mvi, phy_id,
+ mvi->hba_info_param.phy_tuning[phy_id]);
+ set_phy_ffe_tuning(mvi, phy_id,
+ mvi->hba_info_param.ffe_ctl[phy_id]);
+ set_phy_rate(mvi, phy_id,
+ mvi->hba_info_param.phy_rate[phy_id]);
+}
+
static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
void __iomem *regs = mvi->regs;
@@ -61,7 +271,14 @@ static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
u32 tmp;
-
+ u32 delay = 5000;
+ if (hard == MVS_PHY_TUNE) {
+ mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL);
+ tmp = mvs_read_port_cfg_data(mvi, phy_id);
+ mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000);
+ mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000);
+ return;
+ }
tmp = mvs_read_port_irq_stat(mvi, phy_id);
tmp &= ~PHYEV_RDY_CH;
mvs_write_port_irq_stat(mvi, phy_id, tmp);
@@ -71,12 +288,15 @@ static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
mvs_write_phy_ctl(mvi, phy_id, tmp);
do {
tmp = mvs_read_phy_ctl(mvi, phy_id);
- } while (tmp & PHY_RST_HARD);
+ udelay(10);
+ delay--;
+ } while ((tmp & PHY_RST_HARD) && delay);
+ if (!delay)
+ mv_dprintk("phy hard reset failed.\n");
} else {
- mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
- tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
tmp |= PHY_RST;
- mvs_write_port_vsr_data(mvi, phy_id, tmp);
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
}
}
@@ -90,12 +310,25 @@ static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
- mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
- mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
- mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
- mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
+ u32 tmp;
+ u8 revision = 0;
+
+ revision = mvi->pdev->revision;
+ if (revision == VANIR_A0_REV) {
+ mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
+ }
+ if (revision == VANIR_B0_REV) {
+ mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
+ mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
+ }
+
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
- mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp |= bit(0);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
}
static int __devinit mvs_94xx_init(struct mvs_info *mvi)
@@ -103,7 +336,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
void __iomem *regs = mvi->regs;
int i;
u32 tmp, cctl;
+ u8 revision;
+ revision = mvi->pdev->revision;
mvs_show_pcie_usage(mvi);
if (mvi->flags & MVF_FLAG_SOC) {
tmp = mr32(MVS_PHY_CTL);
@@ -133,6 +368,28 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
msleep(100);
}
+ /* disable multiplexing, enable the implemented phys */
+ mw32(MVS_PORTS_IMP, 0xFF);
+
+ if (revision == VANIR_A0_REV) {
+ mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
+ mw32(MVS_PA_VSR_PORT, 0x00018080);
+ }
+ mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
+ if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
+ /* set 6G/3G/1.5G, multiplexing, without SSC */
+ mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
+ else
+ /* set 6G/3G/1.5G, multiplexing, with and without SSC */
+ mw32(MVS_PA_VSR_PORT, 0x0084fffe);
+
+ if (revision == VANIR_B0_REV) {
+ mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
+ mw32(MVS_PA_VSR_PORT, 0x08001006);
+ mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
+ mw32(MVS_PA_VSR_PORT, 0x0000705f);
+ }
+
/* reset control */
mw32(MVS_PCS, 0); /* MVS_PCS */
mw32(MVS_STP_REG_SET_0, 0);
@@ -141,17 +398,8 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
/* init phys */
mvs_phy_hacks(mvi);
- /* disable Multiplexing, enable phy implemented */
- mw32(MVS_PORTS_IMP, 0xFF);
-
-
- mw32(MVS_PA_VSR_ADDR, 0x00000104);
- mw32(MVS_PA_VSR_PORT, 0x00018080);
- mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
- mw32(MVS_PA_VSR_PORT, 0x0084ffff);
-
/* set LED blink when IO*/
- mw32(MVS_PA_VSR_ADDR, 0x00000030);
+ mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
tmp = mr32(MVS_PA_VSR_PORT);
tmp &= 0xFFFF00FF;
tmp |= 0x00003300;
@@ -175,12 +423,13 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
mvs_94xx_phy_disable(mvi, i);
/* set phy local SAS address */
mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
- (mvi->phy[i].dev_sas_addr));
+ cpu_to_le64(mvi->phy[i].dev_sas_addr));
mvs_94xx_enable_xmt(mvi, i);
+ mvs_94xx_config_reg_from_hba(mvi, i);
mvs_94xx_phy_enable(mvi, i);
- mvs_94xx_phy_reset(mvi, i, 1);
+ mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
msleep(500);
mvs_94xx_detect_porttype(mvi, i);
}
@@ -211,16 +460,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
mvs_update_phyinfo(mvi, i, 1);
}
- /* FIXME: update wide port bitmaps */
-
/* little endian for open address and command table, etc. */
- /*
- * it seems that ( from the spec ) turning on big-endian won't
- * do us any good on big-endian machines, need further confirmation
- */
cctl = mr32(MVS_CTL);
cctl |= CCTL_ENDIAN_CMD;
- cctl |= CCTL_ENDIAN_DATA;
cctl &= ~CCTL_ENDIAN_OPEN;
cctl |= CCTL_ENDIAN_RSP;
mw32_f(MVS_CTL, cctl);
@@ -228,15 +470,20 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
/* reset CMD queue */
tmp = mr32(MVS_PCS);
tmp |= PCS_CMD_RST;
+ tmp &= ~PCS_SELF_CLEAR;
mw32(MVS_PCS, tmp);
- /* interrupt coalescing may cause missing HW interrput in some case,
- * and the max count is 0x1ff, while our max slot is 0x200,
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
* it will make count 0.
*/
tmp = 0;
- mw32(MVS_INT_COAL, tmp);
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
- tmp = 0x100;
+ /* default interrupt coalescing time is 128us */
+ tmp = 0x10000 | interrupt_coalescing;
mw32(MVS_INT_COAL_TMOUT, tmp);
/* ladies and gentlemen, start your engines */
@@ -249,7 +496,7 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
/* enable completion queue interrupt */
tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
- CINT_DMA_PCIE);
+ CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
tmp |= CINT_PHY_MASK;
mw32(MVS_INT_MASK, tmp);
@@ -332,13 +579,10 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
((stat & IRQ_SAS_B) && mvi->id == 1)) {
mw32_f(MVS_INT_STAT, CINT_DONE);
- #ifndef MVS_USE_TASKLET
+
spin_lock(&mvi->lock);
- #endif
mvs_int_full(mvi);
- #ifndef MVS_USE_TASKLET
spin_unlock(&mvi->lock);
- #endif
}
return IRQ_HANDLED;
}
@@ -346,10 +590,48 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
u32 tmp;
- mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
- do {
- tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
- } while (tmp & 1 << (slot_idx % 32));
+ tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
+ if (tmp && 1 << (slot_idx % 32)) {
+ mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
+ mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
+ 1 << (slot_idx % 32));
+ do {
+ tmp = mvs_cr32(mvi,
+ MVS_COMMAND_ACTIVE + (slot_idx >> 3));
+ } while (tmp & 1 << (slot_idx % 32));
+ }
+}
+
+void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ if (clear_all) {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp) {
+ mv_dprintk("check SRS 0 %08X.\n", tmp);
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ }
+ tmp = mr32(MVS_INT_STAT_SRS_1);
+ if (tmp) {
+ mv_dprintk("check SRS 1 %08X.\n", tmp);
+ mw32(MVS_INT_STAT_SRS_1, tmp);
+ }
+ } else {
+ if (reg_set > 31)
+ tmp = mr32(MVS_INT_STAT_SRS_1);
+ else
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+
+ if (tmp & (1 << (reg_set % 32))) {
+ mv_dprintk("register set 0x%x was stopped.\n", reg_set);
+ if (reg_set > 31)
+ mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
+ else
+ mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
+ }
+ }
}
static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
@@ -357,37 +639,56 @@ static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
{
void __iomem *regs = mvi->regs;
u32 tmp;
+ mvs_94xx_clear_srs_irq(mvi, 0, 1);
- if (type == PORT_TYPE_SATA) {
- tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
- mw32(MVS_INT_STAT_SRS_0, tmp);
- }
- mw32(MVS_INT_STAT, CINT_CI_STOP);
+ tmp = mr32(MVS_INT_STAT);
+ mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
tmp = mr32(MVS_PCS) | 0xFF00;
mw32(MVS_PCS, tmp);
}
+static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 err_0, err_1;
+ u8 i;
+ struct mvs_device *device;
+
+ err_0 = mr32(MVS_NON_NCQ_ERR_0);
+ err_1 = mr32(MVS_NON_NCQ_ERR_1);
+
+ mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
+ err_0, err_1);
+ for (i = 0; i < 32; i++) {
+ if (err_0 & bit(i)) {
+ device = mvs_find_dev_by_reg_set(mvi, i);
+ if (device)
+ mvs_release_task(mvi, device->sas_device);
+ }
+ if (err_1 & bit(i)) {
+ device = mvs_find_dev_by_reg_set(mvi, i+32);
+ if (device)
+ mvs_release_task(mvi, device->sas_device);
+ }
+ }
+
+ mw32(MVS_NON_NCQ_ERR_0, err_0);
+ mw32(MVS_NON_NCQ_ERR_1, err_1);
+}
+
static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
void __iomem *regs = mvi->regs;
- u32 tmp;
u8 reg_set = *tfs;
if (*tfs == MVS_ID_NOT_MAPPED)
return;
mvi->sata_reg_set &= ~bit(reg_set);
- if (reg_set < 32) {
+ if (reg_set < 32)
w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
- tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
- if (tmp)
- mw32(MVS_INT_STAT_SRS_0, tmp);
- } else {
- w_reg_set_enable(reg_set, mvi->sata_reg_set);
- tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
- if (tmp)
- mw32(MVS_INT_STAT_SRS_1, tmp);
- }
+ else
+ w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));
*tfs = MVS_ID_NOT_MAPPED;
@@ -403,7 +704,7 @@ static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
return 0;
i = mv_ffc64(mvi->sata_reg_set);
- if (i > 32) {
+ if (i >= 32) {
mvi->sata_reg_set |= bit(i);
w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
*tfs = i;
@@ -422,9 +723,12 @@ static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
int i;
struct scatterlist *sg;
struct mvs_prd *buf_prd = prd;
+ struct mvs_prd_imt im_len;
+ *(u32 *)&im_len = 0;
for_each_sg(scatter, sg, nr, i) {
buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
- buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
+ im_len.len = sg_dma_len(sg);
+ buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
buf_prd++;
}
}
@@ -433,7 +737,7 @@ static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
{
u32 phy_st;
phy_st = mvs_read_phy_ctl(mvi, i);
- if (phy_st & PHY_READY_MASK) /* phy ready */
+ if (phy_st & PHY_READY_MASK)
return 1;
return 0;
}
@@ -447,7 +751,7 @@ static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
for (i = 0; i < 7; i++) {
mvs_write_port_cfg_addr(mvi, port_id,
CONFIG_ID_FRAME0 + i * 4);
- id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
+ id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
}
memcpy(id, id_frame, 28);
}
@@ -458,15 +762,13 @@ static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
int i;
u32 id_frame[7];
- /* mvs_hexdump(28, (u8 *)id_frame, 0); */
for (i = 0; i < 7; i++) {
mvs_write_port_cfg_addr(mvi, port_id,
CONFIG_ATT_ID_FRAME0 + i * 4);
- id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
+ id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
mv_dprintk("94xx phy %d atta frame %d %x.\n",
port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
}
- /* mvs_hexdump(28, (u8 *)id_frame, 0); */
memcpy(id, id_frame, 28);
}
@@ -526,7 +828,18 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
struct sas_phy_linkrates *rates)
{
- /* TODO */
+ u32 lrmax = 0;
+ u32 tmp;
+
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;
+
+ if (lrmax) {
+ tmp &= ~(0x3 << 12);
+ tmp |= lrmax;
+ }
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
+ mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
}
static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
@@ -603,27 +916,59 @@ int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
return -1;
}
-#ifndef DISABLE_HOTPLUG_DMA_FIX
-void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
+ int buf_len, int from, void *prd)
{
int i;
struct mvs_prd *buf_prd = prd;
+ dma_addr_t buf_dma;
+ struct mvs_prd_imt im_len;
+
+ *(u32 *)&im_len = 0;
buf_prd += from;
- for (i = 0; i < MAX_SG_ENTRY - from; i++) {
- buf_prd->addr = cpu_to_le64(buf_dma);
- buf_prd->im_len.len = cpu_to_le32(buf_len);
- ++buf_prd;
+
+#define PRD_CHAINED_ENTRY 0x01
+ if ((mvi->pdev->revision == VANIR_A0_REV) ||
+ (mvi->pdev->revision == VANIR_B0_REV))
+ buf_dma = (phy_mask <= 0x08) ?
+ mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
+ else
+ return;
+
+ for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
+ if (i == MAX_SG_ENTRY - 1) {
+ buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
+ im_len.len = 2;
+ im_len.misc_ctl = PRD_CHAINED_ENTRY;
+ } else {
+ buf_prd->addr = cpu_to_le64(buf_dma);
+ im_len.len = buf_len;
+ }
+ buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
}
}
-#endif
-/*
- * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
- * with 64xx fixes
- */
-static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
- u8 clear_all)
+static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
{
+ void __iomem *regs = mvi->regs;
+ u32 tmp = 0;
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
+ * it will make count 0.
+ */
+ if (time == 0) {
+ mw32(MVS_INT_COAL, 0);
+ mw32(MVS_INT_COAL_TMOUT, 0x10000);
+ } else {
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
+
+ tmp = 0x10000 | time;
+ mw32(MVS_INT_COAL_TMOUT, tmp);
+ }
+
}
const struct mvs_dispatch mvs_94xx_dispatch = {
@@ -648,7 +993,6 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
mvs_write_port_irq_stat,
mvs_read_port_irq_mask,
mvs_write_port_irq_mask,
- mvs_get_sas_addr,
mvs_94xx_command_active,
mvs_94xx_clear_srs_irq,
mvs_94xx_issue_stop,
@@ -676,8 +1020,8 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
mvs_94xx_spi_buildcmd,
mvs_94xx_spi_issuecmd,
mvs_94xx_spi_waitdataready,
-#ifndef DISABLE_HOTPLUG_DMA_FIX
mvs_94xx_fix_dma,
-#endif
+ mvs_94xx_tune_interrupt,
+ mvs_94xx_non_spec_ncq_error,
};
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 8835bef..8f7eb4f 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -30,6 +30,14 @@
#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
+enum VANIR_REVISION_ID {
+ VANIR_A0_REV = 0xA0,
+ VANIR_B0_REV = 0x01,
+ VANIR_C0_REV = 0x02,
+ VANIR_C1_REV = 0x03,
+ VANIR_C2_REV = 0xC2,
+};
+
enum hw_registers {
MVS_GBL_CTL = 0x04, /* global control */
MVS_GBL_INT_STAT = 0x00, /* global irq status */
@@ -101,6 +109,7 @@ enum hw_registers {
MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
+ MVS_COMMAND_ACTIVE = 0x300,
};
enum pci_cfg_registers {
@@ -112,26 +121,29 @@ enum pci_cfg_registers {
/* SAS/SATA Vendor Specific Port Registers */
enum sas_sata_vsp_regs {
- VSR_PHY_STAT = 0x00 * 4, /* Phy Status */
- VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */
- VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */
- VSR_PHY_MODE3 = 0x03 * 4, /* pll */
- VSR_PHY_MODE4 = 0x04 * 4, /* VCO */
- VSR_PHY_MODE5 = 0x05 * 4, /* Rx */
- VSR_PHY_MODE6 = 0x06 * 4, /* CDR */
- VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */
- VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */
- VSR_PHY_MODE9 = 0x09 * 4, /* Test */
- VSR_PHY_MODE10 = 0x0A * 4, /* Power */
- VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */
- VSR_PHY_VS0 = 0x0C * 4, /* Vednor Specific 0 */
- VSR_PHY_VS1 = 0x0D * 4, /* Vednor Specific 1 */
+ VSR_PHY_STAT = 0x00 * 4, /* Phy Interrupt Status */
+ VSR_PHY_MODE1 = 0x01 * 4, /* phy Interrupt Enable */
+ VSR_PHY_MODE2 = 0x02 * 4, /* Phy Configuration */
+ VSR_PHY_MODE3 = 0x03 * 4, /* Phy Status */
+ VSR_PHY_MODE4 = 0x04 * 4, /* Phy Counter 0 */
+ VSR_PHY_MODE5 = 0x05 * 4, /* Phy Counter 1 */
+ VSR_PHY_MODE6 = 0x06 * 4, /* Event Counter Control */
+ VSR_PHY_MODE7 = 0x07 * 4, /* Event Counter Select */
+ VSR_PHY_MODE8 = 0x08 * 4, /* Event Counter 0 */
+ VSR_PHY_MODE9 = 0x09 * 4, /* Event Counter 1 */
+ VSR_PHY_MODE10 = 0x0A * 4, /* Event Counter 2 */
+ VSR_PHY_MODE11 = 0x0B * 4, /* Event Counter 3 */
+ VSR_PHY_ACT_LED = 0x0C * 4, /* Activity LED control */
+
+ VSR_PHY_FFE_CONTROL = 0x10C,
+ VSR_PHY_DFE_UPDATE_CRTL = 0x110,
+ VSR_REF_CLOCK_CRTL = 0x1A0,
};
enum chip_register_bits {
PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
- PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
- PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
+ PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 12),
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
(0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
};
@@ -169,22 +181,75 @@ enum pci_interrupt_cause {
IRQ_PCIE_ERR = (1 << 31),
};
+union reg_phy_cfg {
+ u32 v;
+ struct {
+ u32 phy_reset:1;
+ u32 sas_support:1;
+ u32 sata_support:1;
+ u32 sata_host_mode:1;
+ /*
+ * bit 2: 6Gbps support
+ * bit 1: 3Gbps support
+ * bit 0: 1.5Gbps support
+ */
+ u32 speed_support:3;
+ u32 snw_3_support:1;
+ u32 tx_lnk_parity:1;
+ /*
+ * bit 5: G1 (1.5Gbps) Without SSC
+ * bit 4: G1 (1.5Gbps) with SSC
+ * bit 3: G2 (3.0Gbps) Without SSC
+ * bit 2: G2 (3.0Gbps) with SSC
+ * bit 1: G3 (6.0Gbps) without SSC
+ * bit 0: G3 (6.0Gbps) with SSC
+ */
+ u32 tx_spt_phs_lnk_rate:6;
+ /* 8h: 1.5Gbps 9h: 3Gbps Ah: 6Gbps */
+ u32 tx_lgcl_lnk_rate:4;
+ u32 tx_ssc_type:1;
+ u32 sata_spin_up_spt:1;
+ u32 sata_spin_up_en:1;
+ u32 bypass_oob:1;
+ u32 disable_phy:1;
+ u32 rsvd:8;
+ } u;
+};
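
For illustration only (not part of the patch): a worked example tying this union to set_phy_rate() in mv_94xx.c. The default 6 Gbps case (rate == 0x2) builds the VSR_PHY_MODE2 value shown below; the disable_phy bit that the real code preserves from the old register contents is omitted here.

	static u32 example_phy_cfg_6g(void)
	{
		union reg_phy_cfg cfg;

		cfg.v = 0;
		cfg.u.sas_support = 1;
		cfg.u.sata_support = 1;
		cfg.u.sata_host_mode = 1;
		cfg.u.speed_support = 7;		/* 1.5/3.0/6.0 Gbps */
		cfg.u.snw_3_support = 1;
		cfg.u.tx_lnk_parity = 1;
		cfg.u.tx_spt_phs_lnk_rate = 0x3f;	/* all rates, with and without SSC */
		cfg.u.tx_lgcl_lnk_rate = 0x09;		/* 3 Gbps logical rate */
		return cfg.v;
	}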
+
#define MAX_SG_ENTRY 255
struct mvs_prd_imt {
+#ifndef __BIG_ENDIAN
__le32 len:22;
u8 _r_a:2;
u8 misc_ctl:4;
u8 inter_sel:4;
+#else
+ u32 inter_sel:4;
+ u32 misc_ctl:4;
+ u32 _r_a:2;
+ u32 len:22;
+#endif
};
struct mvs_prd {
/* 64-bit buffer address */
__le64 addr;
/* 22-bit length */
- struct mvs_prd_imt im_len;
+ __le32 im_len;
} __attribute__ ((packed));
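
For illustration only (not part of the patch): with im_len now a plain __le32, the bit-field view is assembled in a local mvs_prd_imt and stored as one little-endian word, as mvs_94xx_make_prd() and mvs_94xx_fix_dma() do earlier in this patch. A minimal restatement, with an assumed helper name:

	static void example_fill_prd(struct mvs_prd *prd, u64 dma, u32 len)
	{
		struct mvs_prd_imt im_len;

		*(u32 *)&im_len = 0;
		im_len.len = len;			/* 22-bit length */
		prd->addr = cpu_to_le64(dma);
		prd->im_len = cpu_to_le32(*(u32 *)&im_len);
	}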
+/*
+ * these registers are accessed through port vendor
+ * specific address/data registers
+ */
+enum sas_sata_phy_regs {
+ GENERATION_1_SETTING = 0x118,
+ GENERATION_1_2_SETTING = 0x11C,
+ GENERATION_2_3_SETTING = 0x120,
+ GENERATION_3_4_SETTING = 0x124,
+};
+
#define SPI_CTRL_REG_94XX 0xc800
#define SPI_ADDR_REG_94XX 0xc804
#define SPI_WR_DATA_REG_94XX 0xc808
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index 1753a6f..bcc4080 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -164,7 +164,6 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
{
u32 tmp;
- /* workaround for SATA R-ERR, to ignore phy glitch */
tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
tmp &= ~(1 << 9);
tmp |= (1 << 10);
@@ -179,23 +178,10 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
tmp |= 0x3fff;
mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
- /* workaround for WDTIMEOUT , set to 550 ms */
mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
/* not to halt for different port op during wideport link change */
mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
-
- /* workaround for Seagate disk not-found OOB sequence, recv
- * COMINIT before sending out COMWAKE */
- tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
- tmp &= 0x0000ffff;
- tmp |= 0x00fa0000;
- mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
-
- tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
- tmp &= 0x1fffffff;
- tmp |= (2U << 29); /* 8 ms retry */
- mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
}
static inline void mvs_int_sata(struct mvs_info *mvi)
@@ -223,6 +209,9 @@ static inline void mvs_int_full(struct mvs_info *mvi)
mvs_int_port(mvi, i, tmp);
}
+ if (stat & CINT_NON_SPEC_NCQ_ERROR)
+ MVS_CHIP_DISP->non_spec_ncq_error(mvi);
+
if (stat & CINT_SRS)
mvs_int_sata(mvi);
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index bc00c94..dec7cad 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -43,7 +43,6 @@ enum chip_flavors {
/* driver compile-time configuration */
enum driver_configuration {
- MVS_SLOTS = 512, /* command slots */
MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
/* software requires power-of-2
@@ -56,8 +55,7 @@ enum driver_configuration {
MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
MVS_OAF_SZ = 64, /* Open address frame buffer size */
- MVS_QUEUE_SIZE = 32, /* Support Queue depth */
- MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
+ MVS_QUEUE_SIZE = 64, /* Support Queue depth */
MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
};
@@ -144,6 +142,7 @@ enum hw_register_bits {
CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
CINT_MEM = (1U << 26), /* int mem parity err */
CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
+ CINT_NON_SPEC_NCQ_ERROR = (1U << 25), /* Non specific NCQ error */
CINT_SRS = (1U << 3), /* SRS event */
CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
CINT_DONE = (1U << 0), /* cmd completion */
@@ -161,7 +160,7 @@ enum hw_register_bits {
TXQ_CMD_SSP = 1, /* SSP protocol */
TXQ_CMD_SMP = 2, /* SMP protocol */
TXQ_CMD_STP = 3, /* STP/SATA protocol */
- TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
+ TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP target free list */
TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
TXQ_MODE_TARGET = 0,
@@ -391,15 +390,15 @@ enum sas_cmd_port_registers {
};
enum mvs_info_flags {
- MVF_MSI = (1U << 0), /* MSI is enabled */
MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
};
enum mvs_event_flags {
- PHY_PLUG_EVENT = (3U),
+ PHY_PLUG_EVENT = (3U),
PHY_PLUG_IN = (1U << 0), /* phy plug in */
PHY_PLUG_OUT = (1U << 1), /* phy plug out */
+ EXP_BRCT_CHG = (1U << 2), /* broadcast change */
};
enum mvs_port_type {
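PHY_PLUG_EVENT stays defined as 3, i.e. the union of PHY_PLUG_IN and PHY_PLUG_OUT, so one mask test catches either plug direction, while the new EXP_BRCT_CHG bit is tested separately (as mvs_work_queue does later in this patch). A small sketch of those checks:

#include <stdio.h>
#include <stdint.h>

#define PHY_PLUG_EVENT  (3u)            /* PLUG_IN | PLUG_OUT */
#define PHY_PLUG_IN     (1u << 0)
#define PHY_PLUG_OUT    (1u << 1)
#define EXP_BRCT_CHG    (1u << 2)

static void demo_handle(uint32_t handler)
{
        if (handler & PHY_PLUG_EVENT)
                printf("plug event (%s)\n",
                       (handler & PHY_PLUG_IN) ? "in" : "out");
        else if (handler & EXP_BRCT_CHG)
                printf("expander broadcast change\n");
}

int main(void)
{
        demo_handle(PHY_PLUG_OUT);
        demo_handle(EXP_BRCT_CHG);
        return 0;
}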
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 90b6366..4e9af66 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -34,22 +34,25 @@ MODULE_PARM_DESC(collector, "\n"
"\tThe mvsas SAS LLDD supports both modes.\n"
"\tDefault: 1 (Direct Mode).\n");
+int interrupt_coalescing = 0x80;
+
static struct scsi_transport_template *mvs_stt;
struct kmem_cache *mvs_task_list_cache;
static const struct mvs_chip_info mvs_chips[] = {
- [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
- [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
- [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
- [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
- [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
- [chip_9445] = { 1, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, },
- [chip_9485] = { 2, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, },
- [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
- [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
+ [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
+ [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
+ [chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
+ [chip_9180] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
+ [chip_9480] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
+ [chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
+ [chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
+ [chip_1300] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
+ [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
};
+struct device_attribute *mvst_host_attrs[];
+
#define SOC_SAS_NUM 2
-#define SG_MX 64
static struct scsi_host_template mvs_sht = {
.module = THIS_MODULE,
@@ -66,7 +69,7 @@ static struct scsi_host_template mvs_sht = {
.can_queue = 1,
.cmd_per_lun = 1,
.this_id = -1,
- .sg_tablesize = SG_MX,
+ .sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
@@ -74,6 +77,7 @@ static struct scsi_host_template mvs_sht = {
.slave_alloc = mvs_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+ .shost_attrs = mvst_host_attrs,
};
static struct sas_domain_function_template mvs_transport_ops = {
@@ -100,6 +104,7 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
phy->mvi = mvi;
+ phy->port = NULL;
init_timer(&phy->timer);
sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
@@ -128,7 +133,7 @@ static void mvs_free(struct mvs_info *mvi)
if (mvi->flags & MVF_FLAG_SOC)
slot_nr = MVS_SOC_SLOTS;
else
- slot_nr = MVS_SLOTS;
+ slot_nr = MVS_CHIP_SLOT_SZ;
if (mvi->dma_pool)
pci_pool_destroy(mvi->dma_pool);
@@ -148,25 +153,26 @@ static void mvs_free(struct mvs_info *mvi)
dma_free_coherent(mvi->dev,
sizeof(*mvi->slot) * slot_nr,
mvi->slot, mvi->slot_dma);
-#ifndef DISABLE_HOTPLUG_DMA_FIX
+
if (mvi->bulk_buffer)
dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
mvi->bulk_buffer, mvi->bulk_buffer_dma);
-#endif
+ if (mvi->bulk_buffer1)
+ dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
+ mvi->bulk_buffer1, mvi->bulk_buffer_dma1);
MVS_CHIP_DISP->chip_iounmap(mvi);
if (mvi->shost)
scsi_host_put(mvi->shost);
list_for_each_entry(mwq, &mvi->wq_list, entry)
cancel_delayed_work(&mwq->work_q);
+ kfree(mvi->tags);
kfree(mvi);
}
-#ifdef MVS_USE_TASKLET
-struct tasklet_struct mv_tasklet;
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
static void mvs_tasklet(unsigned long opaque)
{
- unsigned long flags;
u32 stat;
u16 core_nr, i = 0;
@@ -179,35 +185,49 @@ static void mvs_tasklet(unsigned long opaque)
if (unlikely(!mvi))
BUG_ON(1);
+ stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
+ if (!stat)
+ goto out;
+
for (i = 0; i < core_nr; i++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
- stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
- if (stat)
- MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
+ MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
}
+out:
+ MVS_CHIP_DISP->interrupt_enable(mvi);
}
#endif
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
- u32 core_nr, i = 0;
+ u32 core_nr;
u32 stat;
struct mvs_info *mvi;
struct sas_ha_struct *sha = opaque;
+#ifndef CONFIG_SCSI_MVSAS_TASKLET
+ u32 i;
+#endif
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
if (unlikely(!mvi))
return IRQ_NONE;
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ MVS_CHIP_DISP->interrupt_disable(mvi);
+#endif
stat = MVS_CHIP_DISP->isr_status(mvi, irq);
- if (!stat)
+ if (!stat) {
+ #ifdef CONFIG_SCSI_MVSAS_TASKLET
+ MVS_CHIP_DISP->interrupt_enable(mvi);
+ #endif
return IRQ_NONE;
+ }
-#ifdef MVS_USE_TASKLET
- tasklet_schedule(&mv_tasklet);
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#else
for (i = 0; i < core_nr; i++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
@@ -225,7 +245,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
if (mvi->flags & MVF_FLAG_SOC)
slot_nr = MVS_SOC_SLOTS;
else
- slot_nr = MVS_SLOTS;
+ slot_nr = MVS_CHIP_SLOT_SZ;
spin_lock_init(&mvi->lock);
for (i = 0; i < mvi->chip->n_phy; i++) {
@@ -273,13 +293,18 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
goto err_out;
memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
-#ifndef DISABLE_HOTPLUG_DMA_FIX
mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
TRASH_BUCKET_SIZE,
&mvi->bulk_buffer_dma, GFP_KERNEL);
if (!mvi->bulk_buffer)
goto err_out;
-#endif
+
+ mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
+ TRASH_BUCKET_SIZE,
+ &mvi->bulk_buffer_dma1, GFP_KERNEL);
+ if (!mvi->bulk_buffer1)
+ goto err_out;
+
sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
if (!mvi->dma_pool) {
@@ -354,11 +379,12 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct Scsi_Host *shost, unsigned int id)
{
- struct mvs_info *mvi;
+ struct mvs_info *mvi = NULL;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
- mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
- GFP_KERNEL);
+ mvi = kzalloc(sizeof(*mvi) +
+ (1L << mvs_chips[ent->driver_data].slot_width) *
+ sizeof(struct mvs_slot_info), GFP_KERNEL);
if (!mvi)
return NULL;
@@ -367,7 +393,6 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
mvi->chip_id = ent->driver_data;
mvi->chip = &mvs_chips[mvi->chip_id];
INIT_LIST_HEAD(&mvi->wq_list);
- mvi->irq = pdev->irq;
((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
@@ -375,9 +400,10 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
mvi->id = id;
mvi->sas = sha;
mvi->shost = shost;
-#ifdef MVS_USE_TASKLET
- tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
-#endif
+
+ mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL);
+ if (!mvi->tags)
+ goto err_out;
if (MVS_CHIP_DISP->chip_ioremap(mvi))
goto err_out;
@@ -388,7 +414,6 @@ err_out:
return NULL;
}
-/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
int rc;
@@ -450,7 +475,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
shost->transportt = mvs_stt;
- shost->max_id = 128;
+ shost->max_id = MVS_MAX_DEVICES;
shost->max_lun = ~0;
shost->max_channel = 1;
shost->max_cmd_len = 16;
@@ -493,11 +518,12 @@ static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
if (mvi->flags & MVF_FLAG_SOC)
can_queue = MVS_SOC_CAN_QUEUE;
else
- can_queue = MVS_CAN_QUEUE;
+ can_queue = MVS_CHIP_SLOT_SZ;
sha->lldd_queue_size = can_queue;
+ shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
shost->can_queue = can_queue;
- mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
+ mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
sha->core.shost = mvi->shost;
}
@@ -518,6 +544,7 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
{
unsigned int rc, nhost = 0;
struct mvs_info *mvi;
+ struct mvs_prv_info *mpi;
irq_handler_t irq_handler = mvs_interrupt;
struct Scsi_Host *shost = NULL;
const struct mvs_chip_info *chip;
@@ -569,6 +596,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
goto err_out_regions;
}
+ memset(&mvi->hba_info_param, 0xFF,
+ sizeof(struct hba_info_page));
+
mvs_init_sas_add(mvi);
mvi->instance = nhost;
@@ -579,8 +609,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
}
nhost++;
} while (nhost < chip->n_host);
-#ifdef MVS_USE_TASKLET
- tasklet_init(&mv_tasklet, mvs_tasklet,
+ mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
(unsigned long)SHOST_TO_SAS_HA(shost));
#endif
@@ -625,8 +656,8 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
-#ifdef MVS_USE_TASKLET
- tasklet_kill(&mv_tasklet);
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#endif
pci_set_drvdata(pdev, NULL);
@@ -635,7 +666,7 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
scsi_remove_host(mvi->shost);
MVS_CHIP_DISP->interrupt_disable(mvi);
- free_irq(mvi->irq, sha);
+ free_irq(mvi->pdev->irq, sha);
for (i = 0; i < core_nr; i++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
mvs_free(mvi);
@@ -703,6 +734,70 @@ static struct pci_driver mvs_pci_driver = {
.remove = __devexit_p(mvs_pci_remove),
};
+static ssize_t
+mvs_show_driver_version(struct device *cdev,
+ struct device_attribute *attr, char *buffer)
+{
+ return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+static DEVICE_ATTR(driver_version,
+ S_IRUGO,
+ mvs_show_driver_version,
+ NULL);
+
+static ssize_t
+mvs_store_interrupt_coalescing(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buffer, size_t size)
+{
+ int val = 0;
+ struct mvs_info *mvi = NULL;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ u8 i, core_nr;
+ if (buffer == NULL)
+ return size;
+
+ if (sscanf(buffer, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val >= 0x10000) {
+ mv_dprintk("interrupt coalescing timer %d us is"
+ "too long\n", val);
+ return strlen(buffer);
+ }
+
+ interrupt_coalescing = val;
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+ if (unlikely(!mvi))
+ return -EINVAL;
+
+ for (i = 0; i < core_nr; i++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+ if (MVS_CHIP_DISP->tune_interrupt)
+ MVS_CHIP_DISP->tune_interrupt(mvi,
+ interrupt_coalescing);
+ }
+ mv_dprintk("set interrupt coalescing time to %d us\n",
+ interrupt_coalescing);
+ return strlen(buffer);
+}
+
+static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
+ struct device_attribute *attr, char *buffer)
+{
+ return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
+}
+
+static DEVICE_ATTR(interrupt_coalescing,
+ S_IRUGO|S_IWUSR,
+ mvs_show_interrupt_coalescing,
+ mvs_store_interrupt_coalescing);
+
/* task handler */
struct task_struct *mvs_th;
static int __init mvs_init(void)
@@ -739,6 +834,12 @@ static void __exit mvs_exit(void)
kmem_cache_destroy(mvs_task_list_cache);
}
+struct device_attribute *mvst_host_attrs[] = {
+ &dev_attr_driver_version,
+ &dev_attr_interrupt_coalescing,
+ NULL,
+};
+
module_init(mvs_init);
module_exit(mvs_exit);
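The interrupt_coalescing store handler above parses a decimal value with sscanf and rejects anything that will not fit the 16-bit coalescing timer. A user-space sketch of that validation, slightly tightened to also reject negative input, follows; the 0x10000 limit is taken from the hunk above, the rest is illustrative:

#include <stdio.h>

static int parse_coalescing(const char *buffer, int *out)
{
        int val;

        if (buffer == NULL || sscanf(buffer, "%d", &val) != 1)
                return -1;              /* -EINVAL in the driver */
        if (val < 0 || val >= 0x10000)
                return -1;              /* timer value too long */
        *out = val;
        return 0;
}

int main(void)
{
        int v;

        printf("\"128\"   -> %s\n",
               parse_coalescing("128", &v) ? "rejected" : "accepted");
        printf("\"70000\" -> %s\n",
               parse_coalescing("70000", &v) ? "rejected" : "accepted");
        return 0;
}

Accepted values are then handed to the per-chip tune_interrupt hook for every host behind the SAS HA, which is why the store loops over mvi[i].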
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 0ef2742..4958fef 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -38,7 +38,7 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
- void *bitmap = &mvi->tags;
+ void *bitmap = mvi->tags;
clear_bit(tag, bitmap);
}
@@ -49,14 +49,14 @@ void mvs_tag_free(struct mvs_info *mvi, u32 tag)
void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
- void *bitmap = &mvi->tags;
+ void *bitmap = mvi->tags;
set_bit(tag, bitmap);
}
inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
unsigned int index, tag;
- void *bitmap = &mvi->tags;
+ void *bitmap = mvi->tags;
index = find_first_zero_bit(bitmap, mvi->tags_num);
tag = index;
@@ -74,126 +74,6 @@ void mvs_tag_init(struct mvs_info *mvi)
mvs_tag_clear(mvi, i);
}
-void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
-{
- u32 i;
- u32 run;
- u32 offset;
-
- offset = 0;
- while (size) {
- printk(KERN_DEBUG"%08X : ", baseaddr + offset);
- if (size >= 16)
- run = 16;
- else
- run = size;
- size -= run;
- for (i = 0; i < 16; i++) {
- if (i < run)
- printk(KERN_DEBUG"%02X ", (u32)data[i]);
- else
- printk(KERN_DEBUG" ");
- }
- printk(KERN_DEBUG": ");
- for (i = 0; i < run; i++)
- printk(KERN_DEBUG"%c",
- isalnum(data[i]) ? data[i] : '.');
- printk(KERN_DEBUG"\n");
- data = &data[16];
- offset += run;
- }
- printk(KERN_DEBUG"\n");
-}
-
-#if (_MV_DUMP > 1)
-static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
- enum sas_protocol proto)
-{
- u32 offset;
- struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
- offset = slot->cmd_size + MVS_OAF_SZ +
- MVS_CHIP_DISP->prd_size() * slot->n_elem;
- dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
- tag);
- mvs_hexdump(32, (u8 *) slot->response,
- (u32) slot->buf_dma + offset);
-}
-#endif
-
-static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
- enum sas_protocol proto)
-{
-#if (_MV_DUMP > 1)
- u32 sz, w_ptr;
- u64 addr;
- struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
- /*Delivery Queue */
- sz = MVS_CHIP_SLOT_SZ;
- w_ptr = slot->tx;
- addr = mvi->tx_dma;
- dev_printk(KERN_DEBUG, mvi->dev,
- "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
- dev_printk(KERN_DEBUG, mvi->dev,
- "Delivery Queue Base Address=0x%llX (PA)"
- "(tx_dma=0x%llX), Entry=%04d\n",
- addr, (unsigned long long)mvi->tx_dma, w_ptr);
- mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
- (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
- /*Command List */
- addr = mvi->slot_dma;
- dev_printk(KERN_DEBUG, mvi->dev,
- "Command List Base Address=0x%llX (PA)"
- "(slot_dma=0x%llX), Header=%03d\n",
- addr, (unsigned long long)slot->buf_dma, tag);
- dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
- /*mvs_cmd_hdr */
- mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
- (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
- /*1.command table area */
- dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
- mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
- /*2.open address frame area */
- dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
- mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
- (u32) slot->buf_dma + slot->cmd_size);
- /*3.status buffer */
- mvs_hba_sb_dump(mvi, tag, proto);
- /*4.PRD table */
- dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
- mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
- (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
- (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
-#endif
-}
-
-static void mvs_hba_cq_dump(struct mvs_info *mvi)
-{
-#if (_MV_DUMP > 2)
- u64 addr;
- void __iomem *regs = mvi->regs;
- u32 entry = mvi->rx_cons + 1;
- u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
-
- /*Completion Queue */
- addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
- dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
- mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
- dev_printk(KERN_DEBUG, mvi->dev,
- "Completion List Base Address=0x%llX (PA), "
- "CQ_Entry=%04d, CQ_WP=0x%08X\n",
- addr, entry - 1, mvi->rx[0]);
- mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
- mvi->rx_dma + sizeof(u32) * entry);
-#endif
-}
-
-void mvs_get_sas_addr(void *buf, u32 buflen)
-{
- /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
-}
-
struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
unsigned long i = 0, j = 0, hi = 0;
@@ -222,7 +102,6 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
}
-/* FIXME */
int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
unsigned long i = 0, j = 0, n = 0, num = 0;
@@ -253,6 +132,20 @@ int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
return num;
}
+struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
+ u8 reg_set)
+{
+ u32 dev_no;
+ for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
+ if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
+ continue;
+
+ if (mvi->devices[dev_no].taskfileset == reg_set)
+ return &mvi->devices[dev_no];
+ }
+ return NULL;
+}
+
static inline void mvs_free_reg_set(struct mvs_info *mvi,
struct mvs_device *dev)
{
@@ -283,7 +176,6 @@ void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
}
}
-/* FIXME: locking? */
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
@@ -309,12 +201,12 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
if (tmp & PHY_RST_HARD)
break;
- MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
break;
case PHY_FUNC_LINK_RESET:
MVS_CHIP_DISP->phy_enable(mvi, phy_id);
- MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
break;
case PHY_FUNC_DISABLE:
@@ -406,14 +298,10 @@ int mvs_slave_configure(struct scsi_device *sdev)
if (ret)
return ret;
- if (dev_is_sata(dev)) {
- /* may set PIO mode */
- #if MV_DISABLE_NCQ
- struct ata_port *ap = dev->sata_dev.ap;
- struct ata_device *adev = ap->link.device;
- adev->flags |= ATA_DFLAG_NCQ_OFF;
- scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
- #endif
+ if (!dev_is_sata(dev)) {
+ sas_change_queue_depth(sdev,
+ MVS_QUEUE_SIZE,
+ SCSI_QDEPTH_DEFAULT);
}
return 0;
}
@@ -424,6 +312,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
unsigned short core_nr;
struct mvs_info *mvi;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct mvs_prv_info *mvs_prv = sha->lldd_ha;
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
@@ -432,15 +321,17 @@ void mvs_scan_start(struct Scsi_Host *shost)
for (i = 0; i < mvi->chip->n_phy; ++i)
mvs_bytes_dmaed(mvi, i);
}
+ mvs_prv->scan_finished = 1;
}
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
- /* give the phy enabling interrupt event time to come in (1s
- * is empirically about all it takes) */
- if (time < HZ)
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct mvs_prv_info *mvs_prv = sha->lldd_ha;
+
+ if (mvs_prv->scan_finished == 0)
return 0;
- /* Wait for discovery to finish */
+
scsi_flush_work(shost);
return 1;
}
@@ -461,10 +352,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
void *buf_prd;
struct mvs_slot_info *slot = &mvi->slot_info[tag];
u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-#if _MV_DUMP
- u8 *buf_cmd;
- void *from;
-#endif
+
/*
* DMA-map SMP request, response buffers
*/
@@ -496,15 +384,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
buf_tmp = slot->buf;
buf_tmp_dma = slot->buf_dma;
-#if _MV_DUMP
- buf_cmd = buf_tmp;
- hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
- buf_tmp += req_len;
- buf_tmp_dma += req_len;
- slot->cmd_size = req_len;
-#else
hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
-#endif
/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
buf_oaf = buf_tmp;
@@ -553,12 +433,6 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
/* fill in PRD (scatter/gather) table, if any */
MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
-#if _MV_DUMP
- /* copy cmd table */
- from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
- memcpy(buf_cmd, from + sg_req->offset, req_len);
- kunmap_atomic(from, KM_IRQ0);
-#endif
return 0;
err_out_2:
@@ -616,14 +490,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
-#ifndef DISABLE_HOTPLUG_DMA_FIX
if (task->data_dir == DMA_FROM_DEVICE)
flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
else
flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-#else
- flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-#endif
+
if (task->ata_task.use_ncq)
flags |= MCH_FPDMA;
if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
@@ -631,11 +502,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
flags |= MCH_ATAPI;
}
- /* FIXME: fill in port multiplier number */
-
hdr->flags = cpu_to_le32(flags);
- /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
else
@@ -657,9 +525,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
buf_tmp += MVS_ATA_CMD_SZ;
buf_tmp_dma += MVS_ATA_CMD_SZ;
-#if _MV_DUMP
- slot->cmd_size = MVS_ATA_CMD_SZ;
-#endif
/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
/* used for STP. unused for SATA? */
@@ -682,9 +547,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
buf_tmp_dma += i;
/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
- /* FIXME: probably unused, for SATA. kept here just in case
- * we get a STP/SATA error information record
- */
slot->response = buf_tmp;
hdr->status_buf = cpu_to_le64(buf_tmp_dma);
if (mvi->flags & MVF_FLAG_SOC)
@@ -715,11 +577,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
/* fill in PRD (scatter/gather) table, if any */
MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
-#ifndef DISABLE_HOTPLUG_DMA_FIX
+
if (task->data_dir == DMA_FROM_DEVICE)
- MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
+ MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
-#endif
+
return 0;
}
@@ -761,6 +623,9 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
}
if (is_tmf)
flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
+ else
+ flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
+
hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
hdr->tags = cpu_to_le32(tag);
hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -777,9 +642,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
buf_tmp += MVS_SSP_CMD_SZ;
buf_tmp_dma += MVS_SSP_CMD_SZ;
-#if _MV_DUMP
- slot->cmd_size = MVS_SSP_CMD_SZ;
-#endif
/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
buf_oaf = buf_tmp;
@@ -986,7 +848,6 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock(&task->task_state_lock);
- mvs_hba_memory_dump(mvi, tag, task->task_proto);
mvi_dev->running_req++;
++(*pass);
mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
@@ -1189,9 +1050,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
mvs_slot_free(mvi, slot_idx);
}
-static void mvs_update_wideport(struct mvs_info *mvi, int i)
+static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
{
- struct mvs_phy *phy = &mvi->phy[i];
+ struct mvs_phy *phy = &mvi->phy[phy_no];
struct mvs_port *port = phy->port;
int j, no;
@@ -1246,18 +1107,17 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
return NULL;
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
- s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+ s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
- s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+ s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
- s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+ s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
- s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+ s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
- /* Workaround: take some ATAPI devices for ATA */
if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
@@ -1269,6 +1129,13 @@ static u32 mvs_is_sig_fis_received(u32 irq_status)
return irq_status & PHYEV_SIG_FIS;
}
+static void mvs_sig_remove_timer(struct mvs_phy *phy)
+{
+ if (phy->timer.function)
+ del_timer(&phy->timer);
+ phy->timer.function = NULL;
+}
+
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
struct mvs_phy *phy = &mvi->phy[i];
@@ -1291,6 +1158,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
if (phy->phy_type & PORT_TYPE_SATA) {
phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
if (mvs_is_sig_fis_received(phy->irq_status)) {
+ mvs_sig_remove_timer(phy);
phy->phy_attached = 1;
phy->att_dev_sas_addr =
i + mvi->id * mvi->chip->n_phy;
@@ -1308,7 +1176,6 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
tmp | PHYEV_SIG_FIS);
phy->phy_attached = 0;
phy->phy_type &= ~PORT_TYPE_SATA;
- MVS_CHIP_DISP->phy_reset(mvi, i, 0);
goto out_done;
}
} else if (phy->phy_type & PORT_TYPE_SAS
@@ -1334,9 +1201,9 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
if (MVS_CHIP_DISP->phy_work_around)
MVS_CHIP_DISP->phy_work_around(mvi, i);
}
- mv_dprintk("port %d attach dev info is %x\n",
+ mv_dprintk("phy %d attach dev info is %x\n",
i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
- mv_dprintk("port %d attach sas addr is %llx\n",
+ mv_dprintk("phy %d attach sas addr is %llx\n",
i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
if (get_st)
@@ -1361,10 +1228,10 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
}
hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
- if (sas_port->id >= mvi->chip->n_phy)
- port = &mvi->port[sas_port->id - mvi->chip->n_phy];
+ if (i >= mvi->chip->n_phy)
+ port = &mvi->port[i - mvi->chip->n_phy];
else
- port = &mvi->port[sas_port->id];
+ port = &mvi->port[i];
if (lock)
spin_lock_irqsave(&mvi->lock, flags);
port->port_attached = 1;
@@ -1393,7 +1260,7 @@ static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
return;
}
list_for_each_entry(dev, &port->dev_list, dev_list_node)
- mvs_do_release_task(phy->mvi, phy_no, NULL);
+ mvs_do_release_task(phy->mvi, phy_no, dev);
}
@@ -1457,6 +1324,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
mvi_device->dev_status = MVS_DEV_NORMAL;
mvi_device->dev_type = dev->dev_type;
mvi_device->mvi_info = mvi;
+ mvi_device->sas_device = dev;
if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
int phy_id;
u8 phy_num = parent_dev->ex_dev.num_phys;
@@ -1508,6 +1376,7 @@ void mvs_dev_gone_notify(struct domain_device *dev)
mv_dprintk("found dev has gone.\n");
}
dev->lldd_dev = NULL;
+ mvi_dev->sas_device = NULL;
spin_unlock_irqrestore(&mvi->lock, flags);
}
@@ -1555,7 +1424,6 @@ static void mvs_tmf_timedout(unsigned long data)
complete(&task->completion);
}
-/* XXX */
#define MVS_TASK_TIMEOUT 20
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
@@ -1588,7 +1456,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
}
wait_for_completion(&task->completion);
- res = -TMF_RESP_FUNC_FAILED;
+ res = TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -1638,11 +1506,10 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
u8 *lun, struct mvs_tmf_task *tmf)
{
struct sas_ssp_task ssp_task;
- DECLARE_COMPLETION_ONSTACK(completion);
if (!(dev->tproto & SAS_PROTOCOL_SSP))
return TMF_RESP_FUNC_ESUPP;
- strncpy((u8 *)&ssp_task.LUN, lun, 8);
+ memcpy(ssp_task.LUN, lun, 8);
return mvs_exec_internal_tmf_task(dev, &ssp_task,
sizeof(ssp_task), tmf);
@@ -1666,7 +1533,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
unsigned long flags;
- int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
+ int rc = TMF_RESP_FUNC_FAILED;
struct mvs_tmf_task tmf_task;
struct mvs_device * mvi_dev = dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
@@ -1675,10 +1542,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
mvi_dev->dev_status = MVS_DEV_EH;
rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
if (rc == TMF_RESP_FUNC_COMPLETE) {
- num = mvs_find_dev_phyno(dev, phyno);
spin_lock_irqsave(&mvi->lock, flags);
- for (i = 0; i < num; i++)
- mvs_release_task(mvi, dev);
+ mvs_release_task(mvi, dev);
spin_unlock_irqrestore(&mvi->lock, flags);
}
/* If failed, fall-through I_T_Nexus reset */
@@ -1696,11 +1561,12 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
if (mvi_dev->dev_status != MVS_DEV_EH)
return TMF_RESP_FUNC_COMPLETE;
+ else
+ mvi_dev->dev_status = MVS_DEV_NORMAL;
rc = mvs_debug_I_T_nexus_reset(dev);
mv_printk("%s for device[%x]:rc= %d\n",
__func__, mvi_dev->device_id, rc);
- /* housekeeper */
spin_lock_irqsave(&mvi->lock, flags);
mvs_release_task(mvi, dev);
spin_unlock_irqrestore(&mvi->lock, flags);
@@ -1739,9 +1605,6 @@ int mvs_query_task(struct sas_task *task)
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
break;
- default:
- rc = TMF_RESP_FUNC_COMPLETE;
- break;
}
}
mv_printk("%s:rc= %d\n", __func__, rc);
@@ -1761,8 +1624,8 @@ int mvs_abort_task(struct sas_task *task)
u32 tag;
if (!mvi_dev) {
- mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
- rc = TMF_RESP_FUNC_FAILED;
+ mv_printk("Device has removed\n");
+ return TMF_RESP_FUNC_FAILED;
}
mvi = mvi_dev->mvi_info;
@@ -1807,25 +1670,17 @@ int mvs_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
- /* to do free register_set */
if (SATA_DEV == dev->dev_type) {
struct mvs_slot_info *slot = task->lldd_task;
- struct task_status_struct *tstat;
u32 slot_idx = (u32)(slot - mvi->slot_info);
- tstat = &task->task_status;
- mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
+ mv_dprintk("mvs_abort_task() mvi=%p task=%p "
"slot=%p slot_idx=x%x\n",
mvi, task, slot, slot_idx);
- tstat->stat = SAS_ABORTED_TASK;
- if (mvi_dev && mvi_dev->running_req)
- mvi_dev->running_req--;
- if (sas_protocol_ata(task->task_proto))
- mvs_free_reg_set(mvi, mvi_dev);
+ mvs_tmf_timedout((unsigned long)task);
mvs_slot_task_free(mvi, task, slot, slot_idx);
- return -1;
+ rc = TMF_RESP_FUNC_COMPLETE;
+ goto out;
}
- } else {
- /* SMP */
}
out:
@@ -1891,12 +1746,63 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
return stat;
}
+void mvs_set_sense(u8 *buffer, int len, int d_sense,
+ int key, int asc, int ascq)
+{
+ memset(buffer, 0, len);
+
+ if (d_sense) {
+ /* Descriptor format */
+ if (len < 4) {
+ mv_printk("Length %d of sense buffer too small to "
+ "fit sense %x:%x:%x", len, key, asc, ascq);
+ }
+
+ buffer[0] = 0x72; /* Response Code */
+ if (len > 1)
+ buffer[1] = key; /* Sense Key */
+ if (len > 2)
+ buffer[2] = asc; /* ASC */
+ if (len > 3)
+ buffer[3] = ascq; /* ASCQ */
+ } else {
+ if (len < 14) {
+ mv_printk("Length %d of sense buffer too small to "
+ "fit sense %x:%x:%x", len, key, asc, ascq);
+ }
+
+ buffer[0] = 0x70; /* Response Code */
+ if (len > 2)
+ buffer[2] = key; /* Sense Key */
+ if (len > 7)
+ buffer[7] = 0x0a; /* Additional Sense Length */
+ if (len > 12)
+ buffer[12] = asc; /* ASC */
+ if (len > 13)
+ buffer[13] = ascq; /* ASCQ */
+ }
+
+ return;
+}
+
+void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
+ u8 key, u8 asc, u8 asc_q)
+{
+ iu->datapres = 2;
+ iu->response_data_len = 0;
+ iu->sense_data_len = 17;
+ iu->status = 02;
+ mvs_set_sense(iu->sense_data, 17, 0,
+ key, asc, asc_q);
+}
+
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
u32 slot_idx)
{
struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
int stat;
- u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
+ u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
+ u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
u32 tfs = 0;
enum mvs_port_type type = PORT_TYPE_SAS;
@@ -1908,8 +1814,19 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
stat = SAM_STAT_CHECK_CONDITION;
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
+ {
stat = SAS_ABORTED_TASK;
+ if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
+ struct ssp_response_iu *iu = slot->response +
+ sizeof(struct mvs_err_info);
+ mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
+ sas_ssp_task_response(mvi->dev, task, iu);
+ stat = SAM_STAT_CHECK_CONDITION;
+ }
+ if (err_dw1 & bit(31))
+ mv_printk("reuse same slot, retry command.\n");
break;
+ }
case SAS_PROTOCOL_SMP:
stat = SAM_STAT_CHECK_CONDITION;
break;
@@ -1918,10 +1835,8 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
- if (err_dw0 == 0x80400002)
- mv_printk("find reserved error, why?\n");
-
task->ata_task.use_ncq = 0;
+ stat = SAS_PROTO_RESPONSE;
mvs_sata_done(mvi, task, slot_idx, err_dw0);
}
break;
@@ -1945,8 +1860,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
void *to;
enum exec_status sts;
- if (mvi->exp_req)
- mvi->exp_req--;
if (unlikely(!task || !task->lldd_task || !task->dev))
return -1;
@@ -1954,8 +1867,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
dev = task->dev;
mvi_dev = dev->lldd_dev;
- mvs_hba_cq_dump(mvi);
-
spin_lock(&task->task_state_lock);
task->task_state_flags &=
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
@@ -1978,6 +1889,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
return -1;
}
+ /* when no device is attached, go ahead and complete via error handling */
if (unlikely(!mvi_dev || flags)) {
if (!mvi_dev)
mv_dprintk("port has not device.\n");
@@ -1987,6 +1899,9 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
/* error info record present */
if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+ mv_dprintk("port %d slot %d rx_desc %X has error info"
+ "%016llX.\n", slot->port->sas_port.id, slot_idx,
+ rx_desc, (u64)(*(u64 *)slot->response));
tstat->stat = mvs_slot_err(mvi, task, slot_idx);
tstat->resp = SAS_TASK_COMPLETE;
goto out;
@@ -2048,8 +1963,7 @@ out:
spin_unlock(&mvi->lock);
if (task->task_done)
task->task_done(task);
- else
- mv_dprintk("why has not task_done.\n");
+
spin_lock(&mvi->lock);
return sts;
@@ -2092,7 +2006,6 @@ void mvs_release_task(struct mvs_info *mvi,
struct domain_device *dev)
{
int i, phyno[WIDE_PORT_MAX_PHY], num;
- /* housekeeper */
num = mvs_find_dev_phyno(dev, phyno);
for (i = 0; i < num; i++)
mvs_do_release_task(mvi, phyno[i], dev);
@@ -2111,13 +2024,13 @@ static void mvs_work_queue(struct work_struct *work)
struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
struct mvs_info *mvi = mwq->mvi;
unsigned long flags;
+ u32 phy_no = (unsigned long) mwq->data;
+ struct sas_ha_struct *sas_ha = mvi->sas;
+ struct mvs_phy *phy = &mvi->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
spin_lock_irqsave(&mvi->lock, flags);
if (mwq->handler & PHY_PLUG_EVENT) {
- u32 phy_no = (unsigned long) mwq->data;
- struct sas_ha_struct *sas_ha = mvi->sas;
- struct mvs_phy *phy = &mvi->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
if (phy->phy_event & PHY_PLUG_OUT) {
u32 tmp;
@@ -2139,6 +2052,11 @@ static void mvs_work_queue(struct work_struct *work)
mv_dprintk("phy%d Attached Device\n", phy_no);
}
}
+ } else if (mwq->handler & EXP_BRCT_CHG) {
+ phy->phy_event &= ~EXP_BRCT_CHG;
+ sas_ha->notify_port_event(sas_phy,
+ PORTE_BROADCAST_RCVD);
+ mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
}
list_del(&mwq->entry);
spin_unlock_irqrestore(&mvi->lock, flags);
@@ -2174,29 +2092,21 @@ static void mvs_sig_time_out(unsigned long tphy)
if (&mvi->phy[phy_no] == phy) {
mv_dprintk("Get signature time out, reset phy %d\n",
phy_no+mvi->id*mvi->chip->n_phy);
- MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
}
}
}
-static void mvs_sig_remove_timer(struct mvs_phy *phy)
-{
- if (phy->timer.function)
- del_timer(&phy->timer);
- phy->timer.function = NULL;
-}
-
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
u32 tmp;
- struct sas_ha_struct *sas_ha = mvi->sas;
struct mvs_phy *phy = &mvi->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
- mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
+ MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
+ mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
- mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
+ mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
phy->irq_status);
/*
@@ -2205,11 +2115,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
*/
if (phy->irq_status & PHYEV_DCDR_ERR) {
- mv_dprintk("port %d STP decoding error.\n",
+ mv_dprintk("phy %d STP decoding error.\n",
phy_no + mvi->id*mvi->chip->n_phy);
}
if (phy->irq_status & PHYEV_POOF) {
+ mdelay(500);
if (!(phy->phy_event & PHY_PLUG_OUT)) {
int dev_sata = phy->phy_type & PORT_TYPE_SATA;
int ready;
@@ -2220,17 +2131,13 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
(void *)(unsigned long)phy_no,
PHY_PLUG_EVENT);
ready = mvs_is_phy_ready(mvi, phy_no);
- if (!ready)
- mv_dprintk("phy%d Unplug Notice\n",
- phy_no +
- mvi->id * mvi->chip->n_phy);
if (ready || dev_sata) {
if (MVS_CHIP_DISP->stp_reset)
MVS_CHIP_DISP->stp_reset(mvi,
phy_no);
else
MVS_CHIP_DISP->phy_reset(mvi,
- phy_no, 0);
+ phy_no, MVS_SOFT_RESET);
return;
}
}
@@ -2243,13 +2150,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
if (phy->timer.function == NULL) {
phy->timer.data = (unsigned long)phy;
phy->timer.function = mvs_sig_time_out;
- phy->timer.expires = jiffies + 10*HZ;
+ phy->timer.expires = jiffies + 5*HZ;
add_timer(&phy->timer);
}
}
if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
- mvs_sig_remove_timer(phy);
mv_dprintk("notify plug in on phy[%d]\n", phy_no);
if (phy->phy_status) {
mdelay(10);
@@ -2263,14 +2169,14 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
}
mvs_update_phyinfo(mvi, phy_no, 0);
if (phy->phy_type & PORT_TYPE_SAS) {
- MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
mdelay(10);
}
mvs_bytes_dmaed(mvi, phy_no);
/* whether driver is going to handle hot plug */
if (phy->phy_event & PHY_PLUG_OUT) {
- mvs_port_notify_formed(sas_phy, 0);
+ mvs_port_notify_formed(&phy->sas_phy, 0);
phy->phy_event &= ~PHY_PLUG_OUT;
}
} else {
@@ -2278,13 +2184,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
phy_no + mvi->id*mvi->chip->n_phy);
}
} else if (phy->irq_status & PHYEV_BROAD_CH) {
- mv_dprintk("port %d broadcast change.\n",
+ mv_dprintk("phy %d broadcast change.\n",
phy_no + mvi->id*mvi->chip->n_phy);
- /* exception for Samsung disk drive*/
- mdelay(1000);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
+ EXP_BRCT_CHG);
}
- MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
}
int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
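mvs_set_sense fills either descriptor-format (response code 0x72) or fixed-format (0x70) sense data, and mvs_fill_ssp_resp_iu uses it to report NOT READY / ASC 0x04 / ASCQ 0x01 when a command hits a vanished destination. A standalone sketch of the two layouts:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void demo_set_sense(uint8_t *buf, int len, int d_sense,
                           int key, int asc, int ascq)
{
        memset(buf, 0, len);
        if (d_sense) {
                buf[0] = 0x72;                  /* descriptor format */
                if (len > 1) buf[1] = key;
                if (len > 2) buf[2] = asc;
                if (len > 3) buf[3] = ascq;
        } else {
                buf[0] = 0x70;                  /* fixed format */
                if (len > 2)  buf[2] = key;
                if (len > 7)  buf[7] = 0x0a;    /* additional sense length */
                if (len > 12) buf[12] = asc;
                if (len > 13) buf[13] = ascq;
        }
}

int main(void)
{
        uint8_t sense[17];
        int i;

        /* NOT READY (0x02), ASC 0x04, ASCQ 0x01 in fixed format */
        demo_set_sense(sense, sizeof(sense), 0, 0x02, 0x04, 0x01);
        for (i = 0; i < (int)sizeof(sense); i++)
                printf("%02x ", sense[i]);
        printf("\n");
        return 0;
}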
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index d6fd740..44b4745 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -47,12 +47,8 @@
#define DRV_NAME "mvsas"
#define DRV_VERSION "0.8.2"
-#define _MV_DUMP 0
#define MVS_ID_NOT_MAPPED 0x7f
-/* #define DISABLE_HOTPLUG_DMA_FIX */
-// #define MAX_EXP_RUNNING_REQ 2
#define WIDE_PORT_MAX_PHY 4
-#define MV_DISABLE_NCQ 0
#define mv_printk(fmt, arg ...) \
printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
#ifdef MV_DEBUG
@@ -63,6 +59,7 @@
#endif
#define MV_MAX_U32 0xffffffff
+extern int interrupt_coalescing;
extern struct mvs_tgt_initiator mvs_tgt;
extern struct mvs_info *tgt_mvi;
extern const struct mvs_dispatch mvs_64xx_dispatch;
@@ -98,6 +95,11 @@ enum dev_status {
MVS_DEV_EH = 0x1,
};
+enum dev_reset {
+ MVS_SOFT_RESET = 0,
+ MVS_HARD_RESET = 1,
+ MVS_PHY_TUNE = 2,
+};
struct mvs_info;
@@ -129,7 +131,6 @@ struct mvs_dispatch {
u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
- void (*get_sas_addr)(void *buf, u32 buflen);
void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
@@ -166,9 +167,10 @@ struct mvs_dispatch {
);
int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
-#ifndef DISABLE_HOTPLUG_DMA_FIX
- void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
-#endif
+ void (*dma_fix)(struct mvs_info *mvi, u32 phy_mask,
+ int buf_len, int from, void *prd);
+ void (*tune_interrupt)(struct mvs_info *mvi, u32 time);
+ void (*non_spec_ncq_error)(struct mvs_info *mvi);
};
@@ -178,9 +180,11 @@ struct mvs_chip_info {
u32 fis_offs;
u32 fis_count;
u32 srs_sz;
+ u32 sg_width;
u32 slot_width;
const struct mvs_dispatch *dispatch;
};
+#define MVS_MAX_SG (1U << mvi->chip->sg_width)
#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
#define MVS_RX_FISL_SZ \
(mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
@@ -248,6 +252,73 @@ struct mvs_device {
u16 reserved;
};
+/* Generate PHY tuning parameters */
+struct phy_tuning {
+ /* 1 bit, transmitter emphasis enable */
+ u8 trans_emp_en:1;
+ /* 4 bits, transmitter emphasis amplitude */
+ u8 trans_emp_amp:4;
+ /* 3 bits, reserved space */
+ u8 Reserved_2bit_1:3;
+ /* 5 bits, transmitter amplitude */
+ u8 trans_amp:5;
+ /* 2 bits, transmitter amplitude adjust */
+ u8 trans_amp_adj:2;
+ /* 1 bit, reserved space */
+ u8 resv_2bit_2:1;
+ /* 2 bytes, reserved space */
+ u8 reserved[2];
+};
+
+struct ffe_control {
+ /* 4 bits, FFE Capacitor Select (value range 0~F) */
+ u8 ffe_cap_sel:4;
+ /* 3 bits, FFE Resistor Select (value range 0~7) */
+ u8 ffe_rss_sel:3;
+ /* 1 bit reserve*/
+ u8 reserved:1;
+};
+
+/*
+ * HBA_Info_Page is saved in Flash/NVRAM, total 256 bytes.
+ * The data area is valid only when Signature="MRVL".
+ * If any member is filled with 0xFF, that member is invalid.
+ */
+struct hba_info_page {
+ /* Dword 0 */
+ /* 4 bytes, structure signature, should be "MRVL" when first initialized */
+ u8 signature[4];
+
+ /* Dword 1-13 */
+ u32 reserved1[13];
+
+ /* Dword 14-29 */
+ /* 64 bytes, SAS address for each port */
+ u64 sas_addr[8];
+
+ /* Dword 30-31 */
+ /* 8 bytes for vanir 8 port PHY FFE setting
+ * BIT 0~3 : FFE Capacitor select(value range 0~F)
+ * BIT 4~6 : FFE Resistor select(value range 0~7)
+ * BIT 7: reserve.
+ */
+
+ struct ffe_control ffe_ctl[8];
+ /* Dword 32 -43 */
+ u32 reserved2[12];
+
+ /* Dword 44-45 */
+ /* 8 bytes, 0: 1.5G, 1: 3.0G, should be 0x01 at first initial */
+ u8 phy_rate[8];
+
+ /* Dword 46-53 */
+ /* 32 bytes, PHY tuning parameters for each PHY*/
+ struct phy_tuning phy_tuning[8];
+
+ /* Dword 54-63 */
+ u32 reserved3[10];
+}; /* total 256 bytes */
+
struct mvs_slot_info {
struct list_head entry;
union {
@@ -263,9 +334,6 @@ struct mvs_slot_info {
*/
void *buf;
dma_addr_t buf_dma;
-#if _MV_DUMP
- u32 cmd_size;
-#endif
void *response;
struct mvs_port *port;
struct mvs_device *device;
@@ -319,12 +387,10 @@ struct mvs_info {
const struct mvs_chip_info *chip;
int tags_num;
- DECLARE_BITMAP(tags, MVS_SLOTS);
+ unsigned long *tags;
/* further per-slot information */
struct mvs_phy phy[MVS_MAX_PHYS];
struct mvs_port port[MVS_MAX_PHYS];
- u32 irq;
- u32 exp_req;
u32 id;
u64 sata_reg_set;
struct list_head *hba_list;
@@ -336,12 +402,13 @@ struct mvs_info {
u32 flashsectSize;
void *addon;
+ struct hba_info_page hba_info_param;
struct mvs_device devices[MVS_MAX_DEVICES];
-#ifndef DISABLE_HOTPLUG_DMA_FIX
void *bulk_buffer;
dma_addr_t bulk_buffer_dma;
+ void *bulk_buffer1;
+ dma_addr_t bulk_buffer_dma1;
#define TRASH_BUCKET_SIZE 0x20000
-#endif
void *dma_pool;
struct mvs_slot_info slot_info[0];
};
@@ -349,8 +416,10 @@ struct mvs_info {
struct mvs_prv_info{
u8 n_host;
u8 n_phy;
- u16 reserve;
+ u8 scan_finished;
+ u8 reserve;
struct mvs_info *mvi[2];
+ struct tasklet_struct mv_tasklet;
};
struct mvs_wq {
@@ -414,6 +483,6 @@ void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
-void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
+struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set);
#endif
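The chip table now carries sg_width and slot_width, and the driver sizes its structures as powers of two from them: MVS_CHIP_SLOT_SZ is 1 << slot_width and the tag bitmap is allocated as MVS_CHIP_SLOT_SZ >> 3 bytes (one bit per command slot). The sketch below plugs in the 6485 and 9485 widths as read from the mvs_chips[] hunk earlier in this diff; treat that mapping as a reading of the table, not a definitive spec:

#include <stdio.h>

struct demo_chip { const char *name; unsigned sg_width, slot_width; };

int main(void)
{
        static const struct demo_chip chips[] = {
                { "6485", 6, 10 },
                { "9485", 8, 11 },
        };
        unsigned i;

        for (i = 0; i < sizeof(chips) / sizeof(chips[0]); i++) {
                unsigned slots = 1u << chips[i].slot_width;
                unsigned sg    = 1u << chips[i].sg_width;

                printf("%s: %u slots, %u-byte tag bitmap, %u max SG entries\n",
                       chips[i].name, slots, slots >> 3, sg);
        }
        return 0;
}

Deriving everything from the widths is what lets the 94xx parts get 2048 slots while the slot_info[] array, tag bitmap and sg_tablesize all scale automatically.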
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index aa05e66..b97c8ab 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -54,7 +54,7 @@
#include <scsi/libsas.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm8001"
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index ee161c6..b86db84 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3870,6 +3870,9 @@ static long pmcraid_ioctl_passthrough(
pmcraid_err("couldn't build passthrough ioadls\n");
goto out_free_buffer;
}
+ } else if (request_size < 0) {
+ rc = -EINVAL;
+ goto out_free_buffer;
}
/* If data is being written into the device, copy the data from user
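The pmcraid passthrough ioctl gains a check that a negative request_size coming from user space is rejected with -EINVAL before any buffer setup. A trivial sketch of that sign check:

#include <stdio.h>

static int demo_check_request(long request_size)
{
        if (request_size > 0)
                return 0;               /* build ioadls / copy data */
        else if (request_size < 0)
                return -1;              /* -EINVAL in the driver */
        return 0;                       /* zero-length passthrough is fine */
}

int main(void)
{
        printf("size 4096 -> %d\n", demo_check_request(4096));
        printf("size -1   -> %d\n", demo_check_request(-1));
        return 0;
}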
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 532313e..a31e05f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -42,8 +42,8 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
int reading;
if (IS_QLA82XX(ha)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Firmware dump not supported for ISP82xx\n"));
+ ql_dbg(ql_dbg_user, vha, 0x705b,
+ "Firmware dump not supported for ISP82xx\n");
return count;
}
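The qla2xxx hunks replace DEBUG2(qla_printk(...)) with ql_dbg()/ql_log() calls that carry a per-message hexadecimal id (0x705b, 0x705d, ...), so messages can be filtered by class and located by id. A toy, user-space model of that style of logging, with illustrative names and mask values:

#include <stdarg.h>
#include <stdio.h>

static unsigned int ql_dbg_mask = 0x1;          /* enabled message classes */
#define QL_DBG_USER 0x1

static void demo_ql_dbg(unsigned int clss, unsigned int id,
                        const char *fmt, ...)
{
        va_list ap;

        if (!(ql_dbg_mask & clss))
                return;                         /* class not enabled */
        printf("qla2xxx [%04x]: ", id);
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
}

int main(void)
{
        demo_ql_dbg(QL_DBG_USER, 0x705b,
                    "Firmware dump not supported for ISP82xx\n");
        return 0;
}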
@@ -56,7 +56,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
if (!ha->fw_dump_reading)
break;
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x705d,
"Firmware dump cleared on (%ld).\n", vha->host_no);
ha->fw_dump_reading = 0;
@@ -66,7 +66,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
if (ha->fw_dumped && !ha->fw_dump_reading) {
ha->fw_dump_reading = 1;
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x705e,
"Raw firmware dump ready for read on (%ld).\n",
vha->host_no);
}
@@ -148,7 +148,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
}
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x705f,
"HBA not online, failing NVRAM update.\n");
return -EAGAIN;
}
@@ -158,6 +158,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
count);
+ ql_dbg(ql_dbg_user, vha, 0x7060,
+ "Setting ISP_ABORT_NEEDED\n");
/* NVRAM settings take effect immediately. */
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -255,9 +257,9 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_state = QLA_SWAITING;
- DEBUG2(qla_printk(KERN_INFO, ha,
+ ql_dbg(ql_dbg_user, vha, 0x7061,
"Freeing flash region allocation -- 0x%x bytes.\n",
- ha->optrom_region_size));
+ ha->optrom_region_size);
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
@@ -273,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
if (ha->optrom_buffer == NULL) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7062,
"Unable to allocate memory for optrom retrieval "
"(%x).\n", ha->optrom_region_size);
@@ -282,14 +284,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "HBA not online, failing NVRAM update.\n");
+ ql_log(ql_log_warn, vha, 0x7063,
+ "HBA not online, failing NVRAM update.\n");
return -EAGAIN;
}
- DEBUG2(qla_printk(KERN_INFO, ha,
+ ql_dbg(ql_dbg_user, vha, 0x7064,
"Reading flash region -- 0x%x/0x%x.\n",
- ha->optrom_region_start, ha->optrom_region_size));
+ ha->optrom_region_start, ha->optrom_region_size);
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
@@ -328,7 +330,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
valid = 1;
if (!valid) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7065,
"Invalid start region 0x%x/0x%x.\n", start, size);
return -EINVAL;
}
@@ -340,17 +342,17 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
if (ha->optrom_buffer == NULL) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7066,
"Unable to allocate memory for optrom update "
- "(%x).\n", ha->optrom_region_size);
+ "(%x)\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
return count;
}
- DEBUG2(qla_printk(KERN_INFO, ha,
+ ql_dbg(ql_dbg_user, vha, 0x7067,
"Staging flash region write -- 0x%x/0x%x.\n",
- ha->optrom_region_start, ha->optrom_region_size));
+ ha->optrom_region_start, ha->optrom_region_size);
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
break;
@@ -359,14 +361,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
break;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7068,
"HBA not online, failing flash update.\n");
return -EAGAIN;
}
- DEBUG2(qla_printk(KERN_INFO, ha,
+ ql_dbg(ql_dbg_user, vha, 0x7069,
"Writing flash region -- 0x%x/0x%x.\n",
- ha->optrom_region_start, ha->optrom_region_size));
+ ha->optrom_region_start, ha->optrom_region_size);
ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
@@ -425,7 +427,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
return 0;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x706a,
"HBA not online, failing VPD update.\n");
return -EAGAIN;
}
@@ -440,7 +442,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
tmp_data = vmalloc(256);
if (!tmp_data) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x706b,
"Unable to allocate memory for VPD information update.\n");
goto done;
}
@@ -480,7 +482,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->sfp_data_dma);
if (!ha->sfp_data) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x706c,
"Unable to allocate memory for SFP read-data.\n");
return 0;
}
@@ -499,9 +501,10 @@ do_read:
rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
addr, offset, SFP_BLOCK_SIZE, 0);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x706d,
"Unable to read SFP data (%x/%x/%x).\n", rval,
addr, offset);
+
count = 0;
break;
}
@@ -538,8 +541,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
type = simple_strtol(buf, NULL, 10);
switch (type) {
case 0x2025c:
- qla_printk(KERN_INFO, ha,
- "Issuing ISP reset on (%ld).\n", vha->host_no);
+ ql_log(ql_log_info, vha, 0x706e,
+ "Issuing ISP reset.\n");
scsi_block_requests(vha->host);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -551,8 +554,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
if (!IS_QLA81XX(ha))
break;
- qla_printk(KERN_INFO, ha,
- "Issuing MPI reset on (%ld).\n", vha->host_no);
+ ql_log(ql_log_info, vha, 0x706f,
+ "Issuing MPI reset.\n");
/* Make sure FC side is not in reset */
qla2x00_wait_for_hba_online(vha);
@@ -560,20 +563,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
/* Issue MPI reset */
scsi_block_requests(vha->host);
if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
- qla_printk(KERN_WARNING, ha,
- "MPI reset failed on (%ld).\n", vha->host_no);
+ ql_log(ql_log_warn, vha, 0x7070,
+ "MPI reset failed.\n");
scsi_unblock_requests(vha->host);
break;
case 0x2025e:
if (!IS_QLA82XX(ha) || vha != base_vha) {
- qla_printk(KERN_INFO, ha,
- "FCoE ctx reset not supported for host%ld.\n",
- vha->host_no);
+ ql_log(ql_log_info, vha, 0x7071,
+ "FCoE ctx reset no supported.\n");
return count;
}
- qla_printk(KERN_INFO, ha,
- "Issuing FCoE CTX reset on host%ld.\n", vha->host_no);
+ ql_log(ql_log_info, vha, 0x7072,
+ "Issuing FCoE ctx reset.\n");
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_fcoe_ctx_reset(vha);
@@ -611,8 +613,8 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->edc_data_dma);
if (!ha->edc_data) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unable to allocate memory for EDC write.\n"));
+ ql_log(ql_log_warn, vha, 0x7073,
+ "Unable to allocate memory for EDC write.\n");
return 0;
}
}
@@ -631,9 +633,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
- rval, dev, adr, opt, len, buf[8]));
+ ql_log(ql_log_warn, vha, 0x7074,
+ "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
+ rval, dev, adr, opt, len, buf[8]);
return 0;
}
@@ -669,8 +671,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->edc_data_dma);
if (!ha->edc_data) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unable to allocate memory for EDC status.\n"));
+ ql_log(ql_log_warn, vha, 0x708c,
+ "Unable to allocate memory for EDC status.\n");
return 0;
}
}
@@ -688,9 +690,9 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
- rval, dev, adr, opt, len));
+ ql_log(ql_log_info, vha, 0x7075,
+ "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
+ rval, dev, adr, opt, len);
return 0;
}
@@ -749,7 +751,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
&ha->xgmac_data_dma, GFP_KERNEL);
if (!ha->xgmac_data) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7076,
"Unable to allocate memory for XGMAC read-data.\n");
return 0;
}
@@ -761,7 +763,7 @@ do_read:
rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
XGMAC_DATA_SIZE, &actual_size);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7077,
"Unable to read XGMAC data (%x).\n", rval);
count = 0;
}
@@ -801,7 +803,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
&ha->dcbx_tlv_dma, GFP_KERNEL);
if (!ha->dcbx_tlv) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7078,
"Unable to allocate memory for DCBX TLV read-data.\n");
return 0;
}
@@ -813,8 +815,8 @@ do_read:
rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
DCBX_TLV_DATA_SIZE);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Unable to read DCBX TLV data (%x).\n", rval);
+ ql_log(ql_log_warn, vha, 0x7079,
+ "Unable to read DCBX TLV (%x).\n", rval);
count = 0;
}
@@ -869,9 +871,13 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
iter->attr);
if (ret)
- qla_printk(KERN_INFO, vha->hw,
- "Unable to create sysfs %s binary attribute "
- "(%d).\n", iter->name, ret);
+ ql_log(ql_log_warn, vha, 0x00f3,
+ "Unable to create sysfs %s binary attribute (%d).\n",
+ iter->name, ret);
+ else
+ ql_dbg(ql_dbg_init, vha, 0x00f4,
+ "Successfully created sysfs %s binary attribure.\n",
+ iter->name);
}
}
@@ -1126,7 +1132,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
return -EPERM;
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x707a,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
}
@@ -1322,9 +1328,8 @@ qla2x00_thermal_temp_show(struct device *dev,
temp = frac = 0;
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): isp reset in progress.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x707b,
+ "ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
if (rval != QLA_SUCCESS)
@@ -1343,8 +1348,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
- DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x707c,
+ "ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
rval = qla2x00_get_firmware_state(vha, state);
if (rval != QLA_SUCCESS)
@@ -1645,8 +1650,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
if (stats == NULL) {
- DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
- __func__, base_vha->host_no));
+ ql_log(ql_log_warn, vha, 0x707d,
+ "Failed to allocate memory for stats.\n");
goto done;
}
memset(stats, 0, DMA_POOL_SIZE);
@@ -1746,15 +1751,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
if (ret) {
- DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
- "status %x\n", ret));
+ ql_log(ql_log_warn, vha, 0x707e,
+ "Vport sanity check failed, status %x\n", ret);
return (ret);
}
vha = qla24xx_create_vhost(fc_vport);
if (vha == NULL) {
- DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
- vha));
+ ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
return FC_VPORT_FAILED;
}
if (disable) {
@@ -1764,8 +1768,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
atomic_set(&vha->vp_state, VP_FAILED);
/* ready to create vport */
- qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
- vha->vp_idx);
+ ql_log(ql_log_info, vha, 0x7080,
+ "VP entry id %d assigned.\n", vha->vp_idx);
/* initialized vport states */
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1775,21 +1779,23 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
/* Don't retry or attempt login of this virtual port */
- DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7081,
+ "Vport loop state is not UP.\n");
atomic_set(&vha->loop_state, LOOP_DEAD);
if (!disable)
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
}
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
+ int prot = 0;
vha->flags.difdix_supported = 1;
- DEBUG18(qla_printk(KERN_INFO, ha,
- "Registering for DIF/DIX type 1 and 3"
- " protection.\n"));
+ ql_dbg(ql_dbg_user, vha, 0x7082,
+ "Registered for DIF/DIX type 1 and 3 protection.\n");
+ if (ql2xenabledif == 1)
+ prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host,
- SHOST_DIF_TYPE1_PROTECTION
+ prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION
| SHOST_DIX_TYPE1_PROTECTION
@@ -1802,8 +1808,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
&ha->pdev->dev)) {
- DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
- vha->host_no, vha->vp_idx));
+ ql_dbg(ql_dbg_user, vha, 0x7083,
+ "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
goto vport_create_failed_2;
}
@@ -1820,6 +1826,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (ha->flags.cpu_affinity_enabled) {
req = ha->req_q_map[1];
+ ql_dbg(ql_dbg_multiq, vha, 0xc000,
+ "Request queue %p attached with "
+ "VP[%d], cpu affinity =%d\n",
+ req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
goto vport_queue;
} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
goto vport_queue;
@@ -1836,13 +1846,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
qos);
if (!ret)
- qla_printk(KERN_WARNING, ha,
- "Can't create request queue for vp_idx:%d\n",
- vha->vp_idx);
+ ql_log(ql_log_warn, vha, 0x7084,
+ "Can't create request queue for VP[%d]\n",
+ vha->vp_idx);
else {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
- ret, qos, vha->vp_idx));
+ ql_dbg(ql_dbg_multiq, vha, 0xc001,
+ "Request Que:%d Q0s: %d) created for VP[%d]\n",
+ ret, qos, vha->vp_idx);
+ ql_dbg(ql_dbg_user, vha, 0x7085,
+ "Request Que:%d Q0s: %d) created for VP[%d]\n",
+ ret, qos, vha->vp_idx);
req = ha->req_q_map[ret];
}
}
@@ -1882,12 +1895,13 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
if (vha->timer_active) {
qla2x00_vp_stop_timer(vha);
- DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
- " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+ ql_dbg(ql_dbg_user, vha, 0x7086,
+ "Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
/* No pending activities shall be there on the vha now */
- DEBUG(msleep(random32()%10)); /* Just to see if something falls on
+ if (ql2xextended_error_logging & ql_dbg_user)
+ msleep(random32()%10); /* Just to see if something falls on
* the net we have placed below */
BUG_ON(atomic_read(&vha->vref_count));
@@ -1901,12 +1915,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
- qla_printk(KERN_WARNING, ha,
- "Queue delete failed.\n");
+ ql_log(ql_log_warn, vha, 0x7087,
+ "Queue delete failed.\n");
}
scsi_host_put(vha->host);
- qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
+ ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 8c10e2c..07d1767 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -36,7 +36,8 @@ done:
}
int
-qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
+qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
+ struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
int i, ret, num_valid;
uint8_t *bcode;
@@ -51,18 +52,17 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
if (bcode_val == 0xFFFFFFFF) {
/* No FCP Priority config data in flash */
- DEBUG2(printk(KERN_INFO
- "%s: No FCP priority config data.\n",
- __func__));
+ ql_dbg(ql_dbg_user, vha, 0x7051,
+ "No FCP Priority config data.\n");
return 0;
}
if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
bcode[3] != 'S') {
/* Invalid FCP priority data header*/
- DEBUG2(printk(KERN_ERR
- "%s: Invalid FCP Priority data header. bcode=0x%x\n",
- __func__, bcode_val));
+ ql_dbg(ql_dbg_user, vha, 0x7052,
+ "Invalid FCP Priority data header. bcode=0x%x.\n",
+ bcode_val);
return 0;
}
if (flag != 1)
@@ -77,15 +77,14 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
if (num_valid == 0) {
/* No valid FCP priority data entries */
- DEBUG2(printk(KERN_ERR
- "%s: No valid FCP Priority data entries.\n",
- __func__));
+ ql_dbg(ql_dbg_user, vha, 0x7053,
+ "No valid FCP Priority data entries.\n");
ret = 0;
} else {
/* FCP priority data is valid */
- DEBUG2(printk(KERN_INFO
- "%s: Valid FCP priority data. num entries = %d\n",
- __func__, num_valid));
+ ql_dbg(ql_dbg_user, vha, 0x7054,
+ "Valid FCP priority data. num entries = %d.\n",
+ num_valid);
}
return ret;
@@ -182,10 +181,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (!ha->fcp_prio_cfg) {
ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
if (!ha->fcp_prio_cfg) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory "
- "for fcp prio config data (%x).\n",
- FCP_PRIO_CFG_SIZE);
+ ql_log(ql_log_warn, vha, 0x7050,
+ "Unable to allocate memory for fcp prio "
+ "config data (%x).\n", FCP_PRIO_CFG_SIZE);
bsg_job->reply->result = (DID_ERROR << 16);
ret = -ENOMEM;
goto exit_fcp_prio_cfg;
@@ -198,9 +196,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
FCP_PRIO_CFG_SIZE);
/* validate fcp priority data */
- if (!qla24xx_fcp_prio_cfg_valid(
- (struct qla_fcp_prio_cfg *)
- ha->fcp_prio_cfg, 1)) {
+
+ if (!qla24xx_fcp_prio_cfg_valid(vha,
+ (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
bsg_job->reply->result = (DID_ERROR << 16);
ret = -EINVAL;
/* If buffer was invalidatic int
@@ -256,9 +254,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* pass through is supported only for ISP 4Gb or higher */
if (!IS_FWI2_CAPABLE(ha)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld):ELS passthru not supported for ISP23xx based "
- "adapters\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7001,
+ "ELS passthru not supported for ISP23xx based adapters.\n");
rval = -EPERM;
goto done;
}
@@ -266,11 +263,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Multiple SG's are not supported for ELS requests */
if (bsg_job->request_payload.sg_cnt > 1 ||
bsg_job->reply_payload.sg_cnt > 1) {
- DEBUG2(printk(KERN_INFO
- "multiple SG's are not supported for ELS requests"
- " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt,
- bsg_job->reply_payload.sg_cnt));
+ ql_dbg(ql_dbg_user, vha, 0x7002,
+ "Multiple SG's are not suppored for ELS requests, "
+ "request_sg_cnt=%x reply_sg_cnt=%x.\n",
+ bsg_job->request_payload.sg_cnt,
+ bsg_job->reply_payload.sg_cnt);
rval = -EPERM;
goto done;
}
@@ -281,9 +278,9 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
* if not perform fabric login
*/
if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "failed to login port %06X for ELS passthru\n",
- fcport->d_id.b24));
+ ql_dbg(ql_dbg_user, vha, 0x7003,
+ "Failed to login port %06X for ELS passthru.\n",
+ fcport->d_id.b24);
rval = -EIO;
goto done;
}
@@ -314,8 +311,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
+ ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
rval = -EIO;
goto done;
}
@@ -337,12 +333,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ ql_log(ql_log_warn, vha, 0x7008,
+ "dma mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
+ "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
+ req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@@ -363,15 +358,16 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
"bsg_els_rpt" : "bsg_els_hst");
els->u.bsg_job = bsg_job;
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
- "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
- bsg_job->request->rqst_data.h_els.command_code,
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_user, vha, 0x700a,
+ "bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%-2x%02x%02x.\n", type,
+ bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x700e,
+ "qla2x00_start_sp failed = %d\n", rval);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
@@ -411,6 +407,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!req_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x700f,
+ "dma_map_sg return %d for request\n", req_sg_cnt);
rval = -ENOMEM;
goto done;
}
@@ -418,24 +416,25 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!rsp_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7010,
+ "dma_map_sg return %d for reply\n", rsp_sg_cnt);
rval = -ENOMEM;
goto done;
}
if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "[request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ ql_log(ql_log_warn, vha, 0x7011,
+ "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+ "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+ req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
+ ql_log(ql_log_warn, vha, 0x7012,
+ "Host is not online.\n");
rval = -EIO;
goto done_unmap_sg;
}
@@ -451,8 +450,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
loop_id = vha->mgmt_svr_loop_id;
break;
default:
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unknown loop id: %x\n", loop_id));
+ ql_dbg(ql_dbg_user, vha, 0x7013,
+ "Unknown loop id: %x.\n", loop_id);
rval = -EINVAL;
goto done_unmap_sg;
}
@@ -464,6 +463,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
*/
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x7014,
+ "Failed to allocate fcport.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@@ -479,6 +480,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Alloc SRB structure */
sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
if (!sp) {
+ ql_log(ql_log_warn, vha, 0x7015,
+ "qla2x00_get_ctx_bsg_sp failed.\n");
rval = -ENOMEM;
goto done_free_fcport;
}
@@ -488,15 +491,17 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
ct->name = "bsg_ct";
ct->u.bsg_job = bsg_job;
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
- "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
- (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_user, vha, 0x7016,
+ "bsg rqst type: %s else type: %x - "
+ "loop-id=%x portid=%02x%02x%02x.\n", type,
+ (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7017,
+ "qla2x00_start_sp failed=%d.\n", rval);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
@@ -535,9 +540,8 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
ha->notify_dcbx_comp = 1;
ret = qla81xx_set_port_config(vha, new_config);
if (ret != QLA_SUCCESS) {
- DEBUG2(printk(KERN_ERR
- "%s(%lu): Set port config failed\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7021,
+ "set port config failed.\n");
ha->notify_dcbx_comp = 0;
rval = -EINVAL;
goto done_set_internal;
@@ -545,11 +549,11 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
/* Wait for DCBX complete event */
if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "State change notificaition not received.\n"));
+ ql_dbg(ql_dbg_user, vha, 0x7022,
+ "State change notification not received.\n");
} else
- DEBUG2(qla_printk(KERN_INFO, ha,
- "State change RECEIVED\n"));
+ ql_dbg(ql_dbg_user, vha, 0x7023,
+ "State change received.\n");
ha->notify_dcbx_comp = 0;
@@ -581,9 +585,8 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
ha->notify_dcbx_comp = wait;
ret = qla81xx_set_port_config(vha, new_config);
if (ret != QLA_SUCCESS) {
- DEBUG2(printk(KERN_ERR
- "%s(%lu): Set port config failed\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7025,
+ "Set port config failed.\n");
ha->notify_dcbx_comp = 0;
rval = -EINVAL;
goto done_reset_internal;
@@ -592,14 +595,14 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
/* Wait for DCBX complete event */
if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
(20 * HZ))) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "State change notificaition not received.\n"));
+ ql_dbg(ql_dbg_user, vha, 0x7026,
+ "State change notification not received.\n");
ha->notify_dcbx_comp = 0;
rval = -EINVAL;
goto done_reset_internal;
} else
- DEBUG2(qla_printk(KERN_INFO, ha,
- "State change RECEIVED\n"));
+ ql_dbg(ql_dbg_user, vha, 0x7027,
+ "State change received.\n");
ha->notify_dcbx_comp = 0;
}
@@ -629,11 +632,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
return -EBUSY;
+ }
if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
+ ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
return -EIO;
}
@@ -641,26 +646,31 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
DMA_TO_DEVICE);
- if (!elreq.req_sg_cnt)
+ if (!elreq.req_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x701a,
+ "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
return -ENOMEM;
+ }
elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
DMA_FROM_DEVICE);
if (!elreq.rsp_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x701b,
+ "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
rval = -ENOMEM;
goto done_unmap_req_sg;
}
if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts "
- "[request_sg_cnt: %x dma_request_sg_cnt: %x "
- "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
+ ql_log(ql_log_warn, vha, 0x701c,
+ "dma mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
+ bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@@ -668,8 +678,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
&req_data_dma, GFP_KERNEL);
if (!req_data) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
- "failed for host=%lu\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x701d,
+ "dma alloc failed for req_data.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@@ -677,8 +687,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
&rsp_data_dma, GFP_KERNEL);
if (!rsp_data) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
- "failed for host=%lu\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7004,
+ "dma alloc failed for rsp_data.\n");
rval = -ENOMEM;
goto done_free_dma_req;
}
@@ -699,8 +709,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
+ ql_dbg(ql_dbg_user, vha, 0x701e,
+ "BSG request type: %s.\n", type);
command_sent = INT_DEF_LB_ECHO_CMD;
rval = qla2x00_echo_test(vha, &elreq, response);
} else {
@@ -708,9 +718,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
if (qla81xx_get_port_config(vha, config)) {
- DEBUG2(printk(KERN_ERR
- "%s(%lu): Get port config failed\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x701f,
+ "Get port config failed.\n");
bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
@@ -718,11 +727,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
}
if (elreq.options != EXTERNAL_LOOPBACK) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Internal: current port config = %x\n",
- config[0]));
+ ql_dbg(ql_dbg_user, vha, 0x7020,
+ "Internal: curent port config = %x\n",
+ config[0]);
if (qla81xx_set_internal_loopback(vha, config,
new_config)) {
+ ql_log(ql_log_warn, vha, 0x7024,
+ "Internal loopback failed.\n");
bsg_job->reply->reply_payload_rcv_len =
0;
bsg_job->reply->result =
@@ -746,9 +757,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
}
type = "FC_BSG_HST_VENDOR_LOOPBACK";
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s\n",
- vha->host_no, type));
+ ql_dbg(ql_dbg_user, vha, 0x7028,
+ "BSG request type: %s.\n", type);
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
@@ -763,17 +773,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (response[0] == MBS_COMMAND_ERROR &&
response[1] == MBS_LB_RESET) {
- DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
- "ISP\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7029,
+ "MBX command error, Aborting ISP.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
/* Also reset the MPI */
if (qla81xx_restart_mpi_firmware(vha) !=
QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha,
- "MPI reset failed for host%ld.\n",
- vha->host_no);
+ ql_log(ql_log_warn, vha, 0x702a,
+ "MPI reset failed.\n");
}
bsg_job->reply->reply_payload_rcv_len = 0;
@@ -783,17 +792,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
}
} else {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s\n",
- vha->host_no, type));
+ ql_dbg(ql_dbg_user, vha, 0x702b,
+ "BSG request type: %s.\n", type);
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
}
}
if (rval) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request %s failed\n", vha->host_no, type));
+ ql_log(ql_log_warn, vha, 0x702c,
+ "Vendor request %s failed.\n", type);
fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
sizeof(struct fc_bsg_reply);
@@ -805,8 +813,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request %s completed\n", vha->host_no, type));
+ ql_dbg(ql_dbg_user, vha, 0x702d,
+ "Vendor request %s completed.\n", type);
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(response) + sizeof(uint8_t);
@@ -851,12 +859,13 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
return -EBUSY;
+ }
if (!IS_QLA84XX(ha)) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
- "exiting.\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
return -EINVAL;
}
@@ -865,14 +874,14 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
if (rval) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx reset failed\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7030,
+ "Vendor request 84xx reset failed.\n");
rval = bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx reset completed\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7031,
+ "Vendor request 84xx reset completed.\n");
bsg_job->reply->result = DID_OK;
}
@@ -902,21 +911,24 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
return -EBUSY;
if (!IS_QLA84XX(ha)) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
- "exiting.\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7032,
+ "Not 84xx, exiting.\n");
return -EINVAL;
}
sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!sg_cnt)
+ if (!sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7033,
+ "dma_map_sg returned %d for request.\n", sg_cnt);
return -ENOMEM;
+ }
if (sg_cnt != bsg_job->request_payload.sg_cnt) {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts "
- "request_sg_cnt: %x dma_request_sg_cnt: %x ",
- bsg_job->request_payload.sg_cnt, sg_cnt));
+ ql_log(ql_log_warn, vha, 0x7034,
+ "DMA mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
+ bsg_job->request_payload.sg_cnt, sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@@ -925,8 +937,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
&fw_dma, GFP_KERNEL);
if (!fw_buf) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
- "failed for host=%lu\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7035,
+ "DMA alloc failed for fw_buf.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@@ -936,8 +948,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (!mn) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
- "failed for host=%lu\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7036,
+ "DMA alloc failed for fw buffer.\n");
rval = -ENOMEM;
goto done_free_fw_buf;
}
@@ -965,15 +977,15 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
if (rval) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx updatefw failed\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7037,
+ "Vendor request 84xx updatefw failed.\n");
rval = bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx updatefw completed\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7038,
+ "Vendor request 84xx updatefw completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
bsg_job->reply->result = DID_OK;
@@ -1009,27 +1021,30 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x7039,
+ "Abort active or needed.\n");
return -EBUSY;
+ }
if (!IS_QLA84XX(ha)) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
- "exiting.\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x703a,
+ "Not 84xx, exiting.\n");
return -EINVAL;
}
ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
sizeof(struct fc_bsg_request));
if (!ql84_mgmt) {
- DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x703b,
+ "MGMT header not provided, exiting.\n");
return -EINVAL;
}
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (!mn) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
- "failed for host=%lu\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x703c,
+ "DMA alloc failed for fw buffer.\n");
return -ENOMEM;
}
@@ -1044,6 +1059,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x703d,
+ "dma_map_sg returned %d for reply.\n", sg_cnt);
rval = -ENOMEM;
goto exit_mgmt;
}
@@ -1051,10 +1068,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
dma_direction = DMA_FROM_DEVICE;
if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts "
- "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
- bsg_job->reply_payload.sg_cnt, sg_cnt));
+ ql_log(ql_log_warn, vha, 0x703e,
+ "DMA mapping resulted in different sg counts, "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
+ bsg_job->reply_payload.sg_cnt, sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@@ -1064,9 +1081,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
&mgmt_dma, GFP_KERNEL);
if (!mgmt_b) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
- "failed for host=%lu\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x703f,
+ "DMA alloc failed for mgmt_b.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@@ -1094,6 +1110,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7040,
+ "dma_map_sg returned %d.\n", sg_cnt);
rval = -ENOMEM;
goto exit_mgmt;
}
@@ -1101,10 +1119,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
dma_direction = DMA_TO_DEVICE;
if (sg_cnt != bsg_job->request_payload.sg_cnt) {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts "
- "request_sg_cnt: %x dma_request_sg_cnt: %x ",
- bsg_job->request_payload.sg_cnt, sg_cnt));
+ ql_log(ql_log_warn, vha, 0x7041,
+ "DMA mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
+ bsg_job->request_payload.sg_cnt, sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@@ -1113,9 +1131,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
&mgmt_dma, GFP_KERNEL);
if (!mgmt_b) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
- "failed for host=%lu\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7042,
+ "DMA alloc failed for mgmt_b.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@@ -1156,15 +1173,15 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
if (rval) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx mgmt failed\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7043,
+ "Vendor request 84xx mgmt failed.\n");
rval = bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx mgmt completed\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7044,
+ "Vendor request 84xx mgmt completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
bsg_job->reply->result = DID_OK;
@@ -1204,7 +1221,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
- struct qla_hw_data *ha = vha->hw;
int rval = 0;
struct qla_port_param *port_param = NULL;
fc_port_t *fcport = NULL;
@@ -1215,26 +1231,27 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
return -EBUSY;
+ }
if (!IS_IIDMA_CAPABLE(vha->hw)) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
- "supported\n", __func__, vha->host_no));
+ ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
return -EINVAL;
}
port_param = (struct qla_port_param *)((char *)bsg_job->request +
sizeof(struct fc_bsg_request));
if (!port_param) {
- DEBUG2(printk("%s(%ld): port_param header not provided, "
- "exiting.\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7047,
+ "port_param header not provided.\n");
return -EINVAL;
}
if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
- DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7048,
+ "Invalid destination type.\n");
return -EINVAL;
}
@@ -1249,21 +1266,20 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
}
if (!fcport) {
- DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7049,
+ "Failed to find port.\n");
return -EINVAL;
}
if (atomic_read(&fcport->state) != FCS_ONLINE) {
- DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x704a,
+ "Port is not online.\n");
return -EINVAL;
}
if (fcport->flags & FCF_LOGIN_NEEDED) {
- DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, "
- "flags = 0x%x\n",
- __func__, vha->host_no, fcport->flags));
+ ql_log(ql_log_warn, vha, 0x704b,
+ "Remote port not logged in flags = 0x%x.\n", fcport->flags);
return -EINVAL;
}
@@ -1275,15 +1291,13 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
&port_param->speed, mb);
if (rval) {
- DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
- "%02x%02x%02x%02x%02x%02x%02x%02x -- "
- "%04x %x %04x %04x.\n",
- vha->host_no, fcport->port_name[0],
- fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7], rval,
- fcport->fp_speed, mb[0], mb[1]));
+ ql_log(ql_log_warn, vha, 0x704c,
+ "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
+ "%04x %x %04x %04x.\n", fcport->port_name[0],
+ fcport->port_name[1], fcport->port_name[2],
+ fcport->port_name[3], fcport->port_name[4],
+ fcport->port_name[5], fcport->port_name[6],
+ fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
@@ -1307,11 +1321,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
}
static int
-qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
+qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
uint8_t is_update)
{
uint32_t start = 0;
int valid = 0;
+ struct qla_hw_data *ha = vha->hw;
bsg_job->reply->reply_payload_rcv_len = 0;
@@ -1319,14 +1334,20 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
return -EINVAL;
start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
- if (start > ha->optrom_size)
+ if (start > ha->optrom_size) {
+ ql_log(ql_log_warn, vha, 0x7055,
+ "start %d > optrom_size %d.\n", start, ha->optrom_size);
return -EINVAL;
+ }
- if (ha->optrom_state != QLA_SWAITING)
+ if (ha->optrom_state != QLA_SWAITING) {
+ ql_log(ql_log_info, vha, 0x7056,
+ "optrom_state %d.\n", ha->optrom_state);
return -EBUSY;
+ }
ha->optrom_region_start = start;
-
+ ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
if (is_update) {
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
valid = 1;
@@ -1337,9 +1358,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
IS_QLA8XXX_TYPE(ha))
valid = 1;
if (!valid) {
- qla_printk(KERN_WARNING, ha,
- "Invalid start region 0x%x/0x%x.\n",
- start, bsg_job->request_payload.payload_len);
+ ql_log(ql_log_warn, vha, 0x7058,
+ "Invalid start region 0x%x/0x%x.\n", start,
+ bsg_job->request_payload.payload_len);
return -EINVAL;
}
@@ -1358,9 +1379,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
if (!ha->optrom_buffer) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7059,
"Read: Unable to allocate memory for optrom retrieval "
- "(%x).\n", ha->optrom_region_size);
+ "(%x)\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
return -ENOMEM;
@@ -1378,7 +1399,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
int rval = 0;
- rval = qla2x00_optrom_setup(bsg_job, ha, 0);
+ rval = qla2x00_optrom_setup(bsg_job, vha, 0);
if (rval)
return rval;
@@ -1406,7 +1427,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
int rval = 0;
- rval = qla2x00_optrom_setup(bsg_job, ha, 1);
+ rval = qla2x00_optrom_setup(bsg_job, vha, 1);
if (rval)
return rval;
@@ -1464,6 +1485,23 @@ int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
int ret = -EINVAL;
+ struct fc_rport *rport;
+ fc_port_t *fcport = NULL;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *vha;
+
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ rport = bsg_job->rport;
+ fcport = *(fc_port_t **) rport->dd_data;
+ host = rport_to_shost(rport);
+ vha = shost_priv(host);
+ } else {
+ host = bsg_job->shost;
+ vha = shost_priv(host);
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x7000,
+ "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
switch (bsg_job->request->msgcode) {
case FC_BSG_RPT_ELS:
@@ -1480,7 +1518,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
case FC_BSG_HST_DEL_RPORT:
case FC_BSG_RPT_CT:
default:
- DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
+ ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
break;
}
return ret;
@@ -1514,17 +1552,15 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
&& (sp_bsg->u.bsg_job == bsg_job)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx "
- "abort_command failed\n",
- vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7089,
+ "mbx abort_command "
+ "failed.\n");
bsg_job->req->errors =
bsg_job->reply->result = -EIO;
} else {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx "
- "abort_command success\n",
- vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x708a,
+ "mbx abort_command "
+ "success.\n");
bsg_job->req->errors =
bsg_job->reply->result = 0;
}
@@ -1535,8 +1571,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) SRB not found to abort\n", vha->host_no));
+ ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c53719a..d79cd8a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -4,10 +4,36 @@
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
+
+/*
+ * Table for showing the current message id in use for particular level
+ * Change this table for addition of log/debug messages.
+ * ----------------------------------------------------------------------
+ * | Level | Last Value Used | Holes |
+ * ----------------------------------------------------------------------
+ * | Module Init and Probe | 0x0116 | |
+ * | Mailbox commands | 0x1126 | |
+ * | Device Discovery | 0x2083 | |
+ * | Queue Command and IO tracing | 0x302e | 0x3008 |
+ * | DPC Thread | 0x401c | |
+ * | Async Events | 0x5059 | |
+ * | Timer Routines | 0x600d | |
+ * | User Space Interactions | 0x709d | |
+ * | Task Management | 0x8041 | |
+ * | AER/EEH | 0x900f | |
+ * | Virtual Port | 0xa007 | |
+ * | ISP82XX Specific | 0xb04f | |
+ * | MultiQ | 0xc00b | |
+ * | Misc | 0xd00b | |
+ * ----------------------------------------------------------------------
+ */
+
#include "qla_def.h"
#include <linux/delay.h>
+static uint32_t ql_dbg_offset = 0x800;
+
static inline void
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
{
@@ -383,11 +409,11 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
struct qla_hw_data *ha = vha->hw;
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Failed to dump firmware (%x)!!!\n", rval);
+ ql_log(ql_log_warn, vha, 0xd000,
+ "Failed to dump firmware (%x).\n", rval);
ha->fw_dumped = 0;
} else {
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0xd001,
"Firmware dump saved to temp buffer (%ld/%p).\n",
vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
@@ -419,15 +445,16 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "No buffer available for dump!!!\n");
+ ql_log(ql_log_warn, vha, 0xd002,
+ "No buffer available for dump.\n");
goto qla2300_fw_dump_failed;
}
if (ha->fw_dumped) {
- qla_printk(KERN_WARNING, ha,
- "Firmware has been previously dumped (%p) -- ignoring "
- "request...\n", ha->fw_dump);
+ ql_log(ql_log_warn, vha, 0xd003,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
goto qla2300_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp23;
@@ -582,15 +609,16 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "No buffer available for dump!!!\n");
+ ql_log(ql_log_warn, vha, 0xd004,
+ "No buffer available for dump.\n");
goto qla2100_fw_dump_failed;
}
if (ha->fw_dumped) {
- qla_printk(KERN_WARNING, ha,
- "Firmware has been previously dumped (%p) -- ignoring "
- "request...\n", ha->fw_dump);
+ ql_log(ql_log_warn, vha, 0xd005,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
goto qla2100_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp21;
@@ -779,15 +807,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "No buffer available for dump!!!\n");
+ ql_log(ql_log_warn, vha, 0xd006,
+ "No buffer available for dump.\n");
goto qla24xx_fw_dump_failed;
}
if (ha->fw_dumped) {
- qla_printk(KERN_WARNING, ha,
- "Firmware has been previously dumped (%p) -- ignoring "
- "request...\n", ha->fw_dump);
+ ql_log(ql_log_warn, vha, 0xd007,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
goto qla24xx_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp24;
@@ -1017,15 +1046,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "No buffer available for dump!!!\n");
+ ql_log(ql_log_warn, vha, 0xd008,
+ "No buffer available for dump.\n");
goto qla25xx_fw_dump_failed;
}
if (ha->fw_dumped) {
- qla_printk(KERN_WARNING, ha,
- "Firmware has been previously dumped (%p) -- ignoring "
- "request...\n", ha->fw_dump);
+ ql_log(ql_log_warn, vha, 0xd009,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
goto qla25xx_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp25;
@@ -1328,15 +1358,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "No buffer available for dump!!!\n");
+ ql_log(ql_log_warn, vha, 0xd00a,
+ "No buffer available for dump.\n");
goto qla81xx_fw_dump_failed;
}
if (ha->fw_dumped) {
- qla_printk(KERN_WARNING, ha,
- "Firmware has been previously dumped (%p) -- ignoring "
- "request...\n", ha->fw_dump);
+ ql_log(ql_log_warn, vha, 0xd00b,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
goto qla81xx_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp81;
@@ -1619,106 +1650,255 @@ qla81xx_fw_dump_failed:
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
-
+/*
+ * This function is for formatting and logging debug information.
+ * It is to be used when vha is available. It formats the message
+ * and logs it to the messages file.
+ * parameters:
+ * level: The level of the debug messages to be printed.
+ * If ql2xextended_error_logging value is correctly set,
+ * this message will appear in the messages file.
+ * vha: Pointer to the scsi_qla_host_t.
+ * id: This is a unique identifier for the level. It identifies the
+ * part of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
void
-qla2x00_dump_regs(scsi_qla_host_t *vha)
-{
- int i;
- struct qla_hw_data *ha = vha->hw;
- struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
- uint16_t __iomem *mbx_reg;
+ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
+
+ char pbuf[QL_DBG_BUF_LEN];
+ va_list ap;
+ uint32_t len;
+ struct pci_dev *pdev = NULL;
+
+ memset(pbuf, 0, QL_DBG_BUF_LEN);
+
+ va_start(ap, msg);
+
+ if ((level & ql2xextended_error_logging) == level) {
+ if (vha != NULL) {
+ pdev = vha->hw->pdev;
+ /* <module-name> <pci-name> <msg-id>:<host> Message */
+ sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
+ dev_name(&(pdev->dev)), id + ql_dbg_offset,
+ vha->host_no);
+ } else
+ sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
+ "0000:00:00.0", id + ql_dbg_offset);
+
+ len = strlen(pbuf);
+ vsprintf(pbuf+len, msg, ap);
+ pr_warning("%s", pbuf);
+ }
- mbx_reg = IS_FWI2_CAPABLE(ha) ? &reg24->mailbox0:
- MAILBOX_REG(ha, reg, 0);
+ va_end(ap);
- printk("Mailbox registers:\n");
- for (i = 0; i < 6; i++)
- printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
- RD_REG_WORD(mbx_reg++));
}
-
+/*
+ * This function is for formatting and logging debug information.
+ * It is to be used when vha is not available and pci is availble,
+ * i.e., before host allocation. It formats the message and logs it
+ * to the messages file.
+ * parameters:
+ * level: The level of the debug messages to be printed.
+ * If ql2xextended_error_logging value is correctly set,
+ * this message will appear in the messages file.
+ * pdev: Pointer to the struct pci_dev.
+ * id: This is a unique id for the level. It identifies the part
+ * of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
void
-qla2x00_dump_buffer(uint8_t * b, uint32_t size)
-{
- uint32_t cnt;
- uint8_t c;
+ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
- printk(" 0 1 2 3 4 5 6 7 8 9 "
- "Ah Bh Ch Dh Eh Fh\n");
- printk("----------------------------------------"
- "----------------------\n");
-
- for (cnt = 0; cnt < size;) {
- c = *b++;
- printk("%02x",(uint32_t) c);
- cnt++;
- if (!(cnt % 16))
- printk("\n");
- else
- printk(" ");
+ char pbuf[QL_DBG_BUF_LEN];
+ va_list ap;
+ uint32_t len;
+
+ if (pdev == NULL)
+ return;
+
+ memset(pbuf, 0, QL_DBG_BUF_LEN);
+
+ va_start(ap, msg);
+
+ if ((level & ql2xextended_error_logging) == level) {
+ /* <module-name> <dev-name>:<msg-id> Message */
+ sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
+ dev_name(&(pdev->dev)), id + ql_dbg_offset);
+
+ len = strlen(pbuf);
+ vsprintf(pbuf+len, msg, ap);
+ pr_warning("%s", pbuf);
}
- if (cnt % 16)
- printk("\n");
+
+ va_end(ap);
+
}
+/*
+ * This function is for formatting and logging log messages.
+ * It is to be used when vha is available. It formats the message
+ * and logs it to the messages file. All the messages will be logged
+ * irrespective of value of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log messages to be printed in the
+ * messages file.
+ * vha: Pointer to the scsi_qla_host_t
+ * id: This is a unique id for the level. It identifies the
+ * part of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
void
-qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size)
-{
- uint32_t cnt;
- uint8_t c;
- uint8_t last16[16], cur16[16];
- uint32_t lc = 0, num_same16 = 0, j;
+ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
- printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 "
- "Ah Bh Ch Dh Eh Fh\n");
- printk(KERN_DEBUG "----------------------------------------"
- "----------------------\n");
+ char pbuf[QL_DBG_BUF_LEN];
+ va_list ap;
+ uint32_t len;
+ struct pci_dev *pdev = NULL;
- for (cnt = 0; cnt < size;) {
- c = *b++;
+ memset(pbuf, 0, QL_DBG_BUF_LEN);
- cur16[lc++] = c;
+ va_start(ap, msg);
- cnt++;
- if (cnt % 16)
- continue;
-
- /* We have 16 now */
- lc = 0;
- if (num_same16 == 0) {
- memcpy(last16, cur16, 16);
- num_same16++;
- continue;
+ if (level <= ql_errlev) {
+ if (vha != NULL) {
+ pdev = vha->hw->pdev;
+ /* <module-name> <msg-id>:<host> Message */
+ sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
+ dev_name(&(pdev->dev)), id, vha->host_no);
+ } else
+ sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
+ "0000:00:00.0", id);
+
+ len = strlen(pbuf);
+ vsprintf(pbuf+len, msg, ap);
+
+ switch (level) {
+ case 0: /* FATAL LOG */
+ pr_crit("%s", pbuf);
+ break;
+ case 1:
+ pr_err("%s", pbuf);
+ break;
+ case 2:
+ pr_warn("%s", pbuf);
+ break;
+ default:
+ pr_info("%s", pbuf);
+ break;
}
- if (memcmp(cur16, last16, 16) == 0) {
- num_same16++;
- continue;
+ }
+
+ va_end(ap);
+}
+
+/*
+ * This function is for formatting and logging log messages.
+ * It is to be used when vha is not available and pci is availble,
+ * i.e., before host allocation. It formats the message and logs
+ * it to the messages file. All the messages are logged irrespective
+ * of the value of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log messages to be printed in the
+ * messages file.
+ * pdev: Pointer to the struct pci_dev.
+ * id: This is a unique id for the level. It identifies the
+ * part of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
+void
+ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
+
+ char pbuf[QL_DBG_BUF_LEN];
+ va_list ap;
+ uint32_t len;
+
+ if (pdev == NULL)
+ return;
+
+ memset(pbuf, 0, QL_DBG_BUF_LEN);
+
+ va_start(ap, msg);
+
+ if (level <= ql_errlev) {
+ /* <module-name> <dev-name>:<msg-id> Message */
+ sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
+ dev_name(&(pdev->dev)), id);
+
+ len = strlen(pbuf);
+ vsprintf(pbuf+len, msg, ap);
+ switch (level) {
+ case 0: /* FATAL LOG */
+ pr_crit("%s", pbuf);
+ break;
+ case 1:
+ pr_err("%s", pbuf);
+ break;
+ case 2:
+ pr_warn("%s", pbuf);
+ break;
+ default:
+ pr_info("%s", pbuf);
+ break;
}
- for (j = 0; j < 16; j++)
- printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
- printk(KERN_DEBUG "\n");
-
- if (num_same16 > 1)
- printk(KERN_DEBUG "> prev pattern repeats (%u)"
- "more times\n", num_same16-1);
- memcpy(last16, cur16, 16);
- num_same16 = 1;
}
- if (num_same16) {
- for (j = 0; j < 16; j++)
- printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
- printk(KERN_DEBUG "\n");
+ va_end(ap);
+}
- if (num_same16 > 1)
- printk(KERN_DEBUG "> prev pattern repeats (%u)"
- "more times\n", num_same16-1);
+void
+ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
+{
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+ uint16_t __iomem *mbx_reg;
+
+ if ((level & ql2xextended_error_logging) == level) {
+
+ if (IS_QLA82XX(ha))
+ mbx_reg = &reg82->mailbox_in[0];
+ else if (IS_FWI2_CAPABLE(ha))
+ mbx_reg = &reg24->mailbox0;
+ else
+ mbx_reg = MAILBOX_REG(ha, reg, 0);
+
+ ql_dbg(level, vha, id, "Mailbox registers:\n");
+ for (i = 0; i < 6; i++)
+ ql_dbg(level, vha, id,
+ "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
}
- if (lc) {
- for (j = 0; j < lc; j++)
- printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]);
- printk(KERN_DEBUG "\n");
+}
+
+
+void
+ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
+ uint8_t *b, uint32_t size)
+{
+ uint32_t cnt;
+ uint8_t c;
+ if ((level & ql2xextended_error_logging) == level) {
+
+ ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
+ "9 Ah Bh Ch Dh Eh Fh\n");
+ ql_dbg(level, vha, id, "----------------------------------"
+ "----------------------------\n");
+
+ ql_dbg(level, vha, id, "");
+ for (cnt = 0; cnt < size;) {
+ c = *b++;
+ printk("%02x", (uint32_t) c);
+ cnt++;
+ if (!(cnt % 16))
+ printk("\n");
+ else
+ printk(" ");
+ }
+ if (cnt % 16)
+ ql_dbg(level, vha, id, "\n");
}
}
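
For reference, a minimal usage sketch of the interfaces defined above (the id 0x7fff and the PCI address below are illustrative only, not values from this patch):

	/* always logged when the level is at or below ql_errlev */
	ql_log(ql_log_warn, vha, 0x7fff, "Host is not online.\n");

	/* logged only when the ql_dbg_user bit is set in
	 * ql2xextended_error_logging; the id is printed with
	 * ql_dbg_offset (0x800) added to it */
	ql_dbg(ql_dbg_user, vha, 0x7fff, "Entered %s.\n", __func__);

With a vha available, both build the "<module-name> <pci-name> <msg-id>:<host>" prefix shown above, so the ql_log() call would emit roughly "qla2xxx [0000:05:00.0]-7fff:0: Host is not online."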
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 9304145..98a377b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -8,146 +8,6 @@
#include "qla_def.h"
/*
- * Driver debug definitions.
- */
-/* #define QL_DEBUG_LEVEL_1 */ /* Output register accesses to COM1 */
-/* #define QL_DEBUG_LEVEL_2 */ /* Output error msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_3 */ /* Output function trace msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_4 */ /* Output NVRAM trace msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_5 */ /* Output ring trace msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_6 */ /* Output WATCHDOG timer trace to COM1 */
-/* #define QL_DEBUG_LEVEL_7 */ /* Output RISC load trace msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_8 */ /* Output ring saturation msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_9 */ /* Output IOCTL trace msgs */
-/* #define QL_DEBUG_LEVEL_10 */ /* Output IOCTL error msgs */
-/* #define QL_DEBUG_LEVEL_11 */ /* Output Mbx Cmd trace msgs */
-/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */
-/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */
-/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
-/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
-/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
-/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
-/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
-
-/*
-* Macros use for debugging the driver.
-*/
-
-#define DEBUG(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-
-#if defined(QL_DEBUG_LEVEL_1)
-#define DEBUG1(x) do {x;} while (0)
-#else
-#define DEBUG1(x) do {} while (0)
-#endif
-
-#define DEBUG2(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_3(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_3_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_9_10(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-
-#if defined(QL_DEBUG_LEVEL_3)
-#define DEBUG3(x) do {x;} while (0)
-#define DEBUG3_11(x) do {x;} while (0)
-#else
-#define DEBUG3(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_4)
-#define DEBUG4(x) do {x;} while (0)
-#else
-#define DEBUG4(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_5)
-#define DEBUG5(x) do {x;} while (0)
-#else
-#define DEBUG5(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_7)
-#define DEBUG7(x) do {x;} while (0)
-#else
-#define DEBUG7(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_9)
-#define DEBUG9(x) do {x;} while (0)
-#define DEBUG9_10(x) do {x;} while (0)
-#else
-#define DEBUG9(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_10)
-#define DEBUG10(x) do {x;} while (0)
-#define DEBUG9_10(x) do {x;} while (0)
-#else
-#define DEBUG10(x) do {} while (0)
- #if !defined(DEBUG9_10)
- #define DEBUG9_10(x) do {} while (0)
- #endif
-#endif
-
-#if defined(QL_DEBUG_LEVEL_11)
-#define DEBUG11(x) do{x;} while(0)
-#if !defined(DEBUG3_11)
-#define DEBUG3_11(x) do{x;} while(0)
-#endif
-#else
-#define DEBUG11(x) do{} while(0)
- #if !defined(QL_DEBUG_LEVEL_3)
- #define DEBUG3_11(x) do{} while(0)
- #endif
-#endif
-
-#if defined(QL_DEBUG_LEVEL_12)
-#define DEBUG12(x) do {x;} while (0)
-#else
-#define DEBUG12(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_13)
-#define DEBUG13(x) do {x;} while (0)
-#else
-#define DEBUG13(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_14)
-#define DEBUG14(x) do {x;} while (0)
-#else
-#define DEBUG14(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_15)
-#define DEBUG15(x) do {x;} while (0)
-#else
-#define DEBUG15(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_16)
-#define DEBUG16(x) do {x;} while (0)
-#else
-#define DEBUG16(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_17)
-#define DEBUG17(x) do {x;} while (0)
-#else
-#define DEBUG17(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_18)
-#define DEBUG18(x) do {if (ql2xextended_error_logging) x; } while (0)
-#else
-#define DEBUG18(x) do {} while (0)
-#endif
-
-
-/*
* Firmware Dump structure definition
*/
@@ -370,3 +230,50 @@ struct qla2xxx_fw_dump {
struct qla81xx_fw_dump isp81;
} isp;
};
+
+#define QL_MSGHDR "qla2xxx"
+
+#define ql_log_fatal 0 /* display fatal errors */
+#define ql_log_warn 1 /* display critical errors */
+#define ql_log_info 2 /* display all recovered errors */
+#define ql_log_all 3 /* This value is only used by ql_errlev.
+ * No messages will use this value.
+ * This should always be the highest value
+ * compared to the other log levels.
+ */
+
+extern int ql_errlev;
+
+void
+ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
+void
+ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+
+void
+ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
+void
+ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+
+/* Debug Levels */
+/* The 0x40000000 is the max value any debug level can have
+ * as ql2xextended_error_logging is of type signed int
+ */
+#define ql_dbg_init 0x40000000 /* Init Debug */
+#define ql_dbg_mbx 0x20000000 /* MBX Debug */
+#define ql_dbg_disc 0x10000000 /* Device Discovery Debug */
+#define ql_dbg_io 0x08000000 /* IO Tracing Debug */
+#define ql_dbg_dpc 0x04000000 /* DPC Thread Debug */
+#define ql_dbg_async 0x02000000 /* Async events Debug */
+#define ql_dbg_timer 0x01000000 /* Timer Debug */
+#define ql_dbg_user 0x00800000 /* User Space Interactions Debug */
+#define ql_dbg_taskm 0x00400000 /* Task Management Debug */
+#define ql_dbg_aer 0x00200000 /* AER/EEH Debug */
+#define ql_dbg_multiq 0x00100000 /* MultiQ Debug */
+#define ql_dbg_p3p 0x00080000 /* P3P specific Debug */
+#define ql_dbg_vport 0x00040000 /* Virtual Port Debug */
+#define ql_dbg_buffer 0x00020000 /* For dumping the buffer/regs */
+#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
+ * covered by upper categories
+ */
+
+#define QL_DBG_BUF_LEN 512
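As a rough guide to how the categories above combine, here is a self-contained user-space sketch (not driver code) of the bitmask gating used by ql_dbg(); vprintf stands in for the kernel printing paths and the constants are copied from the header above:

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* Category bits copied from the header above (subset only). */
#define ql_dbg_init	0x40000000
#define ql_dbg_disc	0x10000000
#define ql_dbg_buffer	0x00020000

/* Stand-in for the ql2xextended_error_logging module parameter. */
static int ql2xextended_error_logging = ql_dbg_disc | ql_dbg_buffer;

/* Simplified ql_dbg(): emits only when all requested category bits are set. */
static void dbg(uint32_t level, int32_t id, const char *fmt, ...)
{
	va_list ap;

	if ((level & ql2xextended_error_logging) != level)
		return;
	printf("qla2xxx [%04x]: ", (unsigned int)id);	/* QL_MSGHDR-style prefix */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	dbg(ql_dbg_disc, 0x2031, "printed - discovery tracing enabled\n");
	dbg(ql_dbg_disc + ql_dbg_buffer, 0x2078, "printed - both bits enabled\n");
	dbg(ql_dbg_init, 0x007b, "suppressed - init bit not enabled\n");
	return 0;
}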
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index cc5a792..a03eaf4 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2529,6 +2529,7 @@ struct qla_hw_data {
#define DT_ISP8021 BIT_14
#define DT_ISP_LAST (DT_ISP8021 << 1)
+#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
#define DT_FWI2 BIT_27
#define DT_ZIO_SUPPORTED BIT_28
@@ -2572,6 +2573,7 @@ struct qla_hw_data {
#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
+#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a5a4e12..0b4c2b7 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -64,7 +64,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
/* Pause tracing to flush FCE buffers. */
rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
if (rval)
- qla_printk(KERN_WARNING, ha,
+ ql_dbg(ql_dbg_user, vha, 0x705c,
"DebugFS: Unable to disable FCE (%d).\n", rval);
ha->flags.fce_enabled = 0;
@@ -92,7 +92,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
ha->fce_mb, &ha->fce_bufs);
if (rval) {
- qla_printk(KERN_WARNING, ha,
+ ql_dbg(ql_dbg_user, vha, 0x700d,
"DebugFS: Unable to reinitialize FCE (%d).\n", rval);
ha->flags.fce_enabled = 0;
}
@@ -125,8 +125,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
atomic_set(&qla2x00_dfs_root_count, 0);
qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
if (!qla2x00_dfs_root) {
- qla_printk(KERN_NOTICE, ha,
- "DebugFS: Unable to create root directory.\n");
+ ql_log(ql_log_warn, vha, 0x00f7,
+ "Unable to create debugfs root directory.\n");
goto out;
}
@@ -137,8 +137,8 @@ create_dir:
mutex_init(&ha->fce_mutex);
ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
if (!ha->dfs_dir) {
- qla_printk(KERN_NOTICE, ha,
- "DebugFS: Unable to create ha directory.\n");
+ ql_log(ql_log_warn, vha, 0x00f8,
+ "Unable to create debugfs ha directory.\n");
goto out;
}
@@ -148,8 +148,8 @@ create_nodes:
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
if (!ha->dfs_fce) {
- qla_printk(KERN_NOTICE, ha,
- "DebugFS: Unable to fce node.\n");
+ ql_log(ql_log_warn, vha, 0x00f9,
+ "Unable to create debugfs fce node.\n");
goto out;
}
out:
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 691783a..aa69486 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -537,6 +537,11 @@ struct sts_entry_24xx {
/*
* If DIF Error is set in comp_status, these additional fields are
* defined:
+ *
+ * !!! NOTE: Firmware sends expected/actual DIF data in big endian
+ * format; however, the whole "data" field is swab32()'d at the beginning
+ * of qla2x00_status_entry().
+ *
* &data[10] : uint8_t report_runt_bg[2]; - computed guard
* &data[12] : uint8_t actual_dif[8]; - DIF Data received
* &data[20] : uint8_t expected_dif[8]; - DIF Data computed
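To make the offsets above concrete, a hypothetical helper (purely illustrative, not part of this patch; the struct and field names are invented) could unpack the guard and the received/expected DIF tuples from the status entry's data area like so, keeping the swab32 note above in mind:

#include <stdint.h>
#include <string.h>

/* Illustrative container; offsets come from the comment above. */
struct dif_err_info {
	uint8_t report_runt_bg[2];	/* &data[10] - computed guard */
	uint8_t actual_dif[8];		/* &data[12] - DIF data received */
	uint8_t expected_dif[8];	/* &data[20] - DIF data computed */
};

void unpack_dif_err(const uint8_t *data, struct dif_err_info *out)
{
	/* Per the NOTE above, each 32-bit word of "data" has already been
	 * swab32()'d by the time qla2x00_status_entry() examines it. */
	memcpy(out->report_runt_bg, &data[10], sizeof(out->report_runt_bg));
	memcpy(out->actual_dif, &data[12], sizeof(out->actual_dif));
	memcpy(out->expected_dif, &data[20], sizeof(out->expected_dif));
}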
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b38122..29b1a3e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -185,7 +185,7 @@ extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
-extern uint16_t qla24xx_calc_iocbs(uint16_t);
+extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
@@ -439,6 +439,9 @@ extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
extern void qla2x00_dump_regs(scsi_qla_host_t *);
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
+extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
+extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
+ uint8_t *, uint32_t);
/*
* Global Function Prototypes in qla_gs.c source file.
@@ -478,7 +481,8 @@ extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16
extern int qla2x00_echo_test(scsi_qla_host_t *,
struct msg_echo_lb *, uint16_t *);
extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
-extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t);
+extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
+ struct qla_fcp_prio_cfg *, uint8_t);
/*
* Global Function Prototypes in qla_dfs.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 8cd9066..37937aa 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -121,11 +121,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
rval = QLA_FUNCTION_FAILED;
if (ms_pkt->entry_status != 0) {
- DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status "
- "(%x) on port_id: %02x%02x%02x.\n",
- vha->host_no, routine, ms_pkt->entry_status,
- vha->d_id.b.domain, vha->d_id.b.area,
- vha->d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, vha, 0x2031,
+ "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
+ routine, ms_pkt->entry_status, vha->d_id.b.domain,
+ vha->d_id.b.area, vha->d_id.b.al_pa);
} else {
if (IS_FWI2_CAPABLE(ha))
comp_status = le16_to_cpu(
@@ -138,24 +137,24 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
case CS_DATA_OVERRUN: /* Overrun? */
if (ct_rsp->header.response !=
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
- DEBUG2_3(printk("scsi(%ld): %s failed, "
- "rejected request on port_id: %02x%02x%02x\n",
- vha->host_no, routine,
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
+ "%s failed rejected request on port_id: "
+ "%02x%02x%02x.\n", routine,
vha->d_id.b.domain, vha->d_id.b.area,
- vha->d_id.b.al_pa));
- DEBUG2_3(qla2x00_dump_buffer(
- (uint8_t *)&ct_rsp->header,
- sizeof(struct ct_rsp_hdr)));
+ vha->d_id.b.al_pa);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
+ 0x2078, (uint8_t *)&ct_rsp->header,
+ sizeof(struct ct_rsp_hdr));
rval = QLA_INVALID_COMMAND;
} else
rval = QLA_SUCCESS;
break;
default:
- DEBUG2_3(printk("scsi(%ld): %s failed, completion "
- "status (%x) on port_id: %02x%02x%02x.\n",
- vha->host_no, routine, comp_status,
+ ql_dbg(ql_dbg_disc, vha, 0x2033,
+ "%s failed, completion status (%x) on port_id: "
+ "%02x%02x%02x.\n", routine, comp_status,
vha->d_id.b.domain, vha->d_id.b.area,
- vha->d_id.b.al_pa));
+ vha->d_id.b.al_pa);
break;
}
}
@@ -202,8 +201,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2062,
+ "GA_NXT issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -222,11 +221,10 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
fcport->d_id.b.domain = 0xf0;
- DEBUG2_3(printk("scsi(%ld): GA_NXT entry - "
- "nn %02x%02x%02x%02x%02x%02x%02x%02x "
+ ql_dbg(ql_dbg_disc, vha, 0x2063,
+ "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
- "portid=%02x%02x%02x.\n",
- vha->host_no,
+ "port_id=%02x%02x%02x.\n",
fcport->node_name[0], fcport->node_name[1],
fcport->node_name[2], fcport->node_name[3],
fcport->node_name[4], fcport->node_name[5],
@@ -236,7 +234,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[4], fcport->port_name[5],
fcport->port_name[6], fcport->port_name[7],
fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ fcport->d_id.b.al_pa);
}
return (rval);
@@ -287,8 +285,8 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2055,
+ "GID_PT issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -364,8 +362,8 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed "
- "(%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2056,
+ "GPN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -424,8 +422,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed "
- "(%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2057,
+ "GNN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GNN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -434,11 +432,10 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
memcpy(list[i].node_name,
ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
- DEBUG2_3(printk("scsi(%ld): GID_PT entry - "
- "nn %02x%02x%02x%02x%02x%02x%02x%02x "
- "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+ ql_dbg(ql_dbg_disc, vha, 0x2058,
+ "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
+ "pn %02x%02x%02x%02x%02x%02x%02X%02x "
"portid=%02x%02x%02x.\n",
- vha->host_no,
list[i].node_name[0], list[i].node_name[1],
list[i].node_name[2], list[i].node_name[3],
list[i].node_name[4], list[i].node_name[5],
@@ -448,7 +445,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
list[i].port_name[4], list[i].port_name[5],
list[i].port_name[6], list[i].port_name[7],
list[i].d_id.b.domain, list[i].d_id.b.area,
- list[i].d_id.b.al_pa));
+ list[i].d_id.b.al_pa);
}
/* Last device exit. */
@@ -499,14 +496,14 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2043,
+ "RFT_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2044,
+ "RFT_ID exiting normally.\n");
}
return (rval);
@@ -528,8 +525,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
struct ct_sns_rsp *ct_rsp;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on "
- "ISP2100/ISP2200.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2046,
+ "RFF_ID call not supported on ISP2100/ISP2200.\n");
return (QLA_SUCCESS);
}
@@ -556,14 +553,14 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2047,
+ "RFF_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2048,
+ "RFF_ID exiting normally.\n");
}
return (rval);
@@ -609,14 +606,14 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x204d,
+ "RNN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x204e,
+ "RNN_ID exiting normally.\n");
}
return (rval);
@@ -647,8 +644,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
struct ct_sns_rsp *ct_rsp;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on "
- "ISP2100/ISP2200.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2050,
+ "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
return (QLA_SUCCESS);
}
@@ -682,14 +679,14 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2051,
+ "RSNN_NN issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2052,
+ "RSNN_NN exiting normally.\n");
}
return (rval);
@@ -757,13 +754,14 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x205f,
+ "GA_NXT Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gan_data[8] != 0x80 ||
sns_cmd->p.gan_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, "
- "ga_nxt_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
+ "GA_NXT failed, rejected request ga_nxt_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
+ sns_cmd->p.gan_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Populate fc_port_t entry. */
@@ -778,11 +776,10 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
fcport->d_id.b.domain = 0xf0;
- DEBUG2_3(printk("scsi(%ld): GA_NXT entry - "
- "nn %02x%02x%02x%02x%02x%02x%02x%02x "
+ ql_dbg(ql_dbg_disc, vha, 0x2061,
+ "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
- "portid=%02x%02x%02x.\n",
- vha->host_no,
+ "port_id=%02x%02x%02x.\n",
fcport->node_name[0], fcport->node_name[1],
fcport->node_name[2], fcport->node_name[3],
fcport->node_name[4], fcport->node_name[5],
@@ -792,7 +789,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[4], fcport->port_name[5],
fcport->port_name[6], fcport->port_name[7],
fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ fcport->d_id.b.al_pa);
}
return (rval);
@@ -831,13 +828,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x206d,
+ "GID_PT Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gid_data[8] != 0x80 ||
sns_cmd->p.gid_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, "
- "gid_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16));
+ ql_dbg(ql_dbg_disc, vha, 0x202f,
+ "GID_PT failed, rejected request, gid_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
+ sns_cmd->p.gid_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Set port IDs in switch info list. */
@@ -900,13 +898,14 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed "
- "(%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2032,
+ "GPN_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
sns_cmd->p.gpn_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected "
- "request, gpn_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
+ "GPN_ID failed, rejected request, gpn_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
+ sns_cmd->p.gpn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Save portname */
@@ -955,24 +954,24 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed "
- "(%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x203f,
+ "GNN_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
sns_cmd->p.gnn_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected "
- "request, gnn_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
+ "GNN_ID failed, rejected request, gnn_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
+ sns_cmd->p.gnn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Save nodename */
memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
WWN_SIZE);
- DEBUG2_3(printk("scsi(%ld): GID_PT entry - "
- "nn %02x%02x%02x%02x%02x%02x%02x%02x "
+ ql_dbg(ql_dbg_disc, vha, 0x206e,
+ "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
- "portid=%02x%02x%02x.\n",
- vha->host_no,
+ "port_id=%02x%02x%02x.\n",
list[i].node_name[0], list[i].node_name[1],
list[i].node_name[2], list[i].node_name[3],
list[i].node_name[4], list[i].node_name[5],
@@ -982,7 +981,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
list[i].port_name[4], list[i].port_name[5],
list[i].port_name[6], list[i].port_name[7],
list[i].d_id.b.domain, list[i].d_id.b.area,
- list[i].d_id.b.al_pa));
+ list[i].d_id.b.al_pa);
}
/* Last device exit. */
@@ -1025,17 +1024,18 @@ qla2x00_sns_rft_id(scsi_qla_host_t *vha)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2060,
+ "RFT_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.rft_data[8] != 0x80 ||
sns_cmd->p.rft_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, "
- "rft_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
+ "RFT_ID failed, rejected request rft_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
+ sns_cmd->p.rft_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2073,
+ "RFT_ID exiting normally.\n");
}
return (rval);
@@ -1081,17 +1081,18 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x204a,
+ "RNN_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
sns_cmd->p.rnn_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, "
- "rnn_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
+ "RNN_ID failed, rejected request, rnn_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
+ sns_cmd->p.rnn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x204c,
+ "RNN_ID exiting normally.\n");
}
return (rval);
@@ -1116,10 +1117,10 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
mb, BIT_1|BIT_0);
if (mb[0] != MBS_COMMAND_COMPLETE) {
- DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
- "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
- __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1],
- mb[2], mb[6], mb[7]));
+ ql_dbg(ql_dbg_disc, vha, 0x2024,
+ "Failed management_server login: loopid=%x mb[0]=%x "
+ "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+ vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
ret = QLA_FUNCTION_FAILED;
} else
vha->flags.management_server_logged_in = 1;
@@ -1292,11 +1293,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
size += 4 + WWN_SIZE;
- DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
- __func__, vha->host_no,
- eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
- eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
- eiter->a.node_name[6], eiter->a.node_name[7]));
+ ql_dbg(ql_dbg_disc, vha, 0x2025,
+ "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
+ eiter->a.node_name[0], eiter->a.node_name[1],
+ eiter->a.node_name[2], eiter->a.node_name[3],
+ eiter->a.node_name[4], eiter->a.node_name[5],
+ eiter->a.node_name[6], eiter->a.node_name[7]);
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1307,8 +1309,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no,
- eiter->a.manufacturer));
+ ql_dbg(ql_dbg_disc, vha, 0x2026,
+ "Manufacturer = %s.\n", eiter->a.manufacturer);
/* Serial number. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1320,8 +1322,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no,
- eiter->a.serial_num));
+ ql_dbg(ql_dbg_disc, vha, 0x2027,
+ "Serial no. = %s.\n", eiter->a.serial_num);
/* Model name. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1332,8 +1334,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no,
- eiter->a.model));
+ ql_dbg(ql_dbg_disc, vha, 0x2028,
+ "Model Name = %s.\n", eiter->a.model);
/* Model description. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1345,8 +1347,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no,
- eiter->a.model_desc));
+ ql_dbg(ql_dbg_disc, vha, 0x2029,
+ "Model Desc = %s.\n", eiter->a.model_desc);
/* Hardware version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1357,8 +1359,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no,
- eiter->a.hw_version));
+ ql_dbg(ql_dbg_disc, vha, 0x202a,
+ "Hardware ver = %s.\n", eiter->a.hw_version);
/* Driver version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1369,8 +1371,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no,
- eiter->a.driver_version));
+ ql_dbg(ql_dbg_disc, vha, 0x202b,
+ "Driver ver = %s.\n", eiter->a.driver_version);
/* Option ROM version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1381,8 +1383,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no,
- eiter->a.orom_version));
+ ql_dbg(ql_dbg_disc, vha, 0x202c,
+ "Optrom vers = %s.\n", eiter->a.orom_version);
/* Firmware version */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1393,44 +1395,46 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no,
- eiter->a.fw_version));
+ ql_dbg(ql_dbg_disc, vha, 0x202d,
+ "Firmware vers = %s.\n", eiter->a.fw_version);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- DEBUG13(printk("%s(%ld): RHBA identifier="
- "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
- vha->host_no, ct_req->req.rhba.hba_identifier[0],
+ ql_dbg(ql_dbg_disc, vha, 0x202e,
+ "RHBA identifier = "
+ "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
+ ct_req->req.rhba.hba_identifier[0],
ct_req->req.rhba.hba_identifier[1],
ct_req->req.rhba.hba_identifier[2],
ct_req->req.rhba.hba_identifier[3],
ct_req->req.rhba.hba_identifier[4],
ct_req->req.rhba.hba_identifier[5],
ct_req->req.rhba.hba_identifier[6],
- ct_req->req.rhba.hba_identifier[7], size));
- DEBUG13(qla2x00_dump_buffer(entries, size));
+ ct_req->req.rhba.hba_identifier[7], size);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
+ entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2030,
+ "RHBA issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
- DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2034,
+ "HBA already registered.\n");
rval = QLA_ALREADY_REGISTERED;
}
} else {
- DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2035,
+ "RHBA exiting normally.\n");
}
return rval;
@@ -1464,26 +1468,26 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
/* Prepare FDMI command arguments -- portname. */
memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
- DEBUG13(printk("%s(%ld): DHBA portname="
- "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no,
+ ql_dbg(ql_dbg_disc, vha, 0x2036,
+ "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
- ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
+ ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2037,
+ "DHBA issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2038,
+ "DHBA exiting normally.\n");
}
return rval;
@@ -1534,9 +1538,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->a.fc4_types[2] = 0x01;
size += 4 + 32;
- DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__,
- vha->host_no, eiter->a.fc4_types[2],
- eiter->a.fc4_types[1]));
+ ql_dbg(ql_dbg_disc, vha, 0x2039,
+ "FC4_TYPES=%02x %02x.\n",
+ eiter->a.fc4_types[2],
+ eiter->a.fc4_types[1]);
/* Supported speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1561,8 +1566,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
FDMI_PORT_SPEED_1GB);
size += 4 + 4;
- DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no,
- eiter->a.sup_speed));
+ ql_dbg(ql_dbg_disc, vha, 0x203a,
+ "Supported_Speed=%x.\n", eiter->a.sup_speed);
/* Current speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1596,8 +1601,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
}
size += 4 + 4;
- DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no,
- eiter->a.cur_speed));
+ ql_dbg(ql_dbg_disc, vha, 0x203b,
+ "Current_Speed=%x.\n", eiter->a.cur_speed);
/* Max frame size. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1609,8 +1614,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
size += 4 + 4;
- DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no,
- eiter->a.max_frame_size));
+ ql_dbg(ql_dbg_disc, vha, 0x203c,
+ "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
/* OS device name. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1621,8 +1626,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no,
- eiter->a.os_dev_name));
+ ql_dbg(ql_dbg_disc, vha, 0x204b,
+ "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
/* Hostname. */
if (strlen(fc_host_system_hostname(vha->host))) {
@@ -1637,35 +1642,36 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__,
- vha->host_no, eiter->a.host_name));
+ ql_dbg(ql_dbg_disc, vha, 0x203d,
+ "HostName=%s.\n", eiter->a.host_name);
}
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- DEBUG13(printk("%s(%ld): RPA portname="
- "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
- vha->host_no, ct_req->req.rpa.port_name[0],
- ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
- ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
- ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
- ct_req->req.rpa.port_name[7], size));
- DEBUG13(qla2x00_dump_buffer(entries, size));
+ ql_dbg(ql_dbg_disc, vha, 0x203e,
+ "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
+ ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
+ ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
+ ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
+ ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
+ size);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
+ entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2040,
+ "RPA issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2041,
+ "RPA exiting nornally.\n");
}
return rval;
@@ -1749,8 +1755,8 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB "
- "failed (%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2023,
+ "GFPN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GFPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -1860,8 +1866,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB "
- "failed (%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2059,
+ "GPSC issue IOCB failed (%d).\n", rval);
} else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GPSC")) != QLA_SUCCESS) {
/* FM command unsupported? */
@@ -1870,9 +1876,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
CT_REASON_INVALID_COMMAND_CODE ||
ct_rsp->header.reason_code ==
CT_REASON_COMMAND_UNSUPPORTED)) {
- DEBUG2(printk("scsi(%ld): GPSC command "
- "unsupported, disabling query...\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x205a,
+ "GPSC command unsupported, disabling "
+ "query.\n");
ha->flags.gpsc_supported = 0;
rval = QLA_FUNCTION_FAILED;
break;
@@ -1898,9 +1904,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
break;
}
- DEBUG2_3(printk("scsi(%ld): GPSC ext entry - "
- "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
- "speed=%04x.\n", vha->host_no,
+ ql_dbg(ql_dbg_disc, vha, 0x205b,
+ "GPSC ext entry - fpn "
+ "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
+ "speed=%04x.\n",
list[i].fabric_port_name[0],
list[i].fabric_port_name[1],
list[i].fabric_port_name[2],
@@ -1910,7 +1917,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
list[i].fabric_port_name[6],
list[i].fabric_port_name[7],
be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
- be16_to_cpu(ct_rsp->rsp.gpsc.speed)));
+ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
/* Last device exit. */
@@ -1968,14 +1975,12 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
- DEBUG2_3(printk(KERN_INFO
- "scsi(%ld): GFF_ID issue IOCB failed "
- "(%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x205c,
+ "GFF_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GFF_ID") != QLA_SUCCESS) {
- DEBUG2_3(printk(KERN_INFO
- "scsi(%ld): GFF_ID IOCB status had a "
- "failure status code\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x205d,
+ "GFF_ID IOCB status had a failure status code.\n");
} else {
fcp_scsi_features =
ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 920b76b..37da04d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -153,11 +153,10 @@ qla2x00_async_iocb_timeout(srb_t *sp)
fc_port_t *fcport = sp->fcport;
struct srb_ctx *ctx = sp->ctx;
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n",
- fcport->vha->host_no, sp->handle,
- ctx->name, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+ "Async-%s timeout - portid=%02x%02x%02x.\n",
+ ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
fcport->flags &= ~FCF_ASYNC_SENT;
if (ctx->type == SRB_LOGIN_CMD) {
@@ -211,11 +210,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (rval != QLA_SUCCESS)
goto done_free_sp;
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
- "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->login_retry));
+ ql_dbg(ql_dbg_disc, vha, 0x2072,
+ "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, fcport->login_retry);
return rval;
done_free_sp:
@@ -259,10 +257,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
- fcport->vha->host_no, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, vha, 0x2070,
+ "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -309,11 +307,10 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
if (rval != QLA_SUCCESS)
goto done_free_sp;
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n",
- fcport->vha->host_no, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
-
+ ql_dbg(ql_dbg_disc, vha, 0x206f,
+ "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -362,11 +359,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
if (rval != QLA_SUCCESS)
goto done_free_sp;
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n",
- fcport->vha->host_no, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
-
+ ql_dbg(ql_dbg_taskm, vha, 0x802f,
+ "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -471,9 +467,8 @@ qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): TM IOCB failed (%x).\n",
- __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_taskm, vha, 0x8030,
+ "TM IOCB failed (%x).\n", rval);
}
return;
@@ -519,11 +514,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
set_bit(0, ha->req_qid_map);
set_bit(0, ha->rsp_qid_map);
- qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
+ ql_log(ql_log_info, vha, 0x0040,
+ "Configuring PCI space...\n");
rval = ha->isp_ops->pci_config(vha);
if (rval) {
- DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
- vha->host_no));
+ ql_log(ql_log_warn, vha, 0x0044,
+ "Unable to configure PCI space.\n");
return (rval);
}
@@ -531,20 +527,21 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
rval = qla2xxx_get_flash_info(vha);
if (rval) {
- DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
- vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x004f,
+ "Unable to validate FLASH data.\n");
return (rval);
}
ha->isp_ops->get_flash_version(vha, req->ring);
-
- qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
+ ql_log(ql_log_info, vha, 0x0061,
+ "Configure NVRAM parameters...\n");
ha->isp_ops->nvram_config(vha);
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
- qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
+ ql_log(ql_log_info, vha, 0x0077,
+ "Masking HBA WWPN "
"%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
vha->port_name[0], vha->port_name[1],
vha->port_name[2], vha->port_name[3],
@@ -553,7 +550,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
- qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
+ ql_log(ql_log_info, vha, 0x0078,
+ "Verifying loaded RISC code...\n");
if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
rval = ha->isp_ops->chip_diag(vha);
@@ -567,7 +565,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA84XX(ha)) {
ha->cs84xx = qla84xx_get_chip(vha);
if (!ha->cs84xx) {
- qla_printk(KERN_ERR, ha,
+ ql_log(ql_log_warn, vha, 0x00d0,
"Unable to configure ISP84XX.\n");
return QLA_FUNCTION_FAILED;
}
@@ -579,8 +577,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
/* Issue verify 84xx FW IOCB to complete 84xx initialization */
rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "Unable to initialize ISP84XX.\n");
+ ql_log(ql_log_warn, vha, 0x00d4,
+ "Unable to initialize ISP84XX.\n");
qla84xx_put_chip(vha);
}
}
@@ -797,9 +795,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
rval = QLA_FUNCTION_FAILED;
if (ha->flags.disable_risc_code_load) {
- DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
- vha->host_no));
- qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
+ ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
/* Verify checksum of loaded RISC code. */
rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
@@ -810,10 +806,9 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
}
}
- if (rval) {
- DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
- vha->host_no));
- }
+ if (rval)
+ ql_dbg(ql_dbg_init, vha, 0x007a,
+ "**** Load RISC code ****.\n");
return (rval);
}
@@ -1105,8 +1100,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
/* Assume a failed state */
rval = QLA_FUNCTION_FAILED;
- DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
- vha->host_no, (u_long)&reg->flash_address));
+ ql_dbg(ql_dbg_init, vha, 0x007b,
+ "Testing device at %lx.\n", (u_long)&reg->flash_address);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1128,8 +1123,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
if (!cnt)
goto chip_diag_failed;
- DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
- vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x007c,
+ "Reset register cleared by chip reset.\n");
/* Reset RISC processor. */
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -1150,7 +1145,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
goto chip_diag_failed;
/* Check product ID of chip */
- DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
mb[1] = RD_MAILBOX_REG(ha, reg, 1);
mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -1158,8 +1153,9 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
mb[3] != PROD_ID_3) {
- qla_printk(KERN_WARNING, ha,
- "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x0062,
+ "Wrong product ID = 0x%x,0x%x,0x%x.\n",
+ mb[1], mb[2], mb[3]);
goto chip_diag_failed;
}
@@ -1178,8 +1174,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
if (IS_QLA2200(ha) &&
RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
/* Limit firmware transfer size with a 2200A */
- DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
ha->device_type |= DT_ISP2200A;
ha->fw_transfer_size = 128;
@@ -1188,24 +1183,20 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
/* Wrap Incoming Mailboxes Test. */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
rval = qla2x00_mbx_reg_test(vha);
- if (rval) {
- DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha,
- "Failed mailbox send register test\n");
- }
- else {
+ if (rval)
+ ql_log(ql_log_warn, vha, 0x0080,
+ "Failed mailbox send register test.\n");
+ else
/* Flag a successful rval */
rval = QLA_SUCCESS;
- }
spin_lock_irqsave(&ha->hardware_lock, flags);
chip_diag_failed:
if (rval)
- DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
- "****\n", vha->host_no));
+ ql_log(ql_log_info, vha, 0x0081,
+ "Chip diagnostics **** FAILED ****.\n");
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1232,10 +1223,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
rval = qla2x00_mbx_reg_test(vha);
if (rval) {
- DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha,
- "Failed mailbox send register test\n");
+ ql_log(ql_log_warn, vha, 0x0082,
+ "Failed mailbox send register test.\n");
} else {
/* Flag a successful rval */
rval = QLA_SUCCESS;
@@ -1257,8 +1246,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
struct rsp_que *rsp = ha->rsp_q_map[0];
if (ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "Firmware dump previously allocated.\n");
+ ql_dbg(ql_dbg_init, vha, 0x00bd,
+ "Firmware dump already allocated.\n");
return;
}
@@ -1288,8 +1277,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
GFP_KERNEL);
if (!tc) {
- qla_printk(KERN_WARNING, ha, "Unable to allocate "
- "(%d KB) for FCE.\n", FCE_SIZE / 1024);
+ ql_log(ql_log_warn, vha, 0x00be,
+ "Unable to allocate (%d KB) for FCE.\n",
+ FCE_SIZE / 1024);
goto try_eft;
}
@@ -1297,16 +1287,15 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
ha->fce_mb, &ha->fce_bufs);
if (rval) {
- qla_printk(KERN_WARNING, ha, "Unable to initialize "
- "FCE (%d).\n", rval);
+ ql_log(ql_log_warn, vha, 0x00bf,
+ "Unable to initialize FCE (%d).\n", rval);
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
tc_dma);
ha->flags.fce_enabled = 0;
goto try_eft;
}
-
- qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
- FCE_SIZE / 1024);
+ ql_log(ql_log_info, vha, 0x00c0,
+ "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
ha->flags.fce_enabled = 1;
@@ -1317,23 +1306,23 @@ try_eft:
tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
GFP_KERNEL);
if (!tc) {
- qla_printk(KERN_WARNING, ha, "Unable to allocate "
- "(%d KB) for EFT.\n", EFT_SIZE / 1024);
+ ql_log(ql_log_warn, vha, 0x00c1,
+ "Unable to allocate (%d KB) for EFT.\n",
+ EFT_SIZE / 1024);
goto cont_alloc;
}
memset(tc, 0, EFT_SIZE);
rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
if (rval) {
- qla_printk(KERN_WARNING, ha, "Unable to initialize "
- "EFT (%d).\n", rval);
+ ql_log(ql_log_warn, vha, 0x00c2,
+ "Unable to initialize EFT (%d).\n", rval);
dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
tc_dma);
goto cont_alloc;
}
-
- qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
- EFT_SIZE / 1024);
+ ql_log(ql_log_info, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
eft_size = EFT_SIZE;
ha->eft_dma = tc_dma;
@@ -1350,8 +1339,9 @@ cont_alloc:
ha->fw_dump = vmalloc(dump_size);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
- "firmware dump!!!\n", dump_size / 1024);
+ ql_log(ql_log_warn, vha, 0x00c4,
+ "Unable to allocate (%d KB) for firmware dump.\n",
+ dump_size / 1024);
if (ha->fce) {
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
@@ -1368,8 +1358,8 @@ cont_alloc:
}
return;
}
- qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
- dump_size / 1024);
+ ql_log(ql_log_info, vha, 0x00c5,
+ "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
ha->fw_dump_len = dump_size;
ha->fw_dump->signature[0] = 'Q';
@@ -1398,23 +1388,21 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
int rval;
uint16_t dc;
uint32_t dw;
- struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(vha->hw))
return QLA_SUCCESS;
rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Sync-MPI: Unable to acquire semaphore.\n"));
+ ql_log(ql_log_warn, vha, 0x0105,
+ "Unable to acquire semaphore.\n");
goto done;
}
pci_read_config_word(vha->hw->pdev, 0x54, &dc);
rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Sync-MPI: Unable to read sync.\n"));
+ ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
goto done_release;
}
@@ -1426,15 +1414,14 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
dw |= dc;
rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Sync-MPI: Unable to gain sync.\n"));
+ ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
}
done_release:
rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Sync-MPI: Unable to release semaphore.\n"));
+ ql_log(ql_log_warn, vha, 0x006d,
+ "Unable to release semaphore.\n");
}
done:
@@ -1479,14 +1466,14 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
/* Load firmware sequences */
rval = ha->isp_ops->load_risc(vha, &srisc_address);
if (rval == QLA_SUCCESS) {
- DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
- "code.\n", vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x00c9,
+ "Verifying Checksum of loaded RISC code.\n");
rval = qla2x00_verify_checksum(vha, srisc_address);
if (rval == QLA_SUCCESS) {
/* Start firmware execution. */
- DEBUG(printk("scsi(%ld): Checksum OK, start "
- "firmware.\n", vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x00ca,
+ "Starting firmware.\n");
rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
@@ -1522,9 +1509,9 @@ enable_82xx_npiv:
}
}
} else {
- DEBUG2(printk(KERN_INFO
- "scsi(%ld): ISP Firmware failed checksum.\n",
- vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x00cd,
+ "ISP Firmware failed checksum.\n");
+ goto failed;
}
}
@@ -1549,7 +1536,7 @@ enable_82xx_npiv:
ha->flags.fac_supported = 1;
ha->fdt_block_size = size << 2;
} else {
- qla_printk(KERN_ERR, ha,
+ ql_log(ql_log_warn, vha, 0x00ce,
"Unsupported FAC firmware (%d.%02d.%02d).\n",
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
@@ -1557,8 +1544,8 @@ enable_82xx_npiv:
}
failed:
if (rval) {
- DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
- vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x00cf,
+ "Setup chip ****FAILED****.\n");
}
return (rval);
@@ -1608,10 +1595,11 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha)
return;
/* Serial Link options. */
- DEBUG3(printk("scsi(%ld): Serial link options:\n",
- vha->host_no));
- DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
- sizeof(ha->fw_seriallink_options)));
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
+ "Serial link options.\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
+ (uint8_t *)&ha->fw_seriallink_options,
+ sizeof(ha->fw_seriallink_options));
ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
if (ha->fw_seriallink_options[3] & BIT_2) {
@@ -1688,7 +1676,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
le16_to_cpu(ha->fw_seriallink_options24[2]),
le16_to_cpu(ha->fw_seriallink_options24[3]));
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x0104,
"Unable to update Serial Link options (%x).\n", rval);
}
}
@@ -1746,8 +1734,9 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->rid = __constant_cpu_to_le16(rid);
if (ha->flags.msix_enabled) {
msix = &ha->msix_entries[1];
- DEBUG2_17(printk(KERN_INFO
- "Registering vector 0x%x for base que\n", msix->entry));
+ ql_dbg(ql_dbg_init, vha, 0x00fd,
+ "Registering vector 0x%x for base que.\n",
+ msix->entry);
icb->msix = cpu_to_le16(msix->entry);
}
/* Use alternate PCI bus number */
@@ -1764,8 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->firmware_options_2 &=
__constant_cpu_to_le32(~BIT_22);
ha->flags.disable_msix_handshake = 1;
- qla_printk(KERN_INFO, ha,
- "MSIX Handshake Disable Mode turned on\n");
+ ql_dbg(ql_dbg_init, vha, 0x00fe,
+ "MSIX Handshake Disable Mode turned on.\n");
} else {
icb->firmware_options_2 |=
__constant_cpu_to_le32(BIT_22);
@@ -1850,7 +1839,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
/* Update any ISP specific firmware options before initialization. */
ha->isp_ops->update_fw_options(vha);
- DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
if (ha->flags.npiv_supported) {
if (ha->operating_mode == LOOP)
@@ -1866,11 +1855,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
if (rval) {
- DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
- vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x00d2,
+ "Init Firmware **** FAILED ****.\n");
} else {
- DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x00d3,
+ "Init Firmware -- success.\n");
}
return (rval);
@@ -1913,10 +1902,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
/* Wait for ISP to finish LIP */
if (!vha->flags.init_done)
- qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
-
- DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
- vha->host_no));
+ ql_log(ql_log_info, vha, 0x801e,
+ "Waiting for LIP to complete.\n");
do {
rval = qla2x00_get_firmware_state(vha, state);
@@ -1925,30 +1912,35 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
vha->device_flags &= ~DFLG_NO_CABLE;
}
if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
- DEBUG16(printk("scsi(%ld): fw_state=%x "
- "84xx=%x.\n", vha->host_no, state[0],
- state[2]));
+ ql_dbg(ql_dbg_taskm, vha, 0x801f,
+ "fw_state=%x 84xx=%x.\n", state[0],
+ state[2]);
if ((state[2] & FSTATE_LOGGED_IN) &&
(state[2] & FSTATE_WAITING_FOR_VERIFY)) {
- DEBUG16(printk("scsi(%ld): Sending "
- "verify iocb.\n", vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8028,
+ "Sending verify iocb.\n");
cs84xx_time = jiffies;
rval = qla84xx_init_chip(vha);
- if (rval != QLA_SUCCESS)
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn,
+ vha, 0x8043,
+ "Init chip failed.\n");
break;
+ }
/* Add time taken to initialize. */
cs84xx_time = jiffies - cs84xx_time;
wtime += cs84xx_time;
mtime += cs84xx_time;
- DEBUG16(printk("scsi(%ld): Increasing "
- "wait time by %ld. New time %ld\n",
- vha->host_no, cs84xx_time, wtime));
+ ql_dbg(ql_dbg_taskm, vha, 0x8042,
+ "Increasing wait time by %ld. "
+ "New time %ld.\n", cs84xx_time,
+ wtime);
}
} else if (state[0] == FSTATE_READY) {
- DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
- vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8037,
+ "F/W Ready - OK.\n");
qla2x00_get_retry_cnt(vha, &ha->retry_count,
&ha->login_timeout, &ha->r_a_tov);
@@ -1965,7 +1957,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
* other than Wait for Login.
*/
if (time_after_eq(jiffies, mtime)) {
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x8038,
"Cable is unplugged...\n");
vha->device_flags |= DFLG_NO_CABLE;
@@ -1985,17 +1977,17 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
/* Delay for a while */
msleep(500);
- DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
- vha->host_no, state[0], jiffies));
+ ql_dbg(ql_dbg_taskm, vha, 0x8039,
+ "fw_state=%x curr time=%lx.\n", state[0], jiffies);
} while (1);
- DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
- vha->host_no, state[0], state[1], state[2], state[3], state[4],
- jiffies));
+ ql_dbg(ql_dbg_taskm, vha, 0x803a,
+ "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
+ state[1], state[2], state[3], state[4], jiffies);
if (rval) {
- DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
- vha->host_no));
+ ql_log(ql_log_warn, vha, 0x803b,
+ "Firmware ready **** FAILED ****.\n");
}
return (rval);
@@ -2034,19 +2026,19 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
IS_QLA8XXX_TYPE(ha) ||
(rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
- DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2008,
+ "Loop is in a transition state.\n");
} else {
- qla_printk(KERN_WARNING, ha,
- "ERROR -- Unable to get host loop ID.\n");
+ ql_log(ql_log_warn, vha, 0x2009,
+ "Unable to get host loop ID.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
return (rval);
}
if (topo == 4) {
- qla_printk(KERN_INFO, ha,
- "Cannot get topology - retrying.\n");
+ ql_log(ql_log_info, vha, 0x200a,
+ "Cannot get topology - retrying.\n");
return (QLA_FUNCTION_FAILED);
}
@@ -2059,31 +2051,27 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
switch (topo) {
case 0:
- DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
ha->current_topology = ISP_CFG_NL;
strcpy(connect_type, "(Loop)");
break;
case 1:
- DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
ha->switch_cap = sw_cap;
ha->current_topology = ISP_CFG_FL;
strcpy(connect_type, "(FL_Port)");
break;
case 2:
- DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
ha->operating_mode = P2P;
ha->current_topology = ISP_CFG_N;
strcpy(connect_type, "(N_Port-to-N_Port)");
break;
case 3:
- DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
ha->switch_cap = sw_cap;
ha->operating_mode = P2P;
ha->current_topology = ISP_CFG_F;
@@ -2091,9 +2079,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
break;
default:
- DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
- "Using NL.\n",
- vha->host_no, topo));
+ ql_dbg(ql_dbg_disc, vha, 0x200f,
+ "HBA in unknown topology %x, using NL.\n", topo);
ha->current_topology = ISP_CFG_NL;
strcpy(connect_type, "(Loop)");
break;
@@ -2106,14 +2093,16 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
vha->d_id.b.al_pa = al_pa;
if (!vha->flags.init_done)
- qla_printk(KERN_INFO, ha,
- "Topology - %s, Host Loop address 0x%x\n",
+ ql_log(ql_log_info, vha, 0x2010,
+ "Topology - %s, Host Loop address 0x%x.\n",
connect_type, vha->loop_id);
if (rval) {
- DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x2011,
+ "%s FAILED\n", __func__);
} else {
- DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2012,
+ "%s success\n", __func__);
}
return(rval);
@@ -2227,18 +2216,22 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
chksum += *ptr++;
- DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
- DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
+ "Contents of NVRAM.\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
+ (uint8_t *)nv, ha->nvram_size);
/* Bad NVRAM data, set defaults parameters. */
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
/* Reset NVRAM data. */
- qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
- "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
- nv->nvram_version);
- qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
- "invalid -- WWPN) defaults.\n");
+ ql_log(ql_log_warn, vha, 0x0064,
+ "Inconisistent NVRAM "
+ "detected: checksum=0x%x id=%c version=0x%x.\n",
+ chksum, nv->id[0], nv->nvram_version);
+ ql_log(ql_log_warn, vha, 0x0065,
+ "Falling back to "
+ "functioning (yet invalid -- WWPN) defaults.\n");
/*
* Set default initialization control block.
@@ -2382,8 +2375,13 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
/*
* Set host adapter parameters.
*/
+
+ /*
+ * BIT_7 in the host-parameters section allows for modification to
+ * internal driver logging.
+ */
if (nv->host_p[0] & BIT_7)
- ql2xextended_error_logging = 1;
+ ql2xextended_error_logging = 0x7fffffff;
ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
/* Always load RISC code on non ISP2[12]00 chips. */
if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -2488,10 +2486,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
- DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
- "delay (%d us).\n", vha->host_no, ha->zio_mode,
- ha->zio_timer * 100));
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x0068,
"ZIO mode %d enabled; timer delay (%d us).\n",
ha->zio_mode, ha->zio_timer * 100);
@@ -2502,8 +2497,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
}
if (rval) {
- DEBUG2_3(printk(KERN_WARNING
- "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x0069,
+ "NVRAM configuration failed.\n");
}
return (rval);
}
@@ -2574,15 +2569,15 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
rval = qla2x00_configure_hba(vha);
if (rval != QLA_SUCCESS) {
- DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2013,
+ "Unable to configure HBA.\n");
return (rval);
}
}
save_flags = flags = vha->dpc_flags;
- DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
- vha->host_no, flags));
+ ql_dbg(ql_dbg_disc, vha, 0x2014,
+ "Configure loop -- dpc flags = 0x%lx.\n", flags);
/*
* If we have both an RSCN and PORT UPDATE pending then handle them
@@ -2619,15 +2614,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
}
if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
- if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2015,
+ "Loop resync needed, failing.\n");
rval = QLA_FUNCTION_FAILED;
+ }
else
rval = qla2x00_configure_local_loop(vha);
}
if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
- if (LOOP_TRANSITION(vha))
+ if (LOOP_TRANSITION(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0x201e,
+ "Needs RSCN update and loop transition.\n");
rval = QLA_FUNCTION_FAILED;
+ }
else
rval = qla2x00_configure_fabric(vha);
}
@@ -2638,16 +2639,17 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
rval = QLA_FUNCTION_FAILED;
} else {
atomic_set(&vha->loop_state, LOOP_READY);
-
- DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2069,
+ "LOOP READY.\n");
}
}
if (rval) {
- DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x206a,
+ "%s *** FAILED ***.\n", __func__);
} else {
- DEBUG3(printk("%s: exiting normally\n", __func__));
+ ql_dbg(ql_dbg_disc, vha, 0x206b,
+ "%s: exiting normally.\n", __func__);
}
/* Restore state if a resync event occurred during processing */
@@ -2695,8 +2697,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport = NULL;
entries = MAX_FIBRE_DEVICES;
- DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
- DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
+ ql_dbg(ql_dbg_disc, vha, 0x2016,
+ "Getting FCAL position map.\n");
+ if (ql2xextended_error_logging & ql_dbg_disc)
+ qla2x00_get_fcal_position_map(vha, NULL);
/* Get list of logged in devices. */
memset(ha->gid_list, 0, GID_LIST_SIZE);
@@ -2705,14 +2709,17 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS)
goto cleanup_allocation;
- DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
- vha->host_no, entries));
- DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
- entries * sizeof(struct gid_list_info)));
+ ql_dbg(ql_dbg_disc, vha, 0x2017,
+ "Entries in ID list (%d).\n", entries);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+ (uint8_t *)ha->gid_list,
+ entries * sizeof(struct gid_list_info));
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x2018,
+ "Memory allocation failed for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
}
@@ -2726,9 +2733,9 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport->port_type != FCT_BROADCAST &&
(fcport->flags & FCF_FABRIC_DEVICE) == 0) {
- DEBUG(printk("scsi(%ld): Marking port lost, "
- "loop_id=0x%04x\n",
- vha->host_no, fcport->loop_id));
+ ql_dbg(ql_dbg_disc, vha, 0x2019,
+ "Marking port lost loop_id=0x%04x.\n",
+ fcport->loop_id);
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
}
@@ -2769,12 +2776,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->vp_idx = vha->vp_idx;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
- DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
- "information -- get_port_database=%x, "
- "loop_id=0x%04x\n",
- vha->host_no, rval2, new_fcport->loop_id));
- DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x201a,
+ "Failed to retrieve fcport information "
+ "-- get_port_database=%x, loop_id=0x%04x.\n",
+ rval2, new_fcport->loop_id);
+ ql_dbg(ql_dbg_disc, vha, 0x201b,
+ "Scheduling resync.\n");
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
continue;
}
@@ -2810,6 +2817,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport = new_fcport;
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x201c,
+ "Failed to allocate memory for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
}
@@ -2828,8 +2837,8 @@ cleanup_allocation:
kfree(new_fcport);
if (rval != QLA_SUCCESS) {
- DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
- "rval=%x\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x201d,
+ "Configure local loop error exit: rval=%x.\n", rval);
}
return (rval);
@@ -2858,27 +2867,27 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
mb);
if (rval != QLA_SUCCESS) {
- DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
- "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
- vha->host_no, fcport->port_name[0], fcport->port_name[1],
+ ql_dbg(ql_dbg_disc, vha, 0x2004,
+ "Unable to adjust iIDMA "
+ "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
+ "%04x.\n", fcport->port_name[0], fcport->port_name[1],
fcport->port_name[2], fcport->port_name[3],
fcport->port_name[4], fcport->port_name[5],
fcport->port_name[6], fcport->port_name[7], rval,
- fcport->fp_speed, mb[0], mb[1]));
+ fcport->fp_speed, mb[0], mb[1]);
} else {
link_speed = link_speeds[LS_UNKNOWN];
if (fcport->fp_speed < 5)
link_speed = link_speeds[fcport->fp_speed];
else if (fcport->fp_speed == 0x13)
link_speed = link_speeds[5];
- DEBUG2(qla_printk(KERN_INFO, ha,
- "iIDMA adjusted to %s GB/s on "
- "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
- link_speed, fcport->port_name[0],
- fcport->port_name[1], fcport->port_name[2],
- fcport->port_name[3], fcport->port_name[4],
- fcport->port_name[5], fcport->port_name[6],
- fcport->port_name[7]));
+ ql_dbg(ql_dbg_disc, vha, 0x2005,
+ "iIDMA adjusted to %s GB/s "
+ "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
+ fcport->port_name[0], fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7]);
}
}
@@ -2887,7 +2896,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
struct fc_rport_identifiers rport_ids;
struct fc_rport *rport;
- struct qla_hw_data *ha = vha->hw;
unsigned long flags;
qla2x00_rport_del(fcport);
@@ -2899,8 +2907,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
if (!rport) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate fc remote port!\n");
+ ql_log(ql_log_warn, vha, 0x2006,
+ "Unable to allocate fc remote port.\n");
return;
}
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -2975,8 +2983,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
loop_id = SNS_FL_PORT;
rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
if (rval != QLA_SUCCESS) {
- DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
- "Port\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x201f,
+ "MBX_GET_PORT_NAME failed, No FL Port.\n");
vha->device_flags &= ~SWITCH_FOUND;
return (QLA_SUCCESS);
@@ -3003,32 +3011,32 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
0xfc, mb, BIT_1 | BIT_0);
if (mb[0] != MBS_COMMAND_COMPLETE) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
- "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id,
- mb[0], mb[1], mb[2], mb[6], mb[7]));
+ ql_dbg(ql_dbg_disc, vha, 0x2042,
+ "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
+ "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
+ mb[2], mb[6], mb[7]);
return (QLA_SUCCESS);
}
if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
if (qla2x00_rft_id(vha)) {
/* EMPTY */
- DEBUG2(printk("scsi(%ld): Register FC-4 "
- "TYPE failed.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2045,
+ "Register FC-4 TYPE failed.\n");
}
if (qla2x00_rff_id(vha)) {
/* EMPTY */
- DEBUG2(printk("scsi(%ld): Register FC-4 "
- "Features failed.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2049,
+ "Register FC-4 Features failed.\n");
}
if (qla2x00_rnn_id(vha)) {
/* EMPTY */
- DEBUG2(printk("scsi(%ld): Register Node Name "
- "failed.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Register Node Name failed.\n");
} else if (qla2x00_rsnn_nn(vha)) {
/* EMPTY */
- DEBUG2(printk("scsi(%ld): Register Symbolic "
- "Node Name failed.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2053,
+ "Register Symobilic Node Name failed.\n");
}
}
@@ -3132,8 +3140,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
if (rval) {
- DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
- "rval=%d\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2068,
+ "Configure fabric error exit rval=%d.\n", rval);
}
return (rval);
@@ -3175,8 +3183,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
if (!swl) {
/*EMPTY*/
- DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
- "on GA_NXT\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2054,
+ "GID_PT allocations failed, fallback on GA_NXT.\n");
} else {
if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
kfree(swl);
@@ -3201,6 +3209,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x205e,
+ "Failed to allocate memory for fcport.\n");
kfree(swl);
return (QLA_MEMORY_ALLOC_FAILED);
}
@@ -3247,9 +3257,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
/* Send GA_NXT to the switch */
rval = qla2x00_ga_nxt(vha, new_fcport);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "SNS scan failed -- assuming zero-entry "
- "result...\n");
+ ql_log(ql_log_warn, vha, 0x2064,
+ "SNS scan failed -- assuming "
+ "zero-entry result.\n");
list_for_each_entry_safe(fcport, fcptemp,
new_fcports, list) {
list_del(&fcport->list);
@@ -3265,9 +3275,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
wrap.b24 = new_fcport->d_id.b24;
first_dev = 0;
} else if (new_fcport->d_id.b24 == wrap.b24) {
- DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
- vha->host_no, new_fcport->d_id.b.domain,
- new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "Device wrap (%02x%02x%02x).\n",
+ new_fcport->d_id.b.domain,
+ new_fcport->d_id.b.area,
+ new_fcport->d_id.b.al_pa);
break;
}
@@ -3372,6 +3384,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
nxt_d_id.b24 = new_fcport->d_id.b24;
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x2066,
+ "Memory allocation failed for fcport.\n");
kfree(swl);
return (QLA_MEMORY_ALLOC_FAILED);
}
@@ -3501,10 +3515,10 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
d_id.b.area = MSB(LSW(rscn_entry));
d_id.b.al_pa = LSB(LSW(rscn_entry));
- DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
- "[%02x/%02x%02x%02x].\n",
- vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
- d_id.b.area, d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, vha, 0x2020,
+ "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
+ vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
+ d_id.b.al_pa);
vha->rscn_out_ptr++;
if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
@@ -3520,17 +3534,17 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
if (rscn_entry != vha->rscn_queue[rscn_out_iter])
break;
- DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
- "entry found at [%d].\n", vha->host_no,
- rscn_out_iter));
+ ql_dbg(ql_dbg_disc, vha, 0x2021,
+ "Skipping duplicate RSCN queue entry found at "
+ "[%d].\n", rscn_out_iter);
vha->rscn_out_ptr = rscn_out_iter;
}
/* Queue overflow, set switch default case. */
if (vha->flags.rscn_queue_overflow) {
- DEBUG(printk("scsi(%ld): device_resync: rscn "
- "overflow.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2022,
+ "device_resync: rscn overflow.\n");
format = 3;
vha->flags.rscn_queue_overflow = 0;
@@ -3659,10 +3673,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
tmp_loopid = 0;
for (;;) {
- DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
- "for port %02x%02x%02x.\n",
- vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, vha, 0x2000,
+ "Trying Fabric Login w/loop id 0x%04x for port "
+ "%02x%02x%02x.\n",
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
/* Login fcport on switch. */
ha->isp_ops->fabric_login(vha, fcport->loop_id,
@@ -3680,10 +3695,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
tmp_loopid = fcport->loop_id;
fcport->loop_id = mb[1];
- DEBUG(printk("Fabric Login: port in use - next "
- "loop id=0x%04x, port Id=%02x%02x%02x.\n",
+ ql_dbg(ql_dbg_disc, vha, 0x2001,
+ "Fabric Login: port in use - next loop "
+ "id=0x%04x, port id= %02x%02x%02x.\n",
fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa));
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
} else if (mb[0] == MBS_COMMAND_COMPLETE) {
/*
@@ -3744,11 +3760,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
/*
* unrecoverable / not handled error
*/
- DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
- "loop_id=%x jiffies=%lx.\n",
- __func__, vha->host_no, mb[0],
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
+ ql_dbg(ql_dbg_disc, vha, 0x2002,
+ "Failed=%x port_id=%02x%02x%02x loop_id=%x "
+ "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->loop_id, jiffies);
*next_loopid = fcport->loop_id;
ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3822,15 +3838,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
req = vha->req;
rsp = req->rsp;
- atomic_set(&vha->loop_state, LOOP_UPDATE);
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
if (vha->flags.online) {
if (!(rval = qla2x00_fw_ready(vha))) {
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
- atomic_set(&vha->loop_state, LOOP_UPDATE);
-
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0,
MK_SYNC_ALL);
@@ -3852,7 +3865,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
return (QLA_FUNCTION_FAILED);
if (rval)
- DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
+ ql_dbg(ql_dbg_disc, vha, 0x206c,
+ "%s *** FAILED ***.\n", __func__);
return (rval);
}
@@ -3929,8 +3943,8 @@ qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
- qla_printk(KERN_INFO, ha,
- "Performing ISP error recovery - ha= %p.\n", ha);
+ ql_dbg(ql_dbg_p3p, vha, 0xb002,
+ "Performing ISP error recovery - ha=%p.\n", ha);
atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -3964,8 +3978,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ha->qla_stats.total_isp_aborts++;
- qla_printk(KERN_INFO, ha,
- "Performing ISP error recovery - ha= %p.\n", ha);
+ ql_log(ql_log_info, vha, 0x00af,
+ "Performing ISP error recovery - ha=%p.\n", ha);
/* For ISP82XX, reset_chip is just disabling interrupts.
* Driver waits for the completion of the commands.
@@ -4016,6 +4030,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
/* Make sure for ISP 82XX IO DMA is complete */
if (IS_QLA82XX(ha)) {
qla82xx_chip_reset_cleanup(vha);
+ ql_log(ql_log_info, vha, 0x00b4,
+ "Done chip reset cleanup.\n");
/* Done waiting for pending commands.
* Reset the online flag.
@@ -4097,7 +4113,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
ha->fce_dma, ha->fce_bufs, ha->fce_mb,
&ha->fce_bufs);
if (rval) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x8033,
"Unable to reinitialize FCE "
"(%d).\n", rval);
ha->flags.fce_enabled = 0;
@@ -4109,7 +4125,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
rval = qla2x00_enable_eft_trace(vha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x8034,
"Unable to reinitialize EFT "
"(%d).\n", rval);
}
@@ -4118,9 +4134,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
vha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
if (ha->isp_abort_cnt == 0) {
- qla_printk(KERN_WARNING, ha,
- "ISP error recovery failed - "
- "board disabled\n");
+ ql_log(ql_log_fatal, vha, 0x8035,
+ "ISP error recover failed - "
+ "board disabled.\n");
/*
* The next call disables the board
* completely.
@@ -4132,16 +4148,16 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
status = 0;
} else { /* schedule another ISP abort */
ha->isp_abort_cnt--;
- DEBUG(printk("qla%ld: ISP abort - "
- "retry remaining %d\n",
- vha->host_no, ha->isp_abort_cnt));
+ ql_dbg(ql_dbg_taskm, vha, 0x8020,
+ "ISP abort - retry remaining %d.\n",
+ ha->isp_abort_cnt);
status = 1;
}
} else {
ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
- DEBUG(printk("qla2x00(%ld): ISP error recovery "
- "- retrying (%d) more times\n",
- vha->host_no, ha->isp_abort_cnt));
+ ql_dbg(ql_dbg_taskm, vha, 0x8021,
+ "ISP error recovery - retrying (%d) "
+ "more times.\n", ha->isp_abort_cnt);
set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
status = 1;
}
@@ -4150,9 +4166,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
}
if (!status) {
- DEBUG(printk(KERN_INFO
- "qla2x00_abort_isp(%ld): succeeded.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
@@ -4169,8 +4183,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
- qla_printk(KERN_INFO, ha,
- "qla2x00_abort_isp: **** FAILED ****\n");
+ ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", __func__);
}
return(status);
@@ -4211,8 +4224,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
status = qla2x00_fw_ready(vha);
if (!status) {
- DEBUG(printk("%s(): Start configure loop, "
- "status = %d\n", __func__, status));
+ ql_dbg(ql_dbg_taskm, vha, 0x8031,
+ "Start configure loop status = %d.\n", status);
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4234,9 +4247,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
if ((vha->device_flags & DFLG_NO_CABLE))
status = 0;
- DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
- __func__,
- status));
+ ql_dbg(ql_dbg_taskm, vha, 0x8032,
+ "Configure loop done, status = 0x%x.\n", status);
}
return (status);
}
@@ -4256,13 +4268,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
rsp->options &= ~BIT_0;
ret = qla25xx_init_rsp_que(base_vha, rsp);
if (ret != QLA_SUCCESS)
- DEBUG2_17(printk(KERN_WARNING
- "%s Rsp que:%d init failed\n", __func__,
- rsp->id));
+ ql_dbg(ql_dbg_init, base_vha, 0x00ff,
+ "%s Rsp que: %d init failed.\n",
+ __func__, rsp->id);
else
- DEBUG2_17(printk(KERN_INFO
- "%s Rsp que:%d inited\n", __func__,
- rsp->id));
+ ql_dbg(ql_dbg_init, base_vha, 0x0100,
+ "%s Rsp que: %d inited.\n",
+ __func__, rsp->id);
}
}
for (i = 1; i < ha->max_req_queues; i++) {
@@ -4272,13 +4284,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
req->options &= ~BIT_0;
ret = qla25xx_init_req_que(base_vha, req);
if (ret != QLA_SUCCESS)
- DEBUG2_17(printk(KERN_WARNING
- "%s Req que:%d init failed\n", __func__,
- req->id));
+ ql_dbg(ql_dbg_init, base_vha, 0x0101,
+ "%s Req que: %d init failed.\n",
+ __func__, req->id);
else
- DEBUG2_17(printk(KERN_WARNING
- "%s Req que:%d inited\n", __func__,
- req->id));
+ ql_dbg(ql_dbg_init, base_vha, 0x0102,
+ "%s Req que: %d inited.\n",
+ __func__, req->id);
}
}
return ret;
@@ -4397,19 +4409,22 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
chksum += le32_to_cpu(*dptr++);
- DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
- DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
+ "Contents of NVRAM\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
+ (uint8_t *)nv, ha->nvram_size);
/* Bad NVRAM data, set defaults parameters. */
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
|| nv->id[3] != ' ' ||
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
- qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
- "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
- le16_to_cpu(nv->nvram_version));
- qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
- "invalid -- WWPN) defaults.\n");
+ ql_log(ql_log_warn, vha, 0x006b,
+ "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+ "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
+ ql_log(ql_log_warn, vha, 0x006c,
+ "Falling back to functioning (yet invalid -- WWPN) "
+ "defaults.\n");
/*
* Set default initialization control block.
@@ -4587,10 +4602,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
- DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
- "(%d us).\n", vha->host_no, ha->zio_mode,
- ha->zio_timer * 100));
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x006f,
"ZIO mode %d enabled; timer delay (%d us).\n",
ha->zio_mode, ha->zio_timer * 100);
@@ -4601,8 +4613,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
if (rval) {
- DEBUG2_3(printk(KERN_WARNING
- "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x0070,
+ "NVRAM configuration failed.\n");
}
return (rval);
}
@@ -4620,8 +4632,8 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
- qla_printk(KERN_INFO, ha,
- "FW: Loading from flash (%x)...\n", faddr);
+ ql_dbg(ql_dbg_init, vha, 0x008b,
+ "Loading firmware from flash (%x).\n", faddr);
rval = QLA_SUCCESS;
@@ -4637,11 +4649,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to verify integrity of flash firmware image!\n");
- qla_printk(KERN_WARNING, ha,
- "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
- dcode[1], dcode[2], dcode[3]);
+ ql_log(ql_log_fatal, vha, 0x008c,
+ "Unable to verify the integrity of flash firmware "
+ "image.\n");
+ ql_log(ql_log_fatal, vha, 0x008d,
+ "Firmware data: %08x %08x %08x %08x.\n",
+ dcode[0], dcode[1], dcode[2], dcode[3]);
return QLA_FUNCTION_FAILED;
}
@@ -4660,9 +4673,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
if (dlen > risc_size)
dlen = risc_size;
- DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
- "addr %x, number of dwords 0x%x, offset 0x%x.\n",
- vha->host_no, risc_addr, dlen, faddr));
+ ql_dbg(ql_dbg_init, vha, 0x008e,
+ "Loading risc segment@ risc addr %x "
+ "number of dwords 0x%x offset 0x%x.\n",
+ risc_addr, dlen, faddr);
qla24xx_read_flash_data(vha, dcode, faddr, dlen);
for (i = 0; i < dlen; i++)
@@ -4671,12 +4685,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
rval = qla2x00_load_ram(vha, req->dma, risc_addr,
dlen);
if (rval) {
- DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
- "segment %d of firmware\n", vha->host_no,
- fragment));
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to load segment %d of "
- "firmware\n", fragment);
+ ql_log(ql_log_fatal, vha, 0x008f,
+ "Failed to load segment %d of firmware.\n",
+ fragment);
break;
}
@@ -4709,9 +4720,10 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Load firmware blob. */
blob = qla2x00_request_firmware(vha);
if (!blob) {
- qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
- qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
- "from: " QLA_FW_URL ".\n");
+ ql_log(ql_log_info, vha, 0x0083,
+ "Fimware image unavailable.\n");
+ ql_log(ql_log_info, vha, 0x0084,
+ "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
return QLA_FUNCTION_FAILED;
}
@@ -4724,8 +4736,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Validate firmware image by checking version. */
if (blob->fw->size < 8 * sizeof(uint16_t)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to verify integrity of firmware image (%Zd)!\n",
+ ql_log(ql_log_fatal, vha, 0x0085,
+ "Unable to verify integrity of firmware image (%Zd).\n",
blob->fw->size);
goto fail_fw_integrity;
}
@@ -4734,11 +4746,11 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
wcode[2] == 0 && wcode[3] == 0)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to verify integrity of firmware image!\n");
- qla_printk(KERN_WARNING, ha,
- "Firmware data: %04x %04x %04x %04x!\n", wcode[0],
- wcode[1], wcode[2], wcode[3]);
+ ql_log(ql_log_fatal, vha, 0x0086,
+ "Unable to verify integrity of firmware image.\n");
+ ql_log(ql_log_fatal, vha, 0x0087,
+ "Firmware data: %04x %04x %04x %04x.\n",
+ wcode[0], wcode[1], wcode[2], wcode[3]);
goto fail_fw_integrity;
}
@@ -4751,9 +4763,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Validate firmware image size. */
fwclen += risc_size * sizeof(uint16_t);
if (blob->fw->size < fwclen) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_fatal, vha, 0x0088,
"Unable to verify integrity of firmware image "
- "(%Zd)!\n", blob->fw->size);
+ "(%Zd).\n", blob->fw->size);
goto fail_fw_integrity;
}
@@ -4762,10 +4774,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
wlen = (uint16_t)(ha->fw_transfer_size >> 1);
if (wlen > risc_size)
wlen = risc_size;
-
- DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
- "addr %x, number of words 0x%x.\n", vha->host_no,
- risc_addr, wlen));
+ ql_dbg(ql_dbg_init, vha, 0x0089,
+ "Loading risc segment@ risc addr %x number of "
+ "words 0x%x.\n", risc_addr, wlen);
for (i = 0; i < wlen; i++)
wcode[i] = swab16(fwcode[i]);
@@ -4773,12 +4784,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
rval = qla2x00_load_ram(vha, req->dma, risc_addr,
wlen);
if (rval) {
- DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
- "segment %d of firmware\n", vha->host_no,
- fragment));
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to load segment %d of "
- "firmware\n", fragment);
+ ql_log(ql_log_fatal, vha, 0x008a,
+ "Failed to load segment %d of firmware.\n",
+ fragment);
break;
}
@@ -4814,15 +4822,17 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Load firmware blob. */
blob = qla2x00_request_firmware(vha);
if (!blob) {
- qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
- qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
- "from: " QLA_FW_URL ".\n");
+ ql_log(ql_log_warn, vha, 0x0090,
+ "Fimware image unavailable.\n");
+ ql_log(ql_log_warn, vha, 0x0091,
+ "Firmware images can be retrieved from: "
+ QLA_FW_URL ".\n");
return QLA_FUNCTION_FAILED;
}
- qla_printk(KERN_INFO, ha,
- "FW: Loading via request-firmware...\n");
+ ql_log(ql_log_info, vha, 0x0092,
+ "Loading via request-firmware.\n");
rval = QLA_SUCCESS;
@@ -4834,8 +4844,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Validate firmware image by checking version. */
if (blob->fw->size < 8 * sizeof(uint32_t)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to verify integrity of firmware image (%Zd)!\n",
+ ql_log(ql_log_fatal, vha, 0x0093,
+ "Unable to verify integrity of firmware image (%Zd).\n",
blob->fw->size);
goto fail_fw_integrity;
}
@@ -4845,11 +4855,12 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to verify integrity of firmware image!\n");
- qla_printk(KERN_WARNING, ha,
- "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
- dcode[1], dcode[2], dcode[3]);
+ ql_log(ql_log_fatal, vha, 0x0094,
+ "Unable to verify integrity of firmware image (%Zd).\n",
+ blob->fw->size);
+ ql_log(ql_log_fatal, vha, 0x0095,
+ "Firmware data: %08x %08x %08x %08x.\n",
+ dcode[0], dcode[1], dcode[2], dcode[3]);
goto fail_fw_integrity;
}
@@ -4861,9 +4872,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Validate firmware image size. */
fwclen += risc_size * sizeof(uint32_t);
if (blob->fw->size < fwclen) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_fatal, vha, 0x0096,
"Unable to verify integrity of firmware image "
- "(%Zd)!\n", blob->fw->size);
+ "(%Zd).\n", blob->fw->size);
goto fail_fw_integrity;
}
@@ -4874,9 +4885,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
if (dlen > risc_size)
dlen = risc_size;
- DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
- "addr %x, number of dwords 0x%x.\n", vha->host_no,
- risc_addr, dlen));
+ ql_dbg(ql_dbg_init, vha, 0x0097,
+ "Loading risc segment@ risc addr %x "
+ "number of dwords 0x%x.\n", risc_addr, dlen);
for (i = 0; i < dlen; i++)
dcode[i] = swab32(fwcode[i]);
@@ -4884,12 +4895,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
rval = qla2x00_load_ram(vha, req->dma, risc_addr,
dlen);
if (rval) {
- DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
- "segment %d of firmware\n", vha->host_no,
- fragment));
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to load segment %d of "
- "firmware\n", fragment);
+ ql_log(ql_log_fatal, vha, 0x0098,
+ "Failed to load segment %d of firmware.\n",
+ fragment);
break;
}
@@ -4953,14 +4961,13 @@ try_blob_fw:
if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
return rval;
- qla_printk(KERN_ERR, ha,
- "FW: Attempting to fallback to golden firmware...\n");
+ ql_log(ql_log_info, vha, 0x0099,
+ "Attempting to fallback to golden firmware.\n");
rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
if (rval != QLA_SUCCESS)
return rval;
- qla_printk(KERN_ERR, ha,
- "FW: Please update operational firmware...\n");
+ ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
ha->flags.running_gold_fw = 1;
return rval;
@@ -4987,8 +4994,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
continue;
if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
continue;
- qla_printk(KERN_INFO, ha,
- "Attempting retry of stop-firmware command...\n");
+ ql_log(ql_log_info, vha, 0x8015,
+ "Attempting retry of stop-firmware command.\n");
ret = qla2x00_stop_firmware(vha);
}
}
@@ -5023,10 +5030,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
/* Login to SNS first */
ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
if (mb[0] != MBS_COMMAND_COMPLETE) {
- DEBUG15(qla_printk(KERN_INFO, ha,
- "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
- "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
- mb[0], mb[1], mb[2], mb[6], mb[7]));
+ ql_dbg(ql_dbg_init, vha, 0x0103,
+ "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
+ "mb[6]=%x mb[7]=%x.\n",
+ NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
return (QLA_FUNCTION_FAILED);
}
@@ -5146,19 +5153,23 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
chksum += le32_to_cpu(*dptr++);
- DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
- DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
+ "Contents of NVRAM:\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
+ (uint8_t *)nv, ha->nvram_size);
/* Bad NVRAM data, set defaults parameters. */
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
|| nv->id[3] != ' ' ||
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
- qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
- "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
+ ql_log(ql_log_info, vha, 0x0073,
+ "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+ "version=0x%x.\n", chksum, nv->id[0],
le16_to_cpu(nv->nvram_version));
- qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
- "invalid -- WWPN) defaults.\n");
+ ql_log(ql_log_info, vha, 0x0074,
+ "Falling back to functioning (yet invalid -- WWPN) "
+ "defaults.\n");
/*
* Set default initialization control block.
@@ -5350,12 +5361,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
- DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
- "(%d us).\n", vha->host_no, ha->zio_mode,
- ha->zio_timer * 100));
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x0075,
"ZIO mode %d enabled; timer delay (%d us).\n",
- ha->zio_mode, ha->zio_timer * 100);
+ ha->zio_mode,
+ ha->zio_timer * 100);
icb->firmware_options_2 |= cpu_to_le32(
(uint32_t)ha->zio_mode);
@@ -5364,8 +5373,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
if (rval) {
- DEBUG2_3(printk(KERN_WARNING
- "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x0076,
+ "NVRAM configuration failed.\n");
}
return (rval);
}
@@ -5388,9 +5397,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
status = qla2x00_fw_ready(vha);
if (!status) {
- qla_printk(KERN_INFO, ha,
- "%s(): Start configure loop, "
- "status = %d\n", __func__, status);
+ ql_log(ql_log_info, vha, 0x803c,
+ "Start configure loop, status =%d.\n", status);
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -5412,9 +5420,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
if ((vha->device_flags & DFLG_NO_CABLE))
status = 0;
- qla_printk(KERN_INFO, ha,
- "%s(): Configure loop done, status = 0x%x\n",
- __func__, status);
+ ql_log(ql_log_info, vha, 0x803d,
+ "Configure loop done, status = 0x%x.\n", status);
}
if (!status) {
@@ -5450,9 +5457,9 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
ha->fce_dma, ha->fce_bufs, ha->fce_mb,
&ha->fce_bufs);
if (rval) {
- qla_printk(KERN_WARNING, ha,
- "Unable to reinitialize FCE "
- "(%d).\n", rval);
+ ql_log(ql_log_warn, vha, 0x803e,
+ "Unable to reinitialize FCE (%d).\n",
+ rval);
ha->flags.fce_enabled = 0;
}
}
@@ -5462,17 +5469,16 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
rval = qla2x00_enable_eft_trace(vha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
- qla_printk(KERN_WARNING, ha,
- "Unable to reinitialize EFT "
- "(%d).\n", rval);
+ ql_log(ql_log_warn, vha, 0x803f,
+ "Unable to reinitialize EFT (%d).\n",
+ rval);
}
}
}
if (!status) {
- DEBUG(printk(KERN_INFO
- "qla82xx_restart_isp(%ld): succeeded.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8040,
+ "qla82xx_restart_isp succeeded.\n");
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
@@ -5489,8 +5495,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
- qla_printk(KERN_INFO, ha,
- "qla82xx_restart_isp: **** FAILED ****\n");
+ ql_log(ql_log_warn, vha, 0x8041,
+ "qla82xx_restart_isp **** FAILED ****.\n");
}
return status;
@@ -5640,9 +5646,8 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
if (ret == QLA_SUCCESS)
fcport->fcp_prio = priority;
else
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld): Unable to activate fcp priority, "
- " ret=0x%x\n", vha->host_no, ret));
+ ql_dbg(ql_dbg_user, vha, 0x704f,
+ "Unable to activate fcp priority, ret=0x%x.\n", ret);
return ret;
}
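Note on the conversion pattern above: each DEBUG*/qla_printk() call in qla_init.c becomes a ql_dbg()/ql_log() call carrying a unique hex message id, and debug output is gated by the ql2xextended_error_logging module parameter (which the NVRAM hunk now seeds with 0x7fffffff instead of 1 when host-parameter BIT_7 is set). The following is a hedged, simplified sketch of that gating, not the driver's actual ql_dbg() body; the name ql_dbg_sketch and the exact output layout are invented for illustration.

/*
 * Simplified sketch of mask-gated debug output.  Assumes the global
 * ql2xextended_error_logging bitmask and the scsi_qla_host_t type from
 * the surrounding driver code; this is not the real qla_dbg.c code.
 */
static void ql_dbg_sketch(uint32_t level, scsi_qla_host_t *vha, int32_t id,
    const char *fmt, ...)
{
	struct va_format vaf;
	va_list va;

	/* Emit only when a requested level bit is enabled in the mask. */
	if (!(level & ql2xextended_error_logging))
		return;

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;
	/* %pV expands the caller's format; the id keeps messages greppable. */
	pr_warn("scsi(%ld): [0x%04x] %pV",
	    vha ? vha->host_no : 0L, id, &vaf);
	va_end(va);
}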
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4c8167e..9902834 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -94,11 +94,40 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
/* Don't print state transitions during initial allocation of fcport */
if (old_state && old_state != state) {
- DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw,
- "scsi(%ld): FCPort state transitioned from %s to %s - "
- "portid=%02x%02x%02x.\n", fcport->vha->host_no,
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
+ "FCPort state transitioned from %s to %s - "
+ "portid=%02x%02x%02x.\n",
port_state_str[old_state], port_state_str[state],
fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ fcport->d_id.b.al_pa);
}
}
+
+static inline int
+qla2x00_hba_err_chk_enabled(srb_t *sp)
+{
+ /*
+ * Uncomment when corresponding SCSI changes are done.
+ *
+ if (!sp->cmd->prot_chk)
+ return 0;
+ *
+ */
+
+ switch (scsi_get_prot_op(sp->cmd)) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ if (ql2xenablehba_err_chk >= 1)
+ return 1;
+ break;
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ if (ql2xenablehba_err_chk >= 2)
+ return 1;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ return 1;
+ }
+ return 0;
+}
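The new qla2x00_hba_err_chk_enabled() helper above turns ql2xenablehba_err_chk from an on/off flag into a threshold: HBA-side checking is requested for READ_STRIP/WRITE_INSERT at level 1 or higher, for READ_PASS/WRITE_PASS at level 2 or higher, and unconditionally for READ_INSERT/WRITE_STRIP, where the HBA is the only place the guard data exists. Below is a hedged usage sketch mirroring how the qla_iocb.c hunks further down consume the result; the wrapper name is invented, and only the fw_prot_opts bit value (0x10) is taken from the existing code.

/*
 * Illustrative caller only; the real consumer is
 * qla24xx_build_scsi_crc_2_iocbs() later in this patch.
 *
 * Resulting behaviour by module-parameter value:
 *
 *   ql2xenablehba_err_chk  READ_STRIP/    READ_PASS/   READ_INSERT/
 *                          WRITE_INSERT   WRITE_PASS   WRITE_STRIP
 *            0                 off           off           on
 *            1                 on            off           on
 *            2                 on            on            on
 */
static void example_apply_hba_err_chk(srb_t *sp, uint16_t *fw_prot_opts)
{
	/* Ask the firmware to skip guard-tag checking when disabled. */
	if (!qla2x00_hba_err_chk_enabled(sp))
		*fw_prot_opts |= 0x10;	/* Disable Guard tag checking */
}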
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 7bac3cd..dbec896 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -150,7 +150,8 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
/* We only support T10 DIF right now */
if (guard != SHOST_DIX_GUARD_CRC) {
- DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
+ "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
return 0;
}
@@ -343,9 +344,10 @@ qla2x00_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
- != QLA_SUCCESS)
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS) {
return (QLA_FUNCTION_FAILED);
+ }
vha->marker_needed = 0;
}
@@ -490,8 +492,8 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk24 = NULL;
mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
if (mrk == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
- __func__, base_vha->host_no));
+ ql_log(ql_log_warn, base_vha, 0x3026,
+ "Failed to allocate Marker IOCB.\n");
return (QLA_FUNCTION_FAILED);
}
@@ -547,9 +549,10 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
- DEBUG5(printk("%s(): IOCB data:\n", __func__));
- DEBUG5(qla2x00_dump_buffer(
- (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
+ "IOCB data:\n");
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
+ (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
/* Adjust ring index. */
req->ring_index++;
@@ -604,7 +607,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
* Returns the number of IOCB entries needed to store @dsds.
*/
inline uint16_t
-qla24xx_calc_iocbs(uint16_t dsds)
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
uint16_t iocbs;
@@ -614,8 +617,6 @@ qla24xx_calc_iocbs(uint16_t dsds)
if ((dsds - 1) % 5)
iocbs++;
}
- DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
- __func__, iocbs));
return iocbs;
}
@@ -708,19 +709,28 @@ struct fw_dif_context {
*
*/
static inline void
-qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
+qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
unsigned int protcnt)
{
- struct sd_dif_tuple *spt;
- unsigned char op = scsi_get_prot_op(cmd);
+ struct scsi_cmnd *cmd = sp->cmd;
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
switch (scsi_get_prot_type(cmd)) {
- /* For TYPE 0 protection: no checking */
case SCSI_PROT_DIF_TYPE0:
- pkt->ref_tag_mask[0] = 0x00;
- pkt->ref_tag_mask[1] = 0x00;
- pkt->ref_tag_mask[2] = 0x00;
- pkt->ref_tag_mask[3] = 0x00;
+ /*
+ * No check for ql2xenablehba_err_chk, as it would be an
+ * I/O error if hba tag generation is not done.
+ */
+ pkt->ref_tag = cpu_to_le32((uint32_t)
+ (0xffffffff & scsi_get_lba(cmd)));
+
+ if (!qla2x00_hba_err_chk_enabled(sp))
+ break;
+
+ pkt->ref_tag_mask[0] = 0xff;
+ pkt->ref_tag_mask[1] = 0xff;
+ pkt->ref_tag_mask[2] = 0xff;
+ pkt->ref_tag_mask[3] = 0xff;
break;
/*
@@ -728,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
* match LBA in CDB + N
*/
case SCSI_PROT_DIF_TYPE2:
- if (!ql2xenablehba_err_chk)
- break;
-
- if (scsi_prot_sg_count(cmd)) {
- spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
- scsi_prot_sglist(cmd)[0].offset;
- pkt->app_tag = swab32(spt->app_tag);
- pkt->app_tag_mask[0] = 0xff;
- pkt->app_tag_mask[1] = 0xff;
- }
+ pkt->app_tag = __constant_cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
pkt->ref_tag = cpu_to_le32((uint32_t)
(0xffffffff & scsi_get_lba(cmd)));
+ if (!qla2x00_hba_err_chk_enabled(sp))
+ break;
+
/* enable ALL bytes of the ref tag */
pkt->ref_tag_mask[0] = 0xff;
pkt->ref_tag_mask[1] = 0xff;
@@ -761,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
* 16 bit app tag.
*/
case SCSI_PROT_DIF_TYPE1:
- if (!ql2xenablehba_err_chk)
+ pkt->ref_tag = cpu_to_le32((uint32_t)
+ (0xffffffff & scsi_get_lba(cmd)));
+ pkt->app_tag = __constant_cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
+
+ if (!qla2x00_hba_err_chk_enabled(sp))
break;
- if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
- op == SCSI_PROT_WRITE_PASS)) {
- spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
- scsi_prot_sglist(cmd)[0].offset;
- DEBUG18(printk(KERN_DEBUG
- "%s(): LBA from user %p, lba = 0x%x\n",
- __func__, spt, (int)spt->ref_tag));
- pkt->ref_tag = swab32(spt->ref_tag);
- pkt->app_tag_mask[0] = 0x0;
- pkt->app_tag_mask[1] = 0x0;
- } else {
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
- pkt->app_tag = __constant_cpu_to_le16(0);
- pkt->app_tag_mask[0] = 0x0;
- pkt->app_tag_mask[1] = 0x0;
- }
/* enable ALL bytes of the ref tag */
pkt->ref_tag_mask[0] = 0xff;
pkt->ref_tag_mask[1] = 0xff;
@@ -789,15 +784,169 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
break;
}
- DEBUG18(printk(KERN_DEBUG
- "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
- " app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
- " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
- (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
+ ql_dbg(ql_dbg_io, vha, 0x3009,
+ "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
+ "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
+ pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
+ scsi_get_prot_type(cmd), cmd);
}
+struct qla2_sgx {
+ dma_addr_t dma_addr; /* OUT */
+ uint32_t dma_len; /* OUT */
+
+ uint32_t tot_bytes; /* IN */
+ struct scatterlist *cur_sg; /* IN */
+
+ /* for book keeping, bzero on initial invocation */
+ uint32_t bytes_consumed;
+ uint32_t num_bytes;
+ uint32_t tot_partial;
+
+ /* for debugging */
+ uint32_t num_sg;
+ srb_t *sp;
+};
+
+static int
+qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
+ uint32_t *partial)
+{
+ struct scatterlist *sg;
+ uint32_t cumulative_partial, sg_len;
+ dma_addr_t sg_dma_addr;
+
+ if (sgx->num_bytes == sgx->tot_bytes)
+ return 0;
+
+ sg = sgx->cur_sg;
+ cumulative_partial = sgx->tot_partial;
+
+ sg_dma_addr = sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
+
+ if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
+ sgx->dma_len = (blk_sz - cumulative_partial);
+ sgx->tot_partial = 0;
+ sgx->num_bytes += blk_sz;
+ *partial = 0;
+ } else {
+ sgx->dma_len = sg_len - sgx->bytes_consumed;
+ sgx->tot_partial += sgx->dma_len;
+ *partial = 1;
+ }
+
+ sgx->bytes_consumed += sgx->dma_len;
+
+ if (sg_len == sgx->bytes_consumed) {
+ sg = sg_next(sg);
+ sgx->num_sg++;
+ sgx->cur_sg = sg;
+ sgx->bytes_consumed = 0;
+ }
+
+ return 1;
+}
static int
+qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
+ uint32_t *dsd, uint16_t tot_dsds)
+{
+ void *next_dsd;
+ uint8_t avail_dsds = 0;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct scatterlist *sg_prot;
+ uint32_t *cur_dsd = dsd;
+ uint16_t used_dsds = tot_dsds;
+
+ uint32_t prot_int;
+ uint32_t partial;
+ struct qla2_sgx sgx;
+ dma_addr_t sle_dma;
+ uint32_t sle_dma_len, tot_prot_dma_len = 0;
+ struct scsi_cmnd *cmd = sp->cmd;
+
+ prot_int = cmd->device->sector_size;
+
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
+ sgx.tot_bytes = scsi_bufflen(sp->cmd);
+ sgx.cur_sg = scsi_sglist(sp->cmd);
+ sgx.sp = sp;
+
+ sg_prot = scsi_prot_sglist(sp->cmd);
+
+ while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
+
+ sle_dma = sgx.dma_addr;
+ sle_dma_len = sgx.dma_len;
+alloc_and_fill:
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ return 1;
+
+ /* allocate new list */
+ dsd_ptr->dsd_addr = next_dsd =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+
+ if (!next_dsd) {
+ /*
+ * Need to cleanup only this dsd_ptr, rest
+ * will be done by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ return 1;
+ }
+
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)sp->ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = (uint32_t *)next_dsd;
+ }
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sle_dma_len);
+ avail_dsds--;
+
+ if (partial == 0) {
+ /* Got a full protection interval */
+ sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
+ sle_dma_len = 8;
+
+ tot_prot_dma_len += sle_dma_len;
+ if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
+ tot_prot_dma_len = 0;
+ sg_prot = sg_next(sg_prot);
+ }
+
+ partial = 1; /* So as to not re-enter this block */
+ goto alloc_and_fill;
+ }
+ }
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ return 0;
+}
+static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint16_t tot_dsds)
{
@@ -809,6 +958,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint32_t *cur_dsd = dsd;
int i;
uint16_t used_dsds = tot_dsds;
+ scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
uint8_t *cp;
@@ -853,9 +1003,10 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
cur_dsd = (uint32_t *)next_dsd;
}
sle_dma = sg_dma_address(sg);
- DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
- " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
- MSD(sle_dma), sg_dma_len(sg)));
+ ql_dbg(ql_dbg_io, vha, 0x300a,
+ "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
+ cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
+ sp->cmd);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
@@ -863,8 +1014,8 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
- DEBUG18(printk("%s(): User Data buffer= %p:\n",
- __func__ , cp));
+ ql_dbg(ql_dbg_io, vha, 0x300b,
+ "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
}
}
/* Null termination */
@@ -888,7 +1039,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
struct scsi_cmnd *cmd;
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
-
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
uint8_t *cp;
@@ -935,10 +1086,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
}
sle_dma = sg_dma_address(sg);
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
- DEBUG18(printk(KERN_DEBUG
- "%s(): %p, sg entry %d - addr =0x%x"
- "0x%x, len =%d\n", __func__ , cur_dsd, i,
- LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
+ ql_dbg(ql_dbg_io, vha, 0x3027,
+ "%s(): %p, sg_entry %d - "
+ "addr=0x%x0x%x, len=%d.\n",
+ __func__, cur_dsd, i,
+ LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
}
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -946,8 +1098,9 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
- DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
- __func__ , cp));
+ ql_dbg(ql_dbg_io, vha, 0x3028,
+ "%s(): Protection Data buffer = %p.\n", __func__,
+ cp);
}
avail_dsds--;
}
@@ -975,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
struct scsi_cmnd *cmd;
struct scatterlist *cur_seg;
int sgc;
- uint32_t total_bytes;
+ uint32_t total_bytes = 0;
uint32_t data_bytes;
uint32_t dif_bytes;
uint8_t bundling = 1;
@@ -996,22 +1149,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
*((uint32_t *)(&cmd_pkt->entry_type)) =
__constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
/* No data transfer */
data_bytes = scsi_bufflen(cmd);
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
- DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
- __func__, data_bytes));
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return QLA_SUCCESS;
}
- vha = sp->fcport->vha;
- ha = vha->hw;
-
- DEBUG18(printk(KERN_DEBUG
- "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
- vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
-
cmd_pkt->vp_index = sp->fcport->vp_idx;
/* Set transfer direction */
@@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
__constant_cpu_to_le16(CF_READ_DATA);
}
- tot_prot_dsds = scsi_prot_sg_count(cmd);
- if (!tot_prot_dsds)
+ if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
+ (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
+ (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
bundling = 0;
/* Allocate CRC context from global pool */
@@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
- qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
+ qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
&crc_ctx_pkt->ref_tag, tot_prot_dsds);
cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
@@ -1056,8 +1205,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
/* Determine SCSI command length -- align to 4 byte boundary */
if (cmd->cmd_len > 16) {
- DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
- __func__));
additional_fcpcdb_len = cmd->cmd_len - 16;
if ((cmd->cmd_len % 4) != 0) {
/* SCSI cmd > 16 bytes must be multiple of 4 */
@@ -1078,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
fcp_cmnd->additional_cdb_len |= 2;
int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
- host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1108,21 +1254,29 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
- DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
- "entries %d, data bytes %d, Protection entries %d\n",
- __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
- data_bytes, tot_prot_dsds));
-
/* Compute dif len and adjust data len to include protection */
- total_bytes = data_bytes;
dif_bytes = 0;
blk_size = cmd->device->sector_size;
- if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
- dif_bytes = (data_bytes / blk_size) * 8;
- total_bytes += dif_bytes;
+ dif_bytes = (data_bytes / blk_size) * 8;
+
+ switch (scsi_get_prot_op(sp->cmd)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ total_bytes = data_bytes;
+ data_bytes += dif_bytes;
+ break;
+
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ total_bytes = data_bytes + dif_bytes;
+ break;
+ default:
+ BUG();
}
- if (!ql2xenablehba_err_chk)
+ if (!qla2x00_hba_err_chk_enabled(sp))
fw_prot_opts |= 0x10; /* Disable Guard tag checking */
if (!bundling) {
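
A note on the arithmetic in the switch above: every block carries an 8-byte T10 DIF tuple, so dif_bytes = (data_bytes / blk_size) * 8, and the computed total excludes those tuples only in the READ_INSERT/WRITE_STRIP cases, where the HBA adds or removes protection on the host side. The following stand-alone sketch (illustration only, not driver code; the 512-byte block size is just an example) reproduces that bookkeeping:

#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the total_bytes/dif_bytes accounting in the switch above:
 * each block carries an 8-byte DIF tuple, and the tuples are counted
 * in the total except for READ_INSERT/WRITE_STRIP, where the wire
 * carries no protection data.
 */
static uint32_t crc2_total_bytes(uint32_t data_bytes, uint32_t blk_size,
				 int hba_insert_or_strip)
{
	uint32_t dif_bytes = (data_bytes / blk_size) * 8;

	return hba_insert_or_strip ? data_bytes : data_bytes + dif_bytes;
}

int main(void)
{
	/* 64 blocks of 512 bytes -> 512 bytes of DIF */
	printf("pass/strip-on-read/insert-on-write: %u\n",
	    crc2_total_bytes(32768, 512, 0));	/* 33280 */
	printf("read-insert/write-strip: %u\n",
	    crc2_total_bytes(32768, 512, 1));	/* 32768 */
	return 0;
}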
@@ -1150,14 +1304,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
additional_fcpcdb_len);
*fcp_dl = htonl(total_bytes);
- DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
- " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
- vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
- crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
-
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
- DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
- __func__, data_bytes));
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return QLA_SUCCESS;
}
@@ -1165,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
cmd_pkt->control_flags |=
__constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
- if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
+
+ if (!bundling && tot_prot_dsds) {
+ if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
+ cur_dsd, tot_dsds))
+ goto crc_queuing_error;
+ } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
(tot_dsds - tot_prot_dsds)))
goto crc_queuing_error;
@@ -1182,8 +1334,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
return QLA_SUCCESS;
crc_queuing_error:
- DEBUG18(qla_printk(KERN_INFO, ha,
- "CMD sent FAILED crc_q error:sp = %p\n", sp));
/* Cleanup will be performed by the caller */
return QLA_FUNCTION_FAILED;
@@ -1225,8 +1375,8 @@ qla24xx_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
- != QLA_SUCCESS)
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
vha->marker_needed = 0;
}
@@ -1243,8 +1393,9 @@ qla24xx_start_scsi(srb_t *sp)
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == MAX_OUTSTANDING_COMMANDS) {
goto queuing_error;
+ }
/* Map the sg table so we have an accurate count of sg entries needed */
if (scsi_sg_count(cmd)) {
@@ -1256,8 +1407,7 @@ qla24xx_start_scsi(srb_t *sp)
nseg = 0;
tot_dsds = nseg;
-
- req_cnt = qla24xx_calc_iocbs(tot_dsds);
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
@@ -1322,7 +1472,6 @@ qla24xx_start_scsi(srb_t *sp)
/* Specify response queue number where completion should happen */
cmd_pkt->entry_status = (uint8_t) rsp->id;
wmb();
-
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -1431,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp)
goto queuing_error;
else
sp->flags |= SRB_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ struct qla2_sgx sgx;
+ uint32_t partial;
+
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
+ sgx.sp = sp;
+
+ nseg = 0;
+ while (qla24xx_get_one_block_sg(
+ cmd->device->sector_size, &sgx, &partial))
+ nseg++;
+ }
} else
nseg = 0;
@@ -1445,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
goto queuing_error;
else
sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+ }
} else {
nseg = 0;
}
@@ -1471,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Build header part of command packet (excluding the OPCODE). */
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
@@ -1534,9 +1705,6 @@ queuing_error:
/* Cleanup will be performed by the caller (queuecommand) */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- DEBUG18(qla_printk(KERN_INFO, ha,
- "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
return QLA_FUNCTION_FAILED;
}
@@ -1581,8 +1749,11 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == MAX_OUTSTANDING_COMMANDS) {
+ ql_log(ql_log_warn, vha, 0x700b,
+ "No room on oustanding cmd array.\n");
goto queuing_error;
+ }
/* Prep command array. */
req->current_outstanding_cmd = handle;
@@ -1999,8 +2170,11 @@ qla2x00_start_sp(srb_t *sp)
rval = QLA_FUNCTION_FAILED;
spin_lock_irqsave(&ha->hardware_lock, flags);
pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
- if (!pkt)
+ if (!pkt) {
+ ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
+ "qla2x00_alloc_iocbs failed.\n");
goto done;
+ }
rval = QLA_SUCCESS;
switch (ctx->type) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ae8e298..646fc52 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -45,7 +45,7 @@ qla2100_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return (IRQ_NONE);
}
@@ -91,9 +91,9 @@ qla2100_intr_handler(int irq, void *dev_id)
qla2x00_async_event(vha, rsp, mb);
} else {
/*EMPTY*/
- DEBUG2(printk("scsi(%ld): Unrecognized "
- "interrupt type (%d).\n",
- vha->host_no, mb[0]));
+ ql_dbg(ql_dbg_async, vha, 0x5025,
+ "Unrecognized interrupt type (%d).\n",
+ mb[0]);
}
/* Release mailbox registers. */
WRT_REG_WORD(&reg->semaphore, 0);
@@ -142,7 +142,7 @@ qla2300_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return (IRQ_NONE);
}
@@ -160,11 +160,13 @@ qla2300_intr_handler(int irq, void *dev_id)
hccr = RD_REG_WORD(&reg->hccr);
if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
- qla_printk(KERN_INFO, ha, "Parity error -- "
- "HCCR=%x, Dumping firmware!\n", hccr);
+ ql_log(ql_log_warn, vha, 0x5026,
+ "Parity error -- HCCR=%x, Dumping "
+ "firmware.\n", hccr);
else
- qla_printk(KERN_INFO, ha, "RISC paused -- "
- "HCCR=%x, Dumping firmware!\n", hccr);
+ ql_log(ql_log_warn, vha, 0x5027,
+ "RISC paused -- HCCR=%x, Dumping "
+ "firmware.\n", hccr);
/*
* Issue a "HARD" reset in order for the RISC
@@ -213,9 +215,8 @@ qla2300_intr_handler(int irq, void *dev_id)
qla2x00_async_event(vha, rsp, mb);
break;
default:
- DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
- "(%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_async, vha, 0x5028,
+ "Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -262,11 +263,11 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
}
if (ha->mcp) {
- DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
- __func__, vha->host_no, ha->mcp->mb[0]));
+ ql_dbg(ql_dbg_async, vha, 0x5000,
+ "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
} else {
- DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x5001,
+ "MBX pointer ERROR.\n");
}
}
@@ -285,22 +286,24 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
mb[cnt] = RD_REG_WORD(wptr);
- DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- "
- "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
- event[aen & 0xff],
- mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));
+ ql_dbg(ql_dbg_async, vha, 0x5021,
+ "Inter-Driver Commucation %s -- "
+ "%04x %04x %04x %04x %04x %04x %04x.\n",
+ event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
+ mb[4], mb[5], mb[6]);
/* Acknowledgement needed? [Notify && non-zero timeout]. */
timeout = (descr >> 8) & 0xf;
if (aen != MBA_IDC_NOTIFY || !timeout)
return;
- DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- "
- "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));
+ ql_dbg(ql_dbg_async, vha, 0x5022,
+ "Inter-Driver Commucation %s -- ACK timeout=%d.\n",
+ vha->host_no, event[aen & 0xff], timeout);
rval = qla2x00_post_idc_ack_work(vha, mb);
if (rval != QLA_SUCCESS)
- qla_printk(KERN_WARNING, vha->hw,
+ ql_log(ql_log_warn, vha, 0x5023,
"IDC failed to post ACK.\n");
}
@@ -393,15 +396,15 @@ skip_rio:
break;
case MBA_RESET: /* Reset */
- DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x5002,
+ "Asynchronous RESET.\n");
set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
break;
case MBA_SYSTEM_ERR: /* System Error */
mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
"mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
@@ -409,7 +412,7 @@ skip_rio:
if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
- qla_printk(KERN_ERR, ha,
+ ql_log(ql_log_fatal, vha, 0x5004,
"Unrecoverable Hardware Error: adapter "
"marked OFFLINE!\n");
vha->flags.online = 0;
@@ -422,7 +425,7 @@ skip_rio:
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
} else if (mb[1] == 0) {
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_fatal, vha, 0x5005,
"Unrecoverable Hardware Error: adapter marked "
"OFFLINE!\n");
vha->flags.online = 0;
@@ -431,31 +434,27 @@ skip_rio:
break;
case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
- DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n",
- vha->host_no, mb[1]));
- qla_printk(KERN_WARNING, ha,
- "ISP Request Transfer Error (%x).\n", mb[1]);
+ ql_log(ql_log_warn, vha, 0x5006,
+ "ISP Request Transfer Error (%x).\n", mb[1]);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
- DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
+ ql_log(ql_log_warn, vha, 0x5007,
+ "ISP Response Transfer Error.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
- DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x5008,
+ "Asynchronous WAKEUP_THRES.\n");
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
- DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
- mb[1]));
- qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
+ ql_log(ql_log_info, vha, 0x5009,
+ "LIP occurred (%x).\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -488,10 +487,8 @@ skip_rio:
ha->link_data_rate = mb[1];
}
- DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
- vha->host_no, link_speed));
- qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
- link_speed);
+ ql_log(ql_log_info, vha, 0x500a,
+ "LOOP UP detected (%s Gbps).\n", link_speed);
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -500,12 +497,9 @@ skip_rio:
case MBA_LOOP_DOWN: /* Loop Down Event */
mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
- DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
- "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
- mbx));
- qla_printk(KERN_INFO, ha,
- "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
- mbx);
+ ql_log(ql_log_info, vha, 0x500b,
+ "LOOP DOWN detected (%x %x %x %x).\n",
+ mb[1], mb[2], mb[3], mbx);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -525,9 +519,7 @@ skip_rio:
break;
case MBA_LIP_RESET: /* LIP reset occurred */
- DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
- vha->host_no, mb[1]));
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x500c,
"LIP reset occurred (%x).\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -554,14 +546,15 @@ skip_rio:
break;
if (IS_QLA8XXX_TYPE(ha)) {
- DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
- "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x500d,
+ "DCBX Completed -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
if (ha->notify_dcbx_comp)
complete(&ha->dcbx_comp);
} else
- DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
- "received.\n", vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x500e,
+ "Asynchronous P2P MODE received.\n");
/*
* Until there's a transition from loop down to loop up, treat
@@ -594,10 +587,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
- "received.\n",
- vha->host_no));
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x500f,
"Configuration change detected: value=%x.\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -640,11 +630,9 @@ skip_rio:
/* Global event -- port logout or port unavailable. */
if (mb[1] == 0xffff && mb[2] == 0x7) {
- DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
- vha->host_no));
- DEBUG(printk(KERN_INFO
- "scsi(%ld): Port unavailable %04x %04x %04x.\n",
- vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5010,
+ "Port unavailable %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -674,17 +662,15 @@ skip_rio:
atomic_set(&vha->loop_down_timer, 0);
if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
atomic_read(&vha->loop_state) != LOOP_DEAD) {
- DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
- "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
- mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5011,
+ "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
+ mb[1], mb[2], mb[3]);
break;
}
- DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
- vha->host_no));
- DEBUG(printk(KERN_INFO
- "scsi(%ld): Port database changed %04x %04x %04x.\n",
- vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5012,
+ "Port database changed %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
/*
* Mark all devices as missing so we will login again.
@@ -707,20 +693,17 @@ skip_rio:
if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
break;
- DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
- vha->host_no));
- DEBUG(printk(KERN_INFO
- "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
- vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5013,
+ "RSCN database changed -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
| vha->d_id.b.al_pa;
if (rscn_entry == host_pid) {
- DEBUG(printk(KERN_INFO
- "scsi(%ld): Ignoring RSCN update to local host "
- "port ID (%06x)\n",
- vha->host_no, host_pid));
+ ql_dbg(ql_dbg_async, vha, 0x5014,
+ "Ignoring RSCN update to local host "
+ "port ID (%06x).\n", host_pid);
break;
}
@@ -736,7 +719,6 @@ skip_rio:
vha->flags.rscn_queue_overflow = 1;
}
- atomic_set(&vha->loop_state, LOOP_UPDATE);
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
@@ -747,8 +729,8 @@ skip_rio:
/* case MBA_RIO_RESPONSE: */
case MBA_ZIO_RESPONSE:
- DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x5015,
+ "[R|Z]IO update completion.\n");
if (IS_FWI2_CAPABLE(ha))
qla24xx_process_response_queue(vha, rsp);
@@ -757,61 +739,68 @@ skip_rio:
break;
case MBA_DISCARD_RND_FRAME:
- DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
- "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5016,
+ "Discard RND Frame -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
break;
case MBA_TRACE_NOTIFICATION:
- DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
- vha->host_no, mb[1], mb[2]));
+ ql_dbg(ql_dbg_async, vha, 0x5017,
+ "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
break;
case MBA_ISP84XX_ALERT:
- DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
- "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5018,
+ "ISP84XX Alert Notification -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
switch (mb[1]) {
case A84_PANIC_RECOVERY:
- qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
- "%04x %04x\n", mb[2], mb[3]);
+ ql_log(ql_log_info, vha, 0x5019,
+ "Alert 84XX: panic recovery %04x %04x.\n",
+ mb[2], mb[3]);
break;
case A84_OP_LOGIN_COMPLETE:
ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
- DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
- "firmware version %x\n", ha->cs84xx->op_fw_version));
+ ql_log(ql_log_info, vha, 0x501a,
+ "Alert 84XX: firmware version %x.\n",
+ ha->cs84xx->op_fw_version);
break;
case A84_DIAG_LOGIN_COMPLETE:
ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
- DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
- "diagnostic firmware version %x\n",
- ha->cs84xx->diag_fw_version));
+ ql_log(ql_log_info, vha, 0x501b,
+ "Alert 84XX: diagnostic firmware version %x.\n",
+ ha->cs84xx->diag_fw_version);
break;
case A84_GOLD_LOGIN_COMPLETE:
ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
ha->cs84xx->fw_update = 1;
- DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
- "firmware version %x\n",
- ha->cs84xx->gold_fw_version));
+ ql_log(ql_log_info, vha, 0x501c,
+ "Alert 84XX: gold firmware version %x.\n",
+ ha->cs84xx->gold_fw_version);
break;
default:
- qla_printk(KERN_ERR, ha,
- "Alert 84xx: Invalid Alert %04x %04x %04x\n",
+ ql_log(ql_log_warn, vha, 0x501d,
+ "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
}
spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
break;
case MBA_DCBX_START:
- DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
- vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x501e,
+ "DCBX Started -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
break;
case MBA_DCBX_PARAM_UPDATE:
- DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
- "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x501f,
+ "DCBX Parameters Updated -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
break;
case MBA_FCF_CONF_ERR:
- DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
- "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5020,
+ "FCF Configuration Error -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
break;
case MBA_IDC_COMPLETE:
case MBA_IDC_NOTIFY:
@@ -838,10 +827,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
/* Validate handle. */
if (index >= MAX_OUTSTANDING_COMMANDS) {
- DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
- vha->host_no, index));
- qla_printk(KERN_WARNING, ha,
- "Invalid SCSI completion handle %d.\n", index);
+ ql_log(ql_log_warn, vha, 0x3014,
+ "Invalid SCSI command index (%x).\n", index);
if (IS_QLA82XX(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -859,10 +846,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
sp->cmd->result = DID_OK << 16;
qla2x00_sp_compl(ha, sp);
} else {
- DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
- " handle(0x%x)\n", vha->host_no, req->id, index));
- qla_printk(KERN_WARNING, ha,
- "Invalid ISP SCSI completion handle\n");
+ ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
if (IS_QLA82XX(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -882,8 +866,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
index = LSW(pkt->handle);
if (index >= MAX_OUTSTANDING_COMMANDS) {
- qla_printk(KERN_WARNING, ha,
- "%s: Invalid completion handle (%x).\n", func, index);
+ ql_log(ql_log_warn, vha, 0x5031,
+ "Invalid command index (%x).\n", index);
if (IS_QLA82XX(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
@@ -892,15 +876,13 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
}
sp = req->outstanding_cmds[index];
if (!sp) {
- qla_printk(KERN_WARNING, ha,
- "%s: Invalid completion handle (%x) -- timed-out.\n", func,
- index);
+ ql_log(ql_log_warn, vha, 0x5032,
+ "Invalid completion handle (%x) -- timed-out.\n", index);
return sp;
}
if (sp->handle != index) {
- qla_printk(KERN_WARNING, ha,
- "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
- index);
+ ql_log(ql_log_warn, vha, 0x5033,
+ "SRB handle (%x) mismatch %x.\n", sp->handle, index);
return NULL;
}
@@ -937,17 +919,17 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
if (mbx->entry_status) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x "
+ ql_dbg(ql_dbg_async, vha, 0x5043,
+ "Async-%s error entry - portid=%02x%02x%02x "
"entry-status=%x status=%x state-flag=%x "
"status-flags=%x.\n",
- fcport->vha->host_no, sp->handle, type,
- fcport->d_id.b.domain, fcport->d_id.b.area,
+ type, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mbx->entry_status,
le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
- le16_to_cpu(mbx->status_flags)));
+ le16_to_cpu(mbx->status_flags));
- DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
+ (uint8_t *)mbx, sizeof(*mbx));
goto logio_done;
}
@@ -957,12 +939,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
status = 0;
if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
- "mbx1=%x.\n",
- fcport->vha->host_no, sp->handle, type,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)));
+ ql_dbg(ql_dbg_async, vha, 0x5045,
+ "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
+ type, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
data[0] = MBS_COMMAND_COMPLETE;
if (ctx->type == SRB_LOGIN_CMD) {
@@ -987,14 +967,14 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
}
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x "
+ ql_log(ql_log_warn, vha, 0x5046,
+ "Async-%s failed - portid=%02x%02x%02x status=%x "
"mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
- fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
+ type, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
- le16_to_cpu(mbx->mb7)));
+ le16_to_cpu(mbx->mb7));
logio_done:
lio->done(sp);
@@ -1025,9 +1005,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type = "ct pass-through";
break;
default:
- qla_printk(KERN_WARNING, ha,
- "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
- sp_bsg->type);
+ ql_log(ql_log_warn, vha, 0x5047,
+ "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
return;
}
@@ -1045,20 +1024,20 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_job->reply->reply_payload_rcv_len =
le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld): CT pass-through-%s error "
+ ql_log(ql_log_warn, vha, 0x5048,
+ "CT pass-through-%s error "
"comp_status-status=0x%x total_byte = 0x%x.\n",
- vha->host_no, type, comp_status,
- bsg_job->reply->reply_payload_rcv_len));
+ type, comp_status,
+ bsg_job->reply->reply_payload_rcv_len);
} else {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld): CT pass-through-%s error "
- "comp_status-status=0x%x.\n",
- vha->host_no, type, comp_status));
+ ql_log(ql_log_warn, vha, 0x5049,
+ "CT pass-through-%s error "
+ "comp_status-status=0x%x.\n", type, comp_status);
bsg_job->reply->result = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
}
- DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
+ (uint8_t *)pkt, sizeof(*pkt));
} else {
bsg_job->reply->result = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
@@ -1110,9 +1089,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type = "ct pass-through";
break;
default:
- qla_printk(KERN_WARNING, ha,
- "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
- sp_bsg->type);
+ ql_log(ql_log_warn, vha, 0x503e,
+ "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
return;
}
@@ -1132,27 +1110,31 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_job->reply->reply_payload_rcv_len =
le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+ ql_log(ql_log_info, vha, 0x503f,
+ "ELS-CT pass-through-%s error comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
- vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
- le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count)));
+ type, comp_status, fw_status[1], fw_status[2],
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count));
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
else {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+ ql_log(ql_log_info, vha, 0x5040,
+ "ELS-CT pass-through-%s error comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
- vha->host_no, sp->handle, type, comp_status,
- le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1),
- le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2)));
+ type, comp_status,
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->error_subcode_1),
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->error_subcode_2));
bsg_job->reply->result = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
- DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
+ (uint8_t *)pkt, sizeof(*pkt));
}
else {
bsg_job->reply->result = DID_OK << 16;
@@ -1201,25 +1183,24 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error entry - "
+ ql_log(ql_log_warn, vha, 0x5034,
+ "Async-%s error entry - "
"portid=%02x%02x%02x entry-status=%x.\n",
- fcport->vha->host_no, sp->handle, type,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, logio->entry_status));
- DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
+ type, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, logio->entry_status);
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
+ (uint8_t *)logio, sizeof(*logio));
goto logio_done;
}
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
+ ql_dbg(ql_dbg_async, vha, 0x5036,
+ "Async-%s complete - portid=%02x%02x%02x "
"iop0=%x.\n",
- fcport->vha->host_no, sp->handle, type,
- fcport->d_id.b.domain, fcport->d_id.b.area,
+ type, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa,
- le32_to_cpu(logio->io_parameter[0])));
+ le32_to_cpu(logio->io_parameter[0]));
data[0] = MBS_COMMAND_COMPLETE;
if (ctx->type != SRB_LOGIN_CMD)
@@ -1256,14 +1237,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
}
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x "
+ ql_dbg(ql_dbg_async, vha, 0x5037,
+ "Async-%s failed - portid=%02x%02x%02x comp=%x "
"iop0=%x iop1=%x.\n",
- fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
+ type, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
- le32_to_cpu(logio->io_parameter[1])));
+ le32_to_cpu(logio->io_parameter[1]));
logio_done:
lio->done(sp);
@@ -1292,38 +1273,34 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fcport = sp->fcport;
if (sts->entry_status) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error - entry-status(%x).\n",
- fcport->vha->host_no, sp->handle, type,
- sts->entry_status));
+ ql_log(ql_log_warn, vha, 0x5038,
+ "Async-%s error - entry-status(%x).\n",
+ type, sts->entry_status);
} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error - completion status(%x).\n",
- fcport->vha->host_no, sp->handle, type,
- sts->comp_status));
+ ql_log(ql_log_warn, vha, 0x5039,
+ "Async-%s error - completion status(%x).\n",
+ type, sts->comp_status);
} else if (!(le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error - no response info(%x).\n",
- fcport->vha->host_no, sp->handle, type,
- sts->scsi_status));
+ ql_log(ql_log_warn, vha, 0x503a,
+ "Async-%s error - no response info(%x).\n",
+ type, sts->scsi_status);
} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error - not enough response(%d).\n",
- fcport->vha->host_no, sp->handle, type,
- sts->rsp_data_len));
+ ql_log(ql_log_warn, vha, 0x503b,
+ "Async-%s error - not enough response(%d).\n",
+ type, sts->rsp_data_len);
} else if (sts->data[3]) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error - response(%x).\n",
- fcport->vha->host_no, sp->handle, type,
- sts->data[3]));
+ ql_log(ql_log_warn, vha, 0x503c,
+ "Async-%s error - response(%x).\n",
+ type, sts->data[3]);
} else {
error = 0;
}
if (error) {
iocb->u.tmf.data = error;
- DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts)));
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
+ (uint8_t *)sts, sizeof(*sts));
}
iocb->done(sp);
@@ -1360,8 +1337,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
if (pkt->entry_status != 0) {
- DEBUG3(printk(KERN_INFO
- "scsi(%ld): Process error entry.\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x5035,
+ "Process error entry.\n");
qla2x00_error_entry(vha, rsp, pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1399,10 +1376,10 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
break;
default:
/* Type Not Supported. */
- DEBUG4(printk(KERN_WARNING
- "scsi(%ld): Received unknown response pkt type %x "
+ ql_log(ql_log_warn, vha, 0x504a,
+ "Received unknown response pkt type %x "
"entry status=%x.\n",
- vha->host_no, pkt->entry_type, pkt->entry_status));
+ pkt->entry_type, pkt->entry_status);
break;
}
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1418,6 +1395,7 @@ static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
uint32_t sense_len, struct rsp_que *rsp)
{
+ struct scsi_qla_host *vha = sp->fcport->vha;
struct scsi_cmnd *cp = sp->cmd;
if (sense_len >= SCSI_SENSE_BUFFERSIZE)
@@ -1435,11 +1413,13 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sp->request_sense_length != 0)
rsp->status_srb = sp;
- DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
- "cmd=%p\n", __func__, sp->fcport->vha->host_no,
- cp->device->channel, cp->device->id, cp->device->lun, cp));
+ ql_dbg(ql_dbg_io, vha, 0x301c,
+ "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
+ sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
+ cp->device->lun, cp);
if (sense_len)
- DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
+ cp->sense_buffer, sense_len);
}
struct scsi_dif_tuple {
@@ -1454,34 +1434,94 @@ struct scsi_dif_tuple {
* ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
* to indicate to the kernel that the HBA detected an error.
*/
-static inline void
+static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
+ struct scsi_qla_host *vha = sp->fcport->vha;
struct scsi_cmnd *cmd = sp->cmd;
- struct scsi_dif_tuple *ep =
- (struct scsi_dif_tuple *)&sts24->data[20];
- struct scsi_dif_tuple *ap =
- (struct scsi_dif_tuple *)&sts24->data[12];
+ uint8_t *ap = &sts24->data[12];
+ uint8_t *ep = &sts24->data[20];
uint32_t e_ref_tag, a_ref_tag;
uint16_t e_app_tag, a_app_tag;
uint16_t e_guard, a_guard;
- e_ref_tag = be32_to_cpu(ep->ref_tag);
- a_ref_tag = be32_to_cpu(ap->ref_tag);
- e_app_tag = be16_to_cpu(ep->app_tag);
- a_app_tag = be16_to_cpu(ap->app_tag);
- e_guard = be16_to_cpu(ep->guard);
- a_guard = be16_to_cpu(ap->guard);
-
- DEBUG18(printk(KERN_DEBUG
- "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24));
-
- DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
+ /*
+ * swab32 of the "data" field in the beginning of qla2x00_status_entry()
+ * would make the guard field appear at offset 2
+ */
+ a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
+ a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
+ a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
+ e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
+ e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
+ e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
+
+ ql_dbg(ql_dbg_io, vha, 0x3023,
+ "iocb(s) %p Returned STATUS.\n", sts24);
+
+ ql_dbg(ql_dbg_io, vha, 0x3024,
+ "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
" tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
- " tag=0x%x, act guard=0x%x, exp guard=0x%x\n",
+ " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
- a_app_tag, e_app_tag, a_guard, e_guard));
+ a_app_tag, e_app_tag, a_guard, e_guard);
+ /*
+ * Ignore sector if:
+ * For type 3: ref & app tag is all 'f's
+ * For type 0,1,2: app tag is all 'f's
+ */
+ if ((a_app_tag == 0xffff) &&
+ ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
+ (a_ref_tag == 0xffffffff))) {
+ uint32_t blocks_done, resid;
+ sector_t lba_s = scsi_get_lba(cmd);
+
+ /* 2TB boundary case covered automatically with this */
+ blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
+
+ resid = scsi_bufflen(cmd) - (blocks_done *
+ cmd->device->sector_size);
+
+ scsi_set_resid(cmd, resid);
+ cmd->result = DID_OK << 16;
+
+ /* Update protection tag */
+ if (scsi_prot_sg_count(cmd)) {
+ uint32_t i, j = 0, k = 0, num_ent;
+ struct scatterlist *sg;
+ struct sd_dif_tuple *spt;
+
+ /* Patch the corresponding protection tags */
+ scsi_for_each_prot_sg(cmd, sg,
+ scsi_prot_sg_count(cmd), i) {
+ num_ent = sg_dma_len(sg) / 8;
+ if (k + num_ent < blocks_done) {
+ k += num_ent;
+ continue;
+ }
+ j = blocks_done - k - 1;
+ k = blocks_done;
+ break;
+ }
+
+ if (k != blocks_done) {
+ qla_printk(KERN_WARNING, sp->fcport->vha->hw,
+ "unexpected tag values tag:lba=%x:%lx)\n",
+ e_ref_tag, lba_s);
+ return 1;
+ }
+
+ spt = page_address(sg_page(sg)) + sg->offset;
+ spt += j;
+
+ spt->app_tag = 0xffff;
+ if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
+ spt->ref_tag = 0xffffffff;
+ }
+
+ return 0;
+ }
/* check guard */
if (e_guard != a_guard) {
@@ -1490,28 +1530,30 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
- return;
+ return 1;
}
- /* check appl tag */
- if (e_app_tag != a_app_tag) {
+ /* check ref tag */
+ if (e_ref_tag != a_ref_tag) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
+ 0x10, 0x3);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
- return;
+ return 1;
}
- /* check ref tag */
- if (e_ref_tag != a_ref_tag) {
+ /* check appl tag */
+ if (e_app_tag != a_app_tag) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
+ 0x10, 0x2);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
- return;
+ return 1;
}
+
+ return 1;
}
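
The little-endian loads above at offsets 0, 2 and 4 follow from the 32-bit byte swap that the comment mentions. Here is a stand-alone demonstration (illustration only, assuming the tuple arrives big-endian as guard, app tag, ref tag per the usual T10 layout):

#include <stdint.h>
#include <stdio.h>

/* Reverse every 4-byte group, as a swab32 over the status "data" field does. */
static void swab32_buf(uint8_t *p, unsigned int len)
{
	unsigned int i;
	uint8_t t;

	for (i = 0; i + 3 < len; i += 4) {
		t = p[i];     p[i]     = p[i + 3]; p[i + 3] = t;
		t = p[i + 1]; p[i + 1] = p[i + 2]; p[i + 2] = t;
	}
}

int main(void)
{
	/* Big-endian T10 tuple: guard=0xABCD, app tag=0x1234, ref tag=0xDEADBEEF */
	uint8_t d[8] = { 0xAB, 0xCD, 0x12, 0x34, 0xDE, 0xAD, 0xBE, 0xEF };
	uint16_t app, guard;
	uint32_t ref;

	swab32_buf(d, sizeof(d));

	/* Little-endian loads at the offsets the driver uses. */
	app   = (uint16_t)(d[0] | d[1] << 8);
	guard = (uint16_t)(d[2] | d[3] << 8);
	ref   = (uint32_t)d[4] | (uint32_t)d[5] << 8 |
		(uint32_t)d[6] << 16 | (uint32_t)d[7] << 24;

	/* Prints app=0x1234 guard=0xabcd ref=0xdeadbeef */
	printf("app=0x%04x guard=0x%04x ref=0x%08x\n", app, guard, ref);
	return 0;
}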
/**
@@ -1569,9 +1611,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sp = NULL;
if (sp == NULL) {
- qla_printk(KERN_WARNING, ha,
- "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
- sts->handle);
+ ql_log(ql_log_warn, vha, 0x3017,
+ "Invalid status handle (0x%x).\n", sts->handle);
if (IS_QLA82XX(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1582,9 +1623,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
cp = sp->cmd;
if (cp == NULL) {
- qla_printk(KERN_WARNING, ha,
- "scsi(%ld): Command already returned (0x%x/%p).\n",
- vha->host_no, sts->handle, sp);
+ ql_log(ql_log_warn, vha, 0x3018,
+ "Command already returned (0x%x/%p).\n",
+ sts->handle, sp);
return;
}
@@ -1629,10 +1670,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
par_sense_len -= rsp_info_len;
}
if (rsp_info_len > 3 && rsp_info[3]) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): FCP I/O protocol failure "
- "(0x%x/0x%x).\n", vha->host_no, cp->device->id,
- cp->device->lun, rsp_info_len, rsp_info[3]));
+ ql_log(ql_log_warn, vha, 0x3019,
+ "FCP I/O protocol failure (0x%x/0x%x).\n",
+ rsp_info_len, rsp_info[3]);
cp->result = DID_BUS_BUSY << 16;
goto out;
@@ -1661,11 +1701,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): Mid-layer underflow "
+ ql_log(ql_log_warn, vha, 0x301a,
+ "Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
- vha->host_no, cp->device->id,
- cp->device->lun, resid, scsi_bufflen(cp));
+ resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
@@ -1674,9 +1713,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp->result = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
- vha->host_no, cp->device->id, cp->device->lun));
+ ql_log(ql_log_warn, vha, 0x301b,
+ "QUEUE FULL detected.\n");
break;
}
logit = 0;
@@ -1697,11 +1735,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) Dropped frame(s) detected "
- "(0x%x of 0x%x bytes).\n", vha->host_no,
- cp->device->id, cp->device->lun, resid,
- scsi_bufflen(cp)));
+ ql_log(ql_log_warn, vha, 0x301d,
+ "Dropped frame(s) detected "
+ "(0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
break;
@@ -1710,20 +1747,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): Mid-layer underflow "
+ ql_log(ql_log_warn, vha, 0x301e,
+ "Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
- vha->host_no, cp->device->id,
- cp->device->lun, resid, scsi_bufflen(cp));
+ resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
}
} else {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
- "of 0x%x bytes).\n", vha->host_no, cp->device->id,
- cp->device->lun, resid, scsi_bufflen(cp)));
+ ql_log(ql_log_warn, vha, 0x301f,
+ "Dropped frame(s) detected (0x%x "
+ "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
@@ -1739,10 +1774,8 @@ check_scsi_status:
*/
if (lscsi_status != 0) {
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
- vha->host_no, cp->device->id,
- cp->device->lun));
+ ql_log(ql_log_warn, vha, 0x3020,
+ "QUEUE FULL detected.\n");
logit = 1;
break;
}
@@ -1781,10 +1814,9 @@ check_scsi_status:
break;
}
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n",
- vha->host_no, cp->device->id, cp->device->lun,
- atomic_read(&fcport->state)));
+ ql_dbg(ql_dbg_io, vha, 0x3021,
+ "Port down status: port-state=0x%x.\n",
+ atomic_read(&fcport->state));
if (atomic_read(&fcport->state) == FCS_ONLINE)
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -1795,7 +1827,7 @@ check_scsi_status:
break;
case CS_DIF_ERROR:
- qla2x00_handle_dif_error(sp, sts24);
+ logit = qla2x00_handle_dif_error(sp, sts24);
break;
default:
cp->result = DID_ERROR << 16;
@@ -1804,15 +1836,13 @@ check_scsi_status:
out:
if (logit)
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
- "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x "
- "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
- cp->device->id, cp->device->lun, comp_status, scsi_status,
- cp->result, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1],
- cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len,
- fw_resid_len));
+ ql_dbg(ql_dbg_io, vha, 0x3022,
+ "FCP command status: 0x%x-0x%x (0x%x) "
+ "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
+ "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
+ comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
+ cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
+ resid_len, fw_resid_len);
if (rsp->status_srb == NULL)
qla2x00_sp_compl(ha, sp);
@@ -1830,16 +1860,15 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
uint8_t sense_sz = 0;
struct qla_hw_data *ha = rsp->hw;
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
srb_t *sp = rsp->status_srb;
struct scsi_cmnd *cp;
if (sp != NULL && sp->request_sense_length != 0) {
cp = sp->cmd;
if (cp == NULL) {
- DEBUG2(printk("%s(): Cmd already returned back to OS "
- "sp=%p.\n", __func__, sp));
- qla_printk(KERN_INFO, ha,
- "cmd is NULL: already returned to OS (sp=%p)\n",
+ ql_log(ql_log_warn, vha, 0x3025,
+ "cmd is NULL: already returned to OS (sp=%p).\n",
sp);
rsp->status_srb = NULL;
@@ -1856,7 +1885,8 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
if (IS_FWI2_CAPABLE(ha))
host_to_fcp_swap(pkt->data, sizeof(pkt->data));
memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
- DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
+ sp->request_sense_ptr, sense_sz);
sp->request_sense_ptr += sense_sz;
sp->request_sense_length -= sense_sz;
@@ -1882,21 +1912,25 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
uint32_t handle = LSW(pkt->handle);
uint16_t que = MSW(pkt->handle);
struct req_que *req = ha->req_q_map[que];
-#if defined(QL_DEBUG_LEVEL_2)
+
if (pkt->entry_status & RF_INV_E_ORDER)
- qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
+ ql_dbg(ql_dbg_async, vha, 0x502a,
+ "Invalid Entry Order.\n");
else if (pkt->entry_status & RF_INV_E_COUNT)
- qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
+ ql_dbg(ql_dbg_async, vha, 0x502b,
+ "Invalid Entry Count.\n");
else if (pkt->entry_status & RF_INV_E_PARAM)
- qla_printk(KERN_ERR, ha,
- "%s: Invalid Entry Parameter\n", __func__);
+ ql_dbg(ql_dbg_async, vha, 0x502c,
+ "Invalid Entry Parameter.\n");
else if (pkt->entry_status & RF_INV_E_TYPE)
- qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
+ ql_dbg(ql_dbg_async, vha, 0x502d,
+ "Invalid Entry Type.\n");
else if (pkt->entry_status & RF_BUSY)
- qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
+ ql_dbg(ql_dbg_async, vha, 0x502e,
+ "Busy.\n");
else
- qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
-#endif
+ ql_dbg(ql_dbg_async, vha, 0x502f,
+ "UNKNOWN flag error.\n");
/* Validate handle. */
if (handle < MAX_OUTSTANDING_COMMANDS)
@@ -1923,10 +1957,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
|| pkt->entry_type == COMMAND_TYPE_6) {
- DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha,
- "Error entry - invalid handle\n");
+ ql_log(ql_log_warn, vha, 0x5030,
+ "Error entry - invalid handle.\n");
if (IS_QLA82XX(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1960,11 +1992,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
}
if (ha->mcp) {
- DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
- __func__, vha->host_no, ha->mcp->mb[0]));
+ ql_dbg(ql_dbg_async, vha, 0x504d,
+ "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
} else {
- DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x504e,
+ "MBX pointer ERROR.\n");
}
}
@@ -1993,8 +2025,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
if (pkt->entry_status != 0) {
- DEBUG3(printk(KERN_INFO
- "scsi(%ld): Process error entry.\n", vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x5029,
+ "Process error entry.\n");
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2030,10 +2062,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
break;
default:
/* Type Not Supported. */
- DEBUG4(printk(KERN_WARNING
- "scsi(%ld): Received unknown response pkt type %x "
+ ql_dbg(ql_dbg_async, vha, 0x5042,
+ "Received unknown response pkt type %x "
"entry status=%x.\n",
- vha->host_no, pkt->entry_type, pkt->entry_status));
+ pkt->entry_type, pkt->entry_status);
break;
}
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2088,7 +2120,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
next_test:
if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
- qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
+ ql_log(ql_log_info, vha, 0x504c,
+ "Additional code -- 0x55AA.\n");
done:
WRT_REG_DWORD(&reg->iobase_window, 0x0000);
@@ -2121,7 +2154,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
@@ -2142,8 +2175,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
hccr = RD_REG_DWORD(&reg->hccr);
- qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
- "Dumping firmware!\n", hccr);
+ ql_log(ql_log_warn, vha, 0x504b,
+ "RISC paused -- HCCR=%x, Dumping firmware.\n",
+ hccr);
qla2xxx_check_risc_status(vha);
@@ -2174,9 +2208,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
default:
- DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
- "(%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_async, vha, 0x504f,
+ "Unrecognized interrupt type (%d).\n", stat * 0xff);
break;
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2205,7 +2238,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2235,7 +2268,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2268,8 +2301,8 @@ qla24xx_msix_default(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- DEBUG(printk(
- "%s(): NULL response queue pointer\n", __func__));
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2286,8 +2319,9 @@ qla24xx_msix_default(int irq, void *dev_id)
hccr = RD_REG_DWORD(&reg->hccr);
- qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
- "Dumping firmware!\n", hccr);
+ ql_log(ql_log_info, vha, 0x5050,
+ "RISC paused -- HCCR=%x, Dumping firmware.\n",
+ hccr);
qla2xxx_check_risc_status(vha);
@@ -2318,9 +2352,8 @@ qla24xx_msix_default(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
default:
- DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
- "(%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_async, vha, 0x5051,
+ "Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2358,6 +2391,7 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
{
int i;
struct qla_msix_entry *qentry;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
@@ -2368,6 +2402,8 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
kfree(ha->msix_entries);
ha->msix_entries = NULL;
ha->flags.msix_enabled = 0;
+ ql_dbg(ql_dbg_init, vha, 0x0042,
+ "Disabled the MSI.\n");
}
static int
@@ -2377,11 +2413,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
int i, ret;
struct msix_entry *entries;
struct qla_msix_entry *qentry;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
GFP_KERNEL);
- if (!entries)
+ if (!entries) {
+ ql_log(ql_log_warn, vha, 0x00bc,
+ "Failed to allocate memory for msix_entry.\n");
return -ENOMEM;
+ }
for (i = 0; i < ha->msix_count; i++)
entries[i].entry = i;
@@ -2391,16 +2431,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (ret < MIN_MSIX_COUNT)
goto msix_failed;
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Failed to enable support -- %d/%d\n"
- " Retry with %d vectors\n", ha->msix_count, ret, ret);
+ ql_log(ql_log_warn, vha, 0x00c6,
+ "MSI-X: Failed to enable support "
+ "-- %d/%d\n Retry with %d vectors.\n",
+ ha->msix_count, ret, ret);
ha->msix_count = ret;
ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
if (ret) {
msix_failed:
- qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
- " support, giving up -- %d/%d\n",
- ha->msix_count, ret);
+ ql_log(ql_log_fatal, vha, 0x00c7,
+ "MSI-X: Failed to enable support, "
+ "giving up -- %d/%d.\n",
+ ha->msix_count, ret);
goto msix_out;
}
ha->max_rsp_queues = ha->msix_count - 1;
@@ -2408,6 +2450,8 @@ msix_failed:
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
if (!ha->msix_entries) {
+ ql_log(ql_log_fatal, vha, 0x00c8,
+ "Failed to allocate memory for ha->msix_entries.\n");
ret = -ENOMEM;
goto msix_out;
}
@@ -2434,9 +2478,9 @@ msix_failed:
0, msix_entries[i].name, rsp);
}
if (ret) {
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Unable to register handler -- %x/%d.\n",
- qentry->vector, ret);
+ ql_log(ql_log_fatal, vha, 0x00cb,
+ "MSI-X: unable to register handler -- %x/%d.\n",
+ qentry->vector, ret);
qla24xx_disable_msix(ha);
ha->mqenable = 0;
goto msix_out;
@@ -2449,6 +2493,12 @@ msix_failed:
/* Enable MSI-X vector for response queue update for queue 0 */
if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
ha->mqenable = 1;
+ ql_dbg(ql_dbg_multiq, vha, 0xc005,
+ "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
+ ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
+ ql_dbg(ql_dbg_init, vha, 0x0055,
+ "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
+ ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
msix_out:
kfree(entries);
@@ -2460,6 +2510,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
int ret;
device_reg_t __iomem *reg = ha->iobase;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
@@ -2470,30 +2521,29 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
(ha->pdev->subsystem_device == 0x7040 ||
ha->pdev->subsystem_device == 0x7041 ||
ha->pdev->subsystem_device == 0x1705)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
+ ql_log(ql_log_warn, vha, 0x0034,
+ "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
ha->pdev->subsystem_vendor,
- ha->pdev->subsystem_device));
+ ha->pdev->subsystem_device);
goto skip_msi;
}
- if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
- !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
- ha->pdev->revision, ha->fw_attributes));
+ if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
+ ql_log(ql_log_warn, vha, 0x0035,
+ "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
+ ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
goto skip_msix;
}
ret = qla24xx_enable_msix(ha, rsp);
if (!ret) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
- ha->fw_attributes));
+ ql_dbg(ql_dbg_init, vha, 0x0036,
+ "MSI-X: Enabled (0x%X, 0x%X).\n",
+ ha->chip_revision, ha->fw_attributes);
goto clear_risc_ints;
}
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
+ ql_log(ql_log_info, vha, 0x0037,
+ "MSI-X Falling back-to MSI mode -%d.\n", ret);
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
@@ -2502,18 +2552,19 @@ skip_msix:
ret = pci_enable_msi(ha->pdev);
if (!ret) {
- DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
+ ql_dbg(ql_dbg_init, vha, 0x0038,
+ "MSI: Enabled.\n");
ha->flags.msi_enabled = 1;
} else
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
+ ql_log(ql_log_warn, vha, 0x0039,
+ "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
ha->flags.msi_enabled ? 0 : IRQF_SHARED,
QLA2XXX_DRIVER_NAME, rsp);
if (ret) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x003a,
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
goto fail;
@@ -2563,13 +2614,14 @@ int qla25xx_request_irq(struct rsp_que *rsp)
struct qla_hw_data *ha = rsp->hw;
struct qla_init_msix_entry *intr = &msix_entries[2];
struct qla_msix_entry *msix = rsp->msix;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int ret;
ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
if (ret) {
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Unable to register handler -- %x/%d.\n",
- msix->vector, ret);
+ ql_log(ql_log_fatal, vha, 0x00e6,
+ "MSI-X: Unable to register handler -- %x/%d.\n",
+ msix->vector, ret);
return ret;
}
msix->have_irq = 1;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index c26f0ac..f7604ea 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -46,14 +46,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- if (ha->pdev->error_state > pci_channel_io_frozen)
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
+
+ if (ha->pdev->error_state > pci_channel_io_frozen) {
+ ql_log(ql_log_warn, base_vha, 0x1001,
+ "error_state is greater than pci_channel_io_frozen, "
+ "exiting.\n");
return QLA_FUNCTION_TIMEOUT;
+ }
if (vha->device_flags & DFLG_DEV_FAILED) {
- DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
- "%s(%ld): Device in failed state, "
- "timeout MBX Exiting.\n",
- __func__, base_vha->host_no));
+ ql_log(ql_log_warn, base_vha, 0x1002,
+ "Device in failed state, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -63,17 +67,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_SUCCESS;
abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
if (ha->flags.pci_channel_io_perm_failure) {
- DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX "
- "Exiting.\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, base_vha, 0x1003,
+ "Perm failure on EEH timeout MBX, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
if (ha->flags.isp82xx_fw_hung) {
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+ ql_log(ql_log_warn, base_vha, 0x1004,
+ "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
rval = QLA_FUNCTION_FAILED;
goto premature_exit;
}
@@ -85,8 +90,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
*/
if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
/* Timeout occurred. Return error. */
- DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
- "Exiting.\n", __func__, base_vha->host_no));
+ ql_log(ql_log_warn, base_vha, 0x1005,
+ "Cmd access timeout, Exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -94,8 +99,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Save mailbox command for debug */
ha->mcp = mcp;
- DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
- base_vha->host_no, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
+ "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -123,27 +128,30 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
iptr++;
}
-#if defined(QL_DEBUG_LEVEL_1)
- printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n",
- __func__, base_vha->host_no);
- qla2x00_dump_buffer((uint8_t *)mcp->mb, 16);
- printk("\n");
- qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16);
- printk("\n");
- qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8);
- printk("\n");
- printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no,
- optr);
- qla2x00_dump_regs(base_vha);
-#endif
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
+ "Loaded MBX registers (displayed in bytes) =.\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
+ (uint8_t *)mcp->mb, 16);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
+ ".\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
+ ((uint8_t *)mcp->mb + 0x10), 16);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
+ ".\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
+ ((uint8_t *)mcp->mb + 0x20), 8);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
+ "I/O Address = %p.\n", optr);
+ ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
/* Issue set host interrupt command to send cmd out. */
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
/* Unlock mbx registers and wait for interrupt */
- DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. "
- "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
+ "Going to unlock irq & waiting for interrupts. "
+ "jiffies=%lx.\n", jiffies);
/* Wait for mbx cmd completion until timeout */
@@ -155,9 +163,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
- DEBUG2_3_11(printk(KERN_INFO
- "%s(%ld): Pending Mailbox timeout. "
- "Exiting.\n", __func__, base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
+ "Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
@@ -173,17 +180,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
} else {
- DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
- base_vha->host_no, command));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
+ "Cmd=%x Polling Mode.\n", command);
if (IS_QLA82XX(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
- DEBUG2_3_11(printk(KERN_INFO
- "%s(%ld): Pending Mailbox timeout. "
- "Exiting.\n", __func__, base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
+ "Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
@@ -207,17 +213,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
command == MBC_LOAD_RISC_RAM_EXTENDED))
msleep(10);
} /* while */
- DEBUG17(qla_printk(KERN_WARNING, ha,
- "Waited %d sec\n",
- (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
+ "Waited %d sec.\n",
+ (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
}
/* Check whether we timed out */
if (ha->flags.mbox_int) {
uint16_t *iptr2;
- DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__,
- base_vha->host_no, command));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
+ "Cmd=%x completed.\n", command);
/* Got interrupt. Clear the flag. */
ha->flags.mbox_int = 0;
@@ -229,6 +235,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ha->mcp = NULL;
rval = QLA_FUNCTION_FAILED;
+ ql_log(ql_log_warn, base_vha, 0x1015,
+ "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
goto premature_exit;
}
@@ -249,8 +257,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
}
} else {
-#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3) || \
- defined(QL_DEBUG_LEVEL_11)
uint16_t mb0;
uint32_t ictrl;
@@ -261,14 +267,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
ictrl = RD_REG_WORD(&reg->isp.ictrl);
}
- printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
- __func__, base_vha->host_no, command);
- printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__,
- base_vha->host_no, ictrl, jiffies);
- printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__,
- base_vha->host_no, mb0);
- qla2x00_dump_regs(base_vha);
-#endif
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
+ "MBX Command timeout for cmd %x.\n", command);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
+ "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
+ "mb[0] = 0x%x.\n", mb0);
+ ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
rval = QLA_FUNCTION_TIMEOUT;
}
@@ -279,8 +284,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->mcp = NULL;
if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
- DEBUG11(printk("%s(%ld): checking for additional resp "
- "interrupt.\n", __func__, base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
+ "Checking for additional resp interrupt.\n");
/* polling mode for non isp_abort commands. */
qla2x00_poll(ha->rsp_q_map[0]);
@@ -291,38 +296,32 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
ha->flags.eeh_busy) {
/* not in dpc. schedule it for dpc to take over. */
- DEBUG(printk("%s(%ld): timeout schedule "
- "isp_abort_needed.\n", __func__,
- base_vha->host_no));
- DEBUG2_3_11(printk("%s(%ld): timeout schedule "
- "isp_abort_needed.\n", __func__,
- base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
+ "Timeout, schedule isp_abort_needed.\n");
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- qla_printk(KERN_WARNING, ha,
- "Mailbox command timeout occurred. "
- "Scheduling ISP " "abort. eeh_busy: 0x%x\n",
- ha->flags.eeh_busy);
+ ql_log(ql_log_info, base_vha, 0x101c,
+ "Mailbox cmd timeout occured. "
+ "Scheduling ISP abort eeh_busy=0x%x.\n",
+ ha->flags.eeh_busy);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
} else if (!abort_active) {
/* call abort directly since we are in the DPC thread */
- DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
- __func__, base_vha->host_no));
- DEBUG2_3_11(printk("%s(%ld): timeout calling "
- "abort_isp\n", __func__, base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
+ "Timeout, calling abort_isp.\n");
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- qla_printk(KERN_WARNING, ha,
- "Mailbox command timeout occurred. "
- "Issuing ISP abort.\n");
+ ql_log(ql_log_info, base_vha, 0x101e,
+ "Mailbox cmd timeout occured. "
+ "Scheduling ISP abort.\n");
set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -332,11 +331,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
&vha->dpc_flags);
}
clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
- DEBUG(printk("%s(%ld): finished abort_isp\n",
- __func__, vha->host_no));
- DEBUG2_3_11(printk(
- "%s(%ld): finished abort_isp\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
+ "Finished abort_isp.\n");
}
}
}
@@ -346,12 +342,11 @@ premature_exit:
complete(&ha->mbx_cmd_comp);
if (rval) {
- DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
- "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no,
- mcp->mb[0], mcp->mb[1], mcp->mb[2], command));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
+ "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
+ mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__,
- base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
return rval;
@@ -366,7 +361,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -397,10 +392,10 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1023,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
}
return rval;
@@ -430,7 +425,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
@@ -461,15 +456,14 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1026,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
if (IS_FWI2_CAPABLE(ha)) {
- DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
- __func__, vha->host_no, mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1027,
+ "Done exchanges=%x.\n", mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__,
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
}
}
@@ -501,7 +495,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
mcp->out_mb = MBX_0;
@@ -535,11 +529,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
failed:
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
}
return rval;
}
@@ -565,7 +558,7 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
mcp->out_mb = MBX_0;
@@ -576,15 +569,14 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
} else {
fwopts[0] = mcp->mb[0];
fwopts[1] = mcp->mb[1];
fwopts[2] = mcp->mb[2];
fwopts[3] = mcp->mb[3];
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
}
return rval;
@@ -612,7 +604,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
mcp->mb[1] = fwopts[1];
@@ -636,11 +628,11 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1030,
+ "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
}
return rval;
@@ -668,7 +660,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
mcp->mb[1] = 0xAAAA;
@@ -695,12 +687,10 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
}
return rval;
@@ -728,7 +718,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_VERIFY_CHECKSUM;
mcp->out_mb = MBX_0;
@@ -749,11 +739,11 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
- vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ?
- (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1036,
+ "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
+ (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
}
return rval;
@@ -785,6 +775,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
+
mcp->mb[0] = MBC_IOCB_COMMAND_A64;
mcp->mb[1] = 0;
mcp->mb[2] = MSW(phys_addr);
@@ -799,14 +791,14 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
} else {
sts_entry_t *sts_entry = (sts_entry_t *) buffer;
/* Mask reserved bits. */
sts_entry->entry_status &=
IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
+ ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
}
return rval;
@@ -847,7 +839,7 @@ qla2x00_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
- DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -876,11 +868,9 @@ qla2x00_abort_command(srb_t *sp)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
}
return rval;
@@ -896,10 +886,11 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
struct req_que *req;
struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
-
l = l;
vha = fcport->vha;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
+
req = vha->hw->req_q_map[0];
rsp = req->rsp;
mcp->mb[0] = MBC_ABORT_TARGET;
@@ -919,18 +910,17 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
}
/* Issue marker IOCB. */
rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
MK_SYNC_ID);
if (rval2 != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
- "(%x).\n", __func__, vha->host_no, rval2));
+ ql_dbg(ql_dbg_mbx, vha, 0x1040,
+ "Failed to issue marker IOCB (%x).\n", rval2);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
}
return rval;
@@ -946,9 +936,10 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
struct req_que *req;
struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
-
vha = fcport->vha;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
+
req = vha->hw->req_q_map[0];
rsp = req->rsp;
mcp->mb[0] = MBC_LUN_RESET;
@@ -966,18 +957,17 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
}
/* Issue marker IOCB. */
rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
MK_SYNC_ID_LUN);
if (rval2 != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
- "(%x).\n", __func__, vha->host_no, rval2));
+ ql_dbg(ql_dbg_mbx, vha, 0x1044,
+ "Failed to issue marker IOCB (%x).\n", rval2);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
}
return rval;
@@ -1011,8 +1001,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
mcp->mb[9] = vha->vp_idx;
@@ -1038,11 +1027,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
if (IS_QLA8XXX_TYPE(vha->hw)) {
vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1083,8 +1070,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_RETRY_COUNT;
mcp->out_mb = MBX_0;
@@ -1095,8 +1081,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n",
- vha->host_no, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x104a,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/* Convert returned data and check our values. */
*r_a_tov = mcp->mb[3] / 2;
@@ -1107,8 +1093,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
*tov = ratov;
}
- DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d "
- "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov));
+ ql_dbg(ql_dbg_mbx, vha, 0x104b,
+ "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
}
return rval;
@@ -1139,8 +1125,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
if (IS_QLA82XX(ha) && ql2xdbwr)
qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1174,13 +1159,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x "
- "mb0=%x.\n",
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x104d,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
}
return rval;
@@ -1213,13 +1196,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
dma_addr_t pd_dma;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
pd24 = NULL;
pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Port Database "
- "structure.\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x1050,
+ "Failed to allocate port database structure.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1261,12 +1244,10 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
/* Check for logged in state. */
if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
pd24->last_login_state != PDS_PRLI_COMPLETE) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld): Unable to verify login-state (%x/%x) "
- " - portid=%02x%02x%02x.\n", vha->host_no,
- pd24->current_login_state, pd24->last_login_state,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_mbx, vha, 0x1051,
+ "Unable to verify login-state (%x/%x) for "
+ "loop_id %x.\n", pd24->current_login_state,
+ pd24->last_login_state, fcport->loop_id);
rval = QLA_FUNCTION_FAILED;
goto gpd_error_out;
}
@@ -1290,12 +1271,11 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
/* Check for logged in state. */
if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld): Unable to verify login-state (%x/%x) "
- " - portid=%02x%02x%02x.\n", vha->host_no,
- pd->master_state, pd->slave_state,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_mbx, vha, 0x100a,
+ "Unable to verify login-state (%x/%x) - "
+ "portid=%02x%02x%02x.\n", pd->master_state,
+ pd->slave_state, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
rval = QLA_FUNCTION_FAILED;
goto gpd_error_out;
}
@@ -1325,10 +1305,11 @@ gpd_error_out:
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1052,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
+ mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
}
return rval;
@@ -1357,8 +1338,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
@@ -1381,12 +1361,10 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): "
- "failed=%x.\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
}
return rval;
@@ -1418,8 +1396,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_PORT_NAME;
mcp->mb[9] = vha->vp_idx;
@@ -1439,8 +1416,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
} else {
if (name != NULL) {
/* This function returns name in big endian. */
@@ -1454,8 +1430,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
name[7] = LSB(mcp->mb[7]);
}
- DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
}
return rval;
@@ -1483,7 +1458,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
if (IS_QLA8XXX_TYPE(vha->hw)) {
/* Logout across all FCFs. */
@@ -1517,11 +1492,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n",
- __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
}
return rval;
@@ -1553,12 +1527,11 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
- DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total "
- "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout,
- mcp->tov));
+ ql_dbg(ql_dbg_mbx, vha, 0x105e,
+ "Retry cnt=%d ratov=%d total tov=%d.\n",
+ vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
mcp->mb[0] = MBC_SEND_SNS_COMMAND;
mcp->mb[1] = cmd_size;
@@ -1575,13 +1548,12 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
- "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
- DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
- "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x105f,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
}
return rval;
@@ -1600,7 +1572,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct req_que *req;
struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
if (ha->flags.cpu_affinity_enabled)
req = ha->req_q_map[0];
@@ -1610,8 +1582,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x1062,
+ "Failed to allocate login IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1631,21 +1603,21 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->vp_index = vha->vp_idx;
rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
- "(%x).\n", __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1063,
+ "Failed to issue login IOCB (%x).\n", rval);
} else if (lg->entry_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, vha->host_no,
- lg->entry_status));
+ ql_dbg(ql_dbg_mbx, vha, 0x1064,
+ "Failed to complete IOCB -- error status (%x).\n",
+ lg->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
iop[0] = le32_to_cpu(lg->io_parameter[0]);
iop[1] = le32_to_cpu(lg->io_parameter[1]);
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x) ioparam=%x/%x.\n", __func__,
- vha->host_no, le16_to_cpu(lg->comp_status), iop[0],
- iop[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1065,
+ "Failed to complete IOCB -- completion status (%x) "
+ "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
+ iop[0], iop[1]);
switch (iop[0]) {
case LSC_SCODE_PORTID_USED:
@@ -1673,7 +1645,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
break;
}
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
iop[0] = le32_to_cpu(lg->io_parameter[0]);
@@ -1728,7 +1700,7 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1771,13 +1743,12 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
rval = QLA_SUCCESS;
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x "
- "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[2]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1068,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
}
return rval;
@@ -1808,13 +1779,13 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
+
if (IS_FWI2_CAPABLE(ha))
return qla24xx_login_fabric(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mb_ret, opt);
- DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
if (HAS_EXTENDED_IDS(ha))
mcp->mb[1] = fcport->loop_id;
@@ -1845,15 +1816,12 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
rval = QLA_SUCCESS;
- DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
- "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
- DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
- "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
+ ql_dbg(ql_dbg_mbx, vha, 0x106b,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
} else {
/*EMPTY*/
- DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
}
return (rval);
@@ -1870,12 +1838,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct req_que *req;
struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x106e,
+ "Failed to allocate logout IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1899,22 +1867,22 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
- "(%x).\n", __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x106f,
+ "Failed to issue logout IOCB (%x).\n", rval);
} else if (lg->entry_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, vha->host_no,
- lg->entry_status));
+ ql_dbg(ql_dbg_mbx, vha, 0x1070,
+ "Failed to complete IOCB -- error status (%x).\n",
+ lg->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB "
- "-- completion status (%x) ioparam=%x/%x.\n", __func__,
- vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status),
+ ql_dbg(ql_dbg_mbx, vha, 0x1071,
+ "Failed to complete IOCB -- completion status (%x) "
+ "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
le32_to_cpu(lg->io_parameter[0]),
- le32_to_cpu(lg->io_parameter[1])));
+ le32_to_cpu(lg->io_parameter[1]));
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1946,8 +1914,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
mcp->out_mb = MBX_1|MBX_0;
@@ -1966,12 +1933,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x "
- "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1074,
+ "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
}
return rval;
@@ -1999,8 +1965,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2014,12 +1979,10 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
}
return rval;
@@ -2045,8 +2008,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
if (id_list == NULL)
return QLA_FUNCTION_FAILED;
@@ -2075,12 +2037,10 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
} else {
*entries = mcp->mb[1];
- DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
}
return rval;
@@ -2108,7 +2068,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
@@ -2121,14 +2081,14 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__,
- vha->host_no, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x107d,
+ "Failed mb[0]=%x.\n", mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
- "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__,
- vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3],
- mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11],
- mcp->mb[12]));
+ ql_dbg(ql_dbg_mbx, vha, 0x107e,
+ "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
+ "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
+ mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
+ mcp->mb[11], mcp->mb[12]);
if (cur_xchg_cnt)
*cur_xchg_cnt = mcp->mb[3];
@@ -2147,7 +2107,6 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
return (rval);
}
-#if defined(QL_DEBUG_LEVEL_3)
/*
* qla2x00_get_fcal_position_map
* Get FCAL (LILP) position map using mailbox command
@@ -2172,10 +2131,12 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
dma_addr_t pmap_dma;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
+
pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
- DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x1080,
+ "Memory alloc failed.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2193,10 +2154,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval == QLA_SUCCESS) {
- DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map "
- "size (%x)\n", __func__, vha->host_no, mcp->mb[0],
- mcp->mb[1], (unsigned)pmap[0]));
- DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1));
+ ql_dbg(ql_dbg_mbx, vha, 0x1081,
+ "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
+ mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
+ pmap, pmap[0] + 1);
if (pos_map)
memcpy(pos_map, pmap, FCAL_MAP_SIZE);
@@ -2204,15 +2166,13 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
}
return rval;
}
-#endif
/*
* qla2x00_get_link_status
@@ -2237,7 +2197,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
uint32_t *siter, *diter, dwords;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_STATUS;
mcp->mb[2] = MSW(stats_dma);
@@ -2266,11 +2226,12 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
- DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
- __func__, vha->host_no, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1085,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
rval = QLA_FUNCTION_FAILED;
} else {
/* Copy over data -- firmware data is LE. */
+ ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
dwords = offsetof(struct link_statistics, unused1) / 4;
siter = diter = &stats->link_fail_cnt;
while (dwords--)
@@ -2278,8 +2239,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
}
} else {
/* Failed. */
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
}
return rval;
@@ -2294,7 +2254,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
mbx_cmd_t *mcp = &mc;
uint32_t *siter, *diter, dwords;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
mcp->mb[2] = MSW(stats_dma);
@@ -2312,10 +2272,11 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
- DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
- __func__, vha->host_no, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1089,
+ "Failed mb[0]=%x.\n", mcp->mb[0]);
rval = QLA_FUNCTION_FAILED;
} else {
+ ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
/* Copy over data -- firmware data is LE. */
dwords = sizeof(struct link_statistics) / 4;
siter = diter = &stats->link_fail_cnt;
@@ -2324,8 +2285,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
}
} else {
/* Failed. */
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
}
return rval;
@@ -2345,7 +2305,7 @@ qla24xx_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2360,8 +2320,8 @@ qla24xx_abort_command(srb_t *sp)
abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
if (abt == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x108d,
+ "Failed to allocate abort IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2380,20 +2340,20 @@ qla24xx_abort_command(srb_t *sp)
rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
- __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x108e,
+ "Failed to issue IOCB (%x).\n", rval);
} else if (abt->entry_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, vha->host_no,
- abt->entry_status));
+ ql_dbg(ql_dbg_mbx, vha, 0x108f,
+ "Failed to complete IOCB -- error status (%x).\n",
+ abt->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, vha->host_no,
- le16_to_cpu(abt->nport_handle)));
+ ql_dbg(ql_dbg_mbx, vha, 0x1090,
+ "Failed to complete IOCB -- completion status (%x).\n",
+ le16_to_cpu(abt->nport_handle));
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2421,19 +2381,20 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
struct req_que *req;
struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
-
vha = fcport->vha;
ha = vha->hw;
req = vha->req;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
+
if (ha->flags.cpu_affinity_enabled)
rsp = ha->rsp_q_map[tag + 1];
else
rsp = req->rsp;
tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
- "IOCB.\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x1093,
+ "Failed to allocate task management IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2457,30 +2418,30 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
sts = &tsk->p.sts;
rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB "
- "(%x).\n", __func__, vha->host_no, name, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1094,
+ "Failed to issue %s reset IOCB (%x).\n", name, rval);
} else if (sts->entry_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, vha->host_no,
- sts->entry_status));
+ ql_dbg(ql_dbg_mbx, vha, 0x1095,
+ "Failed to complete IOCB -- error status (%x).\n",
+ sts->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (sts->comp_status !=
__constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__,
- vha->host_no, le16_to_cpu(sts->comp_status)));
+ ql_dbg(ql_dbg_mbx, vha, 0x1096,
+ "Failed to complete IOCB -- completion status (%x).\n",
+ le16_to_cpu(sts->comp_status));
rval = QLA_FUNCTION_FAILED;
} else if (le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID) {
if (le32_to_cpu(sts->rsp_data_len) < 4) {
- DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent "
- "data length -- not enough response info (%d).\n",
- __func__, vha->host_no,
- le32_to_cpu(sts->rsp_data_len)));
+ ql_dbg(ql_dbg_mbx, vha, 0x1097,
+ "Ignoring inconsistent data length -- not enough "
+ "response info (%d).\n",
+ le32_to_cpu(sts->rsp_data_len));
} else if (sts->data[3]) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- response (%x).\n", __func__,
- vha->host_no, sts->data[3]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1098,
+ "Failed to complete IOCB -- response (%x).\n",
+ sts->data[3]);
rval = QLA_FUNCTION_FAILED;
}
}
@@ -2489,10 +2450,10 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
if (rval2 != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
- "(%x).\n", __func__, vha->host_no, rval2));
+ ql_dbg(ql_dbg_mbx, vha, 0x1099,
+ "Failed to issue marker IOCB (%x).\n", rval2);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2533,7 +2494,7 @@ qla2x00_system_error(scsi_qla_host_t *vha)
if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
mcp->out_mb = MBX_0;
@@ -2543,10 +2504,9 @@ qla2x00_system_error(scsi_qla_host_t *vha)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
}
return rval;
@@ -2566,7 +2526,7 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SERDES_PARAMS;
mcp->mb[1] = BIT_0;
@@ -2581,11 +2541,11 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x109f,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
}
return rval;
@@ -2601,7 +2561,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_STOP_FIRMWARE;
mcp->out_mb = MBX_0;
@@ -2611,12 +2571,11 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
if (mcp->mb[0] == MBS_INVALID_COMMAND)
rval = QLA_INVALID_COMMAND;
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
}
return rval;
@@ -2630,14 +2589,14 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_EFT_ENABLE;
mcp->mb[2] = LSW(eft_dma);
@@ -2652,10 +2611,11 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a5,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
}
return rval;
@@ -2668,14 +2628,14 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_EFT_DISABLE;
mcp->out_mb = MBX_1|MBX_0;
@@ -2684,10 +2644,11 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a8,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
}
return rval;
@@ -2701,14 +2662,14 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
+
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_FCE_ENABLE;
mcp->mb[2] = LSW(fce_dma);
@@ -2727,10 +2688,11 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ab,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
if (mb)
memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2748,14 +2710,14 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_FCE_DISABLE;
mcp->mb[2] = TC_FCE_DISABLE_TRACE;
@@ -2766,10 +2728,11 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ae,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
if (wr)
*wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2794,11 +2757,11 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
+
if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = mcp->mb[3] = 0;
@@ -2817,10 +2780,9 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
}
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
if (port_speed)
*port_speed = mcp->mb[3];
}
@@ -2836,11 +2798,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
+
if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
@@ -2863,10 +2825,9 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
}
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
}
return rval;
@@ -2882,33 +2843,36 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
scsi_qla_host_t *vp;
unsigned long flags;
+ ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
+
if (rptid_entry->entry_status != 0)
return;
if (rptid_entry->format == 0) {
- DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
- " number of VPs acquired %d\n", __func__, vha->host_no,
- MSB(le16_to_cpu(rptid_entry->vp_count)),
- LSB(le16_to_cpu(rptid_entry->vp_count))));
- DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
- rptid_entry->port_id[2], rptid_entry->port_id[1],
- rptid_entry->port_id[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b7,
+ "Format 0 : Number of VPs setup %d, number of "
+ "VPs acquired %d.\n",
+ MSB(le16_to_cpu(rptid_entry->vp_count)),
+ LSB(le16_to_cpu(rptid_entry->vp_count)));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b8,
+ "Primary port id %02x%02x%02x.\n",
+ rptid_entry->port_id[2], rptid_entry->port_id[1],
+ rptid_entry->port_id[0]);
} else if (rptid_entry->format == 1) {
vp_idx = LSB(stat);
- DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
- "- status %d - "
- "with port id %02x%02x%02x\n", __func__, vha->host_no,
- vp_idx, MSB(stat),
+ ql_dbg(ql_dbg_mbx, vha, 0x10b9,
+ "Format 1: VP[%d] enabled - status %d - with "
+ "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
rptid_entry->port_id[2], rptid_entry->port_id[1],
- rptid_entry->port_id[0]));
+ rptid_entry->port_id[0]);
vp = vha;
if (vp_idx == 0 && (MSB(stat) != 1))
goto reg_needed;
if (MSB(stat) == 1) {
- DEBUG2(printk("scsi(%ld): Could not acquire ID for "
- "VP[%d].\n", vha->host_no, vp_idx));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ba,
+ "Could not acquire ID for VP[%d].\n", vp_idx);
return;
}
@@ -2963,10 +2927,12 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
/* This can be called by the parent */
+ ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
+
vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
if (!vpmod) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
- "IOCB.\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x10bc,
+ "Failed to allocate modify VP IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
@@ -2983,22 +2949,21 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
- "(%x).\n", __func__, base_vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10bd,
+ "Failed to issue VP config IOCB (%x).\n", rval);
} else if (vpmod->comp_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, base_vha->host_no,
- vpmod->comp_status));
+ ql_dbg(ql_dbg_mbx, vha, 0x10be,
+ "Failed to complete IOCB -- error status (%x).\n",
+ vpmod->comp_status);
rval = QLA_FUNCTION_FAILED;
} else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, base_vha->host_no,
- le16_to_cpu(vpmod->comp_status)));
+ ql_dbg(ql_dbg_mbx, vha, 0x10bf,
+ "Failed to complete IOCB -- completion status (%x).\n",
+ le16_to_cpu(vpmod->comp_status));
rval = QLA_FUNCTION_FAILED;
} else {
/* EMPTY */
- DEBUG11(printk("%s(%ld): done.\n", __func__,
- base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
}
dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3032,17 +2997,16 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
int vp_index = vha->vp_idx;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
- vha->host_no, vp_index));
+ ql_dbg(ql_dbg_mbx, vha, 0x10c1,
+ "Entered %s enabling index %d.\n", __func__, vp_index);
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
return QLA_PARAMETER_ERROR;
vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
if (!vce) {
- DEBUG2_3(printk("%s(%ld): "
- "failed to allocate VP Control IOCB.\n", __func__,
- base_vha->host_no));
+ ql_log(ql_log_warn, vha, 0x10c2,
+ "Failed to allocate VP control IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -3063,28 +3027,20 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
- "(%x).\n", __func__, base_vha->host_no, rval));
- printk("%s(%ld): failed to issue VP control IOCB"
- "(%x).\n", __func__, base_vha->host_no, rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x10c3,
+ "Failed to issue VP control IOCB (%x).\n", rval);
} else if (vce->entry_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, base_vha->host_no,
- vce->entry_status));
- printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, base_vha->host_no,
+ ql_dbg(ql_dbg_mbx, vha, 0x10c4,
+ "Failed to complete IOCB -- error status (%x).\n",
vce->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, base_vha->host_no,
- le16_to_cpu(vce->comp_status)));
- printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, base_vha->host_no,
+ ql_dbg(ql_dbg_mbx, vha, 0x10c5,
+ "Failed to complet IOCB -- completion status (%x).\n",
le16_to_cpu(vce->comp_status));
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3121,6 +3077,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
+
/*
* This command is implicitly executed by firmware during login for the
* physical hosts
@@ -3155,7 +3113,7 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3186,10 +3144,10 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1008,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
}
return rval;
@@ -3214,12 +3172,10 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
unsigned long flags;
struct qla_hw_data *ha = vha->hw;
- DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (mn == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
- "IOCB.\n", __func__, vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
@@ -3237,43 +3193,43 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
mn->p.req.entry_count = 1;
mn->p.req.options = cpu_to_le16(options);
- DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__,
- vha->host_no));
- DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
- sizeof(*mn)));
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
+ "Dump of Verify Request.\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
+ (uint8_t *)mn, sizeof(*mn));
rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
if (rval != QLA_SUCCESS) {
- DEBUG2_16(printk("%s(%ld): failed to issue Verify "
- "IOCB (%x).\n", __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10cb,
+ "Failed to issue verify IOCB (%x).\n", rval);
goto verify_done;
}
- DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__,
- vha->host_no));
- DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
- sizeof(*mn)));
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
+ "Dump of Verify Response.\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
+ (uint8_t *)mn, sizeof(*mn));
status[0] = le16_to_cpu(mn->p.rsp.comp_status);
status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
le16_to_cpu(mn->p.rsp.failure_code) : 0;
- DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__,
- vha->host_no, status[0], status[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ce,
+ "cs=%x fc=%x.\n", status[0], status[1]);
if (status[0] != CS_COMPLETE) {
rval = QLA_FUNCTION_FAILED;
if (!(options & VCO_DONT_UPDATE_FW)) {
- DEBUG2_16(printk("%s(%ld): Firmware update "
- "failed. Retrying without update "
- "firmware.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10cf,
+ "Firmware update failed. Retrying "
+ "without update firmware.\n");
options |= VCO_DONT_UPDATE_FW;
options &= ~VCO_FORCE_UPDATE;
retry = 1;
}
} else {
- DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n",
- __func__, vha->host_no,
- le32_to_cpu(mn->p.rsp.fw_ver)));
+ ql_dbg(ql_dbg_mbx, vha, 0x10d0,
+ "Firmware updated to %x.\n",
+ le32_to_cpu(mn->p.rsp.fw_ver));
/* NOTE: we only update OP firmware. */
spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
@@ -3288,10 +3244,9 @@ verify_done:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
if (rval != QLA_SUCCESS) {
- DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
} else {
- DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
}
return rval;
@@ -3307,6 +3262,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
+
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = req->options;
mcp->mb[2] = MSW(LSD(req->dma));
@@ -3344,9 +3301,13 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = qla2x00_mailbox_command(vha, mcp);
- if (rval != QLA_SUCCESS)
- DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0]));
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d4,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
+ }
+
return rval;
}
@@ -3360,6 +3321,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
+
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = rsp->options;
mcp->mb[2] = MSW(LSD(rsp->dma));
@@ -3393,10 +3356,13 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = qla2x00_mailbox_command(vha, mcp);
- if (rval != QLA_SUCCESS)
- DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x "
- "mb0=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d7,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
+ }
+
return rval;
}
@@ -3407,7 +3373,7 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_IDC_ACK;
memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3418,10 +3384,10 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10da,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
}
return rval;
@@ -3434,11 +3400,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
+
if (!IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
mcp->out_mb = MBX_1|MBX_0;
@@ -3448,10 +3414,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10dd,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
*sector_size = mcp->mb[1];
}
@@ -3468,7 +3435,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
if (!IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3480,10 +3447,11 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e0,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
}
return rval;
@@ -3499,7 +3467,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
if (!IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3514,11 +3482,11 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
- "mb[2]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0],
- mcp->mb[1], mcp->mb[2]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e3,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
}
return rval;
@@ -3531,7 +3499,7 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_RESTART_MPI_FW;
mcp->out_mb = MBX_0;
@@ -3541,10 +3509,11 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e6,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
}
return rval;
@@ -3559,11 +3528,11 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
if (len == 1)
opt |= BIT_0;
@@ -3586,10 +3555,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
*sfp = mcp->mb[1];
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
}
return rval;
@@ -3604,11 +3573,11 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
if (len == 1)
opt |= BIT_0;
@@ -3631,10 +3600,10 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ec,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
}
return rval;
@@ -3648,11 +3617,11 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
+
if (!IS_QLA8XXX_TYPE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_GET_XGMAC_STATS;
mcp->mb[2] = MSW(stats_dma);
mcp->mb[3] = LSW(stats_dma);
@@ -3666,11 +3635,12 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
- "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[2]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ef,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
+
*actual_size = mcp->mb[2] << 2;
}
@@ -3686,11 +3656,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
+
if (!IS_QLA8XXX_TYPE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_GET_DCBX_PARAMS;
mcp->mb[1] = 0;
mcp->mb[2] = MSW(tlv_dma);
@@ -3705,11 +3675,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
- "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[2]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f2,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
}
return rval;
@@ -3722,11 +3692,11 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_READ_RAM_EXTENDED;
mcp->mb[1] = LSW(risc_addr);
mcp->mb[8] = MSW(risc_addr);
@@ -3736,10 +3706,10 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f5,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
*data = mcp->mb[3] << 16 | mcp->mb[2];
}
@@ -3755,7 +3725,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mbx_cmd_t *mcp = &mc;
uint32_t iter_cnt = 0x1;
- DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3794,15 +3764,12 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2(printk(KERN_WARNING
- "(%ld): failed=%x mb[0]=0x%x "
- "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x "
- "mb[19]=0x%x.\n",
- vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
- mcp->mb[3], mcp->mb[18], mcp->mb[19]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f8,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
+ "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
+ mcp->mb[3], mcp->mb[18], mcp->mb[19]);
} else {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld): done.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
}
/* Copy mailbox information */
@@ -3819,7 +3786,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3858,12 +3825,11 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2(printk(KERN_WARNING
- "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
- vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10fb,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld): done.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
}
/* Copy mailbox information */
@@ -3872,14 +3838,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
}
int
-qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
+qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__,
- ha->host_no, enable_diagnostic));
+ ql_dbg(ql_dbg_mbx, vha, 0x10fd,
+ "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
mcp->mb[0] = MBC_ISP84XX_RESET;
mcp->mb[1] = enable_diagnostic;
@@ -3887,13 +3853,12 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS)
- DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
- rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
else
- DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
return rval;
}
@@ -3905,11 +3870,11 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
mcp->mb[1] = LSW(risc_addr);
mcp->mb[2] = LSW(data);
@@ -3921,10 +3886,10 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1101,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
}
return rval;
@@ -3941,8 +3906,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
rval = QLA_SUCCESS;
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
@@ -3982,11 +3946,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
rval = QLA_FUNCTION_FAILED;
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
- __func__, vha->host_no, rval, mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1104,
+ "Failed=%x mb[0]=%x.\n", rval, mb[0]);
} else {
- DEBUG11(printk(KERN_INFO
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
}
return rval;
@@ -3999,12 +3962,11 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_DATA_RATE;
mcp->mb[1] = 0;
mcp->out_mb = MBX_1|MBX_0;
@@ -4013,11 +3975,10 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1107,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk(KERN_INFO
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
if (mcp->mb[1] != 0x7)
ha->link_data_rate = mcp->mb[1];
}
@@ -4033,8 +3994,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk(KERN_INFO
- "%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
if (!IS_QLA81XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4047,15 +4007,13 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x110a,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/* Copy all bits to preserve original value */
memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
- DEBUG11(printk(KERN_INFO
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
}
return rval;
}
@@ -4067,8 +4025,7 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk(KERN_INFO
- "%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_PORT_CONFIG;
/* Copy all bits to preserve original setting */
@@ -4080,12 +4037,10 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x110d,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else
- DEBUG11(printk(KERN_INFO
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
return rval;
}
@@ -4100,12 +4055,11 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
+
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk(KERN_INFO
- "%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
if (ha->flags.fcp_prio_enabled)
@@ -4127,12 +4081,9 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
}
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk(KERN_INFO
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
}
return rval;
@@ -4145,13 +4096,12 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
uint8_t byte;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
/* Integer part */
rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
ha->flags.thermal_supported = 0;
goto fail;
}
@@ -4160,14 +4110,13 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
/* Fraction part */
rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
ha->flags.thermal_supported = 0;
goto fail;
}
*frac = (byte >> 6) * 25;
- DEBUG11(printk(KERN_INFO "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
fail:
return rval;
}
@@ -4180,12 +4129,11 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): entered.\n", __func__, vha->host_no));
-
memset(mcp, 0, sizeof(mbx_cmd_t));
mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
mcp->mb[1] = 1;
@@ -4197,12 +4145,10 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
- "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1016,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
}
return rval;
@@ -4216,12 +4162,11 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
+
if (!IS_QLA82XX(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): entered.\n", __func__, vha->host_no));
-
memset(mcp, 0, sizeof(mbx_cmd_t));
mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
mcp->mb[1] = 0;
@@ -4233,12 +4178,10 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
- "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x100c,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
}
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 5e34391..f488cc6 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -36,8 +36,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
mutex_lock(&ha->vport_lock);
vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
if (vp_id > ha->max_npiv_vports) {
- DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n",
- vp_id, ha->max_npiv_vports));
+ ql_dbg(ql_dbg_vport, vha, 0xa000,
+ "vp_id %d is bigger than max-supported %d.\n",
+ vp_id, ha->max_npiv_vports);
mutex_unlock(&ha->vport_lock);
return vp_id;
}
@@ -131,9 +132,9 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- DEBUG15(printk("scsi(%ld): Marking port dead, "
- "loop_id=0x%04x :%x\n",
- vha->host_no, fcport->loop_id, fcport->vp_idx));
+ ql_dbg(ql_dbg_vport, vha, 0xa001,
+ "Marking port dead, loop_id=0x%04x : %x.\n",
+ fcport->loop_id, fcport->vp_idx);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -187,13 +188,13 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
goto enable_failed;
}
- DEBUG15(qla_printk(KERN_INFO, ha,
- "Virtual port with id: %d - Enabled\n", vha->vp_idx));
+ ql_dbg(ql_dbg_taskm, vha, 0x801a,
+ "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
return 0;
enable_failed:
- DEBUG15(qla_printk(KERN_INFO, ha,
- "Virtual port with id: %d - Disabled\n", vha->vp_idx));
+ ql_dbg(ql_dbg_taskm, vha, 0x801b,
+ "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
return 1;
}
@@ -205,12 +206,12 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
fc_vport = vha->fc_vport;
- DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
- vha->host_no, __func__));
+ ql_dbg(ql_dbg_vport, vha, 0xa002,
+ "%s: change request #3.\n", __func__);
ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
if (ret != QLA_SUCCESS) {
- DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
- "receiving of RSCN requests: 0x%x\n", ret));
+ ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
+ "receiving of RSCN requests: 0x%x.\n", ret);
return;
} else {
/* Corresponds to SCR enabled */
@@ -248,9 +249,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
case MBA_CHG_IN_CONNECTION:
case MBA_PORT_UPDATE:
case MBA_RSCN_UPDATE:
- DEBUG15(printk("scsi(%ld)%s: Async_event for"
- " VP[%d], mb = 0x%x, vha=%p\n",
- vha->host_no, __func__, i, *mb, vha));
+ ql_dbg(ql_dbg_async, vha, 0x5024,
+ "Async_event for VP[%d], mb=0x%x vha=%p.\n",
+ i, *mb, vha);
qla2x00_async_event(vha, rsp, mb);
break;
}
@@ -286,37 +287,49 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
- DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
- vha->host_no, vha->vp_idx));
+ ql_dbg(ql_dbg_taskm, vha, 0x801d,
+ "Scheduling enable of Vport %d.\n", vha->vp_idx);
return qla24xx_enable_vp(vha);
}
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
+ ql_dbg(ql_dbg_dpc, vha, 0x4012,
+ "Entering %s.\n", __func__);
+ ql_dbg(ql_dbg_dpc, vha, 0x4013,
+ "vp_flags: 0x%lx.\n", vha->vp_flags);
+
qla2x00_do_work(vha);
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
/* VP acquired. complete port configuration */
+ ql_dbg(ql_dbg_dpc, vha, 0x4014,
+ "Configure VP scheduled.\n");
qla24xx_configure_vp(vha);
+ ql_dbg(ql_dbg_dpc, vha, 0x4015,
+ "Configure VP end.\n");
return 0;
}
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
+ ql_dbg(ql_dbg_dpc, vha, 0x4016,
+ "FCPort update scheduled.\n");
qla2x00_update_fcports(vha);
clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, vha, 0x4017,
+ "FCPort update end.\n");
}
if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
atomic_read(&vha->loop_state) != LOOP_DOWN) {
- DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
- vha->host_no));
+ ql_dbg(ql_dbg_dpc, vha, 0x4018,
+ "Relogin needed scheduled.\n");
qla2x00_relogin(vha);
-
- DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
- vha->host_no));
+ ql_dbg(ql_dbg_dpc, vha, 0x4019,
+ "Relogin needed end.\n");
}
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -326,11 +339,17 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
+ ql_dbg(ql_dbg_dpc, vha, 0x401a,
+ "Loop resync scheduled.\n");
qla2x00_loop_resync(vha);
clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, vha, 0x401b,
+ "Loop resync end.\n");
}
}
+ ql_dbg(ql_dbg_dpc, vha, 0x401c,
+ "Exiting %s.\n", __func__);
return 0;
}
@@ -396,9 +415,10 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
/* Check up max-npiv-supports */
if (ha->num_vhosts > ha->max_npiv_vports) {
- DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
- "max_npv_vports %ud.\n", base_vha->host_no,
- ha->num_vhosts, ha->max_npiv_vports));
+ ql_dbg(ql_dbg_vport, vha, 0xa004,
+ "num_vhosts %ud is bigger "
+ "than max_npiv_vports %ud.\n",
+ ha->num_vhosts, ha->max_npiv_vports);
return VPCERR_UNSUPPORTED;
}
return 0;
@@ -415,7 +435,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha = qla2x00_create_host(sht, ha);
if (!vha) {
- DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
+ ql_log(ql_log_warn, vha, 0xa005,
+ "scsi_host_alloc() failed for vport.\n");
return(NULL);
}
@@ -429,8 +450,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->device_flags = 0;
vha->vp_idx = qla24xx_allocate_vp_id(vha);
if (vha->vp_idx > ha->max_npiv_vports) {
- DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_vport, vha, 0xa006,
+ "Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
@@ -451,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
host->can_queue = base_vha->req->length + 128;
host->this_id = 255;
host->cmd_per_lun = 3;
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
host->max_cmd_len = MAX_CMDSZ;
@@ -461,8 +482,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
host->max_id = MAX_TARGETS_2200;
host->transportt = qla2xxx_transport_vport_template;
- DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
- vha->host_no, vha));
+ ql_dbg(ql_dbg_vport, vha, 0xa007,
+ "Detect vport hba %ld at address = %p.\n",
+ vha->host_no, vha);
vha->flags.init_done = 1;
@@ -567,9 +589,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
if (req) {
ret = qla25xx_delete_req_que(vha, req);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Couldn't delete req que %d\n",
- req->id);
+ ql_log(ql_log_warn, vha, 0x00ea,
+ "Couldn't delete req que %d.\n",
+ req->id);
return ret;
}
}
@@ -581,9 +603,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
if (rsp) {
ret = qla25xx_delete_rsp_que(vha, rsp);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Couldn't delete rsp que %d\n",
- rsp->id);
+ ql_log(ql_log_warn, vha, 0x00eb,
+ "Couldn't delete rsp que %d.\n",
+ rsp->id);
return ret;
}
}
@@ -604,8 +626,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
if (req == NULL) {
- qla_printk(KERN_WARNING, ha, "could not allocate memory"
- "for request que\n");
+ ql_log(ql_log_fatal, base_vha, 0x00d9,
+ "Failed to allocate memory for request queue.\n");
goto failed;
}
@@ -614,8 +636,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
(req->length + 1) * sizeof(request_t),
&req->dma, GFP_KERNEL);
if (req->ring == NULL) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - request_ring\n");
+ ql_log(ql_log_fatal, base_vha, 0x00da,
+ "Failed to allocte memory for request_ring.\n");
goto que_failed;
}
@@ -623,8 +645,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
mutex_unlock(&ha->vport_lock);
- qla_printk(KERN_INFO, ha, "No resources to create "
- "additional request queue\n");
+ ql_log(ql_log_warn, base_vha, 0x00db,
+ "No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->req_qid_map);
@@ -633,6 +655,12 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->vp_idx = vp_idx;
req->qos = qos;
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
+ "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
+ que_id, req->rid, req->vp_idx, req->qos);
+ ql_dbg(ql_dbg_init, base_vha, 0x00dc,
+ "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
+ que_id, req->rid, req->vp_idx, req->qos);
if (rsp_que < 0)
req->rsp = NULL;
else
@@ -645,6 +673,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
options |= BIT_5;
req->options = options;
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
+ "options=0x%x.\n", req->options);
+ ql_dbg(ql_dbg_init, base_vha, 0x00dd,
+ "options=0x%x.\n", req->options);
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
@@ -656,10 +688,21 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
reg = ISP_QUE_REG(ha, que_id);
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
mutex_unlock(&ha->vport_lock);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
+ "ring_ptr=%p ring_index=%d, "
+ "cnt=%d id=%d max_q_depth=%d.\n",
+ req->ring_ptr, req->ring_index,
+ req->cnt, req->id, req->max_q_depth);
+ ql_dbg(ql_dbg_init, base_vha, 0x00de,
+ "ring_ptr=%p ring_index=%d, "
+ "cnt=%d id=%d max_q_depth=%d.\n",
+ req->ring_ptr, req->ring_index, req->cnt,
+ req->id, req->max_q_depth);
ret = qla25xx_init_req_que(base_vha, req);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
+ ql_log(ql_log_fatal, base_vha, 0x00df,
+ "%s failed.\n", __func__);
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
@@ -700,8 +743,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
if (rsp == NULL) {
- qla_printk(KERN_WARNING, ha, "could not allocate memory for"
- " response que\n");
+ ql_log(ql_log_warn, base_vha, 0x0066,
+ "Failed to allocate memory for response queue.\n");
goto failed;
}
@@ -710,8 +753,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
(rsp->length + 1) * sizeof(response_t),
&rsp->dma, GFP_KERNEL);
if (rsp->ring == NULL) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - response_ring\n");
+ ql_log(ql_log_warn, base_vha, 0x00e1,
+ "Failed to allocate memory for response ring.\n");
goto que_failed;
}
@@ -719,8 +762,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
if (que_id >= ha->max_rsp_queues) {
mutex_unlock(&ha->vport_lock);
- qla_printk(KERN_INFO, ha, "No resources to create "
- "additional response queue\n");
+ ql_log(ql_log_warn, base_vha, 0x00e2,
+ "No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->rsp_qid_map);
@@ -728,12 +771,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (ha->flags.msix_enabled)
rsp->msix = &ha->msix_entries[que_id + 1];
else
- qla_printk(KERN_WARNING, ha, "msix not enabled\n");
+ ql_log(ql_log_warn, base_vha, 0x00e3,
+ "MSIX not enalbled.\n");
ha->rsp_q_map[que_id] = rsp;
rsp->rid = rid;
rsp->vp_idx = vp_idx;
rsp->hw = ha;
+ ql_dbg(ql_dbg_init, base_vha, 0x00e4,
+ "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+ que_id, rsp->rid, rsp->vp_idx, rsp->hw);
/* Use alternate PCI bus number */
if (MSB(rsp->rid))
options |= BIT_4;
@@ -750,6 +797,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
mutex_unlock(&ha->vport_lock);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ rsp->options, rsp->id, rsp->rsp_q_in,
+ rsp->rsp_q_out);
+ ql_dbg(ql_dbg_init, base_vha, 0x00e5,
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ rsp->options, rsp->id, rsp->rsp_q_in,
+ rsp->rsp_q_out);
ret = qla25xx_request_irq(rsp);
if (ret)
@@ -757,7 +812,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
ret = qla25xx_init_rsp_que(base_vha, rsp);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
+ ql_log(ql_log_fatal, base_vha, 0x00e7,
+ "%s failed.\n", __func__);
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->rsp_qid_map);
mutex_unlock(&ha->vport_lock);
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index e1138bc..049807c 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -348,6 +348,7 @@ static void
qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
{
u32 win_read;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ha->crb_win = CRB_HI(*off);
writel(ha->crb_win,
@@ -358,9 +359,10 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
*/
win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
if (win_read != ha->crb_win) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
- "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
+ ql_dbg(ql_dbg_p3p, vha, 0xb000,
+ "%s: Written crbwin (0x%x) "
+ "!= Read crbwin (0x%x), off=0x%lx.\n",
+ __func__, ha->crb_win, win_read, *off);
}
*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
@@ -368,6 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
static inline unsigned long
qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* See if we are currently pointing to the region we want to use next */
if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
/* No need to change window. PCIX and PCIEregs are in both
@@ -398,9 +401,10 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
return off;
}
/* strange address given */
- qla_printk(KERN_WARNING, ha,
- "%s: Warning: unm_nic_pci_set_crbwindow called with"
- " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
+ ql_dbg(ql_dbg_p3p, vha, 0xb001,
+ "%x: Warning: unm_nic_pci_set_crbwindow "
+ "called with an unknown address(%llx).\n",
+ QLA2XXX_DRIVER_NAME, off);
return off;
}
@@ -563,6 +567,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
{
int window;
u32 win_read;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
QLA82XX_ADDR_DDR_NET_MAX)) {
@@ -574,8 +579,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
win_read = qla82xx_rd_32(ha,
ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
if ((win_read << 17) != window) {
- qla_printk(KERN_WARNING, ha,
- "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
+ ql_dbg(ql_dbg_p3p, vha, 0xb003,
+ "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
__func__, window, win_read);
}
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
@@ -583,7 +588,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
QLA82XX_ADDR_OCM0_MAX)) {
unsigned int temp1;
if ((addr & 0x00ff800) == 0xff800) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0xb004,
"%s: QM access not handled.\n", __func__);
addr = -1UL;
}
@@ -596,8 +601,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
temp1 = ((window & 0x1FF) << 7) |
((window & 0x0FFFE0000) >> 17);
if (win_read != temp1) {
- qla_printk(KERN_WARNING, ha,
- "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
+ ql_log(ql_log_warn, vha, 0xb005,
+ "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
__func__, temp1, win_read);
}
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
@@ -612,8 +617,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
win_read = qla82xx_rd_32(ha,
ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
if (win_read != window) {
- qla_printk(KERN_WARNING, ha,
- "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
+ ql_log(ql_log_warn, vha, 0xb006,
+ "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
__func__, window, win_read);
}
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
@@ -624,9 +629,9 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
*/
if ((qla82xx_pci_set_window_warning_count++ < 8) ||
(qla82xx_pci_set_window_warning_count%64 == 0)) {
- qla_printk(KERN_WARNING, ha,
- "%s: Warning:%s Unknown address range!\n", __func__,
- QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_warn, vha, 0xb007,
+ "%s: Warning:%s Unknown address range!.\n",
+ __func__, QLA2XXX_DRIVER_NAME);
}
addr = -1UL;
}
@@ -671,6 +676,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
uint8_t *mem_ptr = NULL;
unsigned long mem_base;
unsigned long mem_page;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
write_lock_irqsave(&ha->hw_lock, flags);
@@ -682,9 +688,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
if ((start == -1UL) ||
(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
write_unlock_irqrestore(&ha->hw_lock, flags);
- qla_printk(KERN_ERR, ha,
- "%s out of bound pci memory access. "
- "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
+ ql_log(ql_log_fatal, vha, 0xb008,
+ "%s out of bound pci memory "
+ "access, offset is 0x%llx.\n",
+ QLA2XXX_DRIVER_NAME, off);
return -1;
}
@@ -741,6 +748,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
uint8_t *mem_ptr = NULL;
unsigned long mem_base;
unsigned long mem_page;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
write_lock_irqsave(&ha->hw_lock, flags);
@@ -752,9 +760,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
if ((start == -1UL) ||
(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
write_unlock_irqrestore(&ha->hw_lock, flags);
- qla_printk(KERN_ERR, ha,
- "%s out of bound pci memory access. "
- "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
+ ql_log(ql_log_fatal, vha, 0xb009,
+ "%s out of bount memory "
+ "access, offset is 0x%llx.\n",
+ QLA2XXX_DRIVER_NAME, off);
return -1;
}
@@ -855,15 +864,16 @@ qla82xx_wait_rom_busy(struct qla_hw_data *ha)
{
long timeout = 0;
long done = 0 ;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (done == 0) {
done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
done &= 4;
timeout++;
if (timeout >= rom_max_timeout) {
- DEBUG(qla_printk(KERN_INFO, ha,
- "%s: Timeout reached waiting for rom busy",
- QLA2XXX_DRIVER_NAME));
+ ql_dbg(ql_dbg_p3p, vha, 0xb00a,
+ "%s: Timeout reached waiting for rom busy.\n",
+ QLA2XXX_DRIVER_NAME);
return -1;
}
}
@@ -875,15 +885,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
{
long timeout = 0;
long done = 0 ;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (done == 0) {
done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
done &= 2;
timeout++;
if (timeout >= rom_max_timeout) {
- DEBUG(qla_printk(KERN_INFO, ha,
- "%s: Timeout reached waiting for rom done",
- QLA2XXX_DRIVER_NAME));
+ ql_dbg(ql_dbg_p3p, vha, 0xb00b,
+ "%s: Timeout reached waiting for rom done.\n",
+ QLA2XXX_DRIVER_NAME);
return -1;
}
}
@@ -893,15 +904,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
static int
qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
qla82xx_wait_rom_busy(ha);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "%s: Error waiting for rom done\n",
- QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_fatal, vha, 0x00ba,
+ "Error waiting for rom done.\n");
return -1;
}
/* Reset abyte_cnt and dummy_byte_cnt */
@@ -917,6 +929,7 @@ static int
qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
int ret, loops = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
udelay(100);
@@ -924,9 +937,8 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
loops++;
}
if (loops >= 50000) {
- qla_printk(KERN_INFO, ha,
- "%s: qla82xx_rom_lock failed\n",
- QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_fatal, vha, 0x00b9,
+ "Failed to aquire SEM2 lock.\n");
return -1;
}
ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -937,11 +949,12 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
static int
qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
qla82xx_wait_rom_busy(ha);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Error waiting for rom done\n");
+ ql_log(ql_log_warn, vha, 0xb00c,
+ "Error waiting for rom done.\n");
return -1;
}
*val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
@@ -955,6 +968,7 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
uint32_t done = 1 ;
uint32_t val;
int ret = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
while ((done != 0) && (ret == 0)) {
@@ -964,8 +978,8 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
udelay(10);
cond_resched();
if (timeout >= 50000) {
- qla_printk(KERN_WARNING, ha,
- "Timeout reached waiting for write finish");
+ ql_log(ql_log_warn, vha, 0xb00d,
+ "Timeout reached waiting for write finish.\n");
return -1;
}
}
@@ -992,13 +1006,14 @@ qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
static int
qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
if (qla82xx_flash_set_write_enable(ha))
return -1;
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Error waiting for rom done\n");
+ ql_log(ql_log_warn, vha, 0xb00e,
+ "Error waiting for rom done.\n");
return -1;
}
return qla82xx_flash_wait_write_finish(ha);
@@ -1007,10 +1022,11 @@ qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
static int
qla82xx_write_disable_flash(struct qla_hw_data *ha)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Error waiting for rom done\n");
+ ql_log(ql_log_warn, vha, 0xb00f,
+ "Error waiting for rom done.\n");
return -1;
}
return 0;
@@ -1020,13 +1036,16 @@ static int
ql82xx_rom_lock_d(struct qla_hw_data *ha)
{
int loops = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
udelay(100);
cond_resched();
loops++;
}
if (loops >= 50000) {
- qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
+ ql_log(ql_log_warn, vha, 0xb010,
+ "ROM lock failed.\n");
return -1;
}
return 0;;
@@ -1037,10 +1056,12 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
uint32_t data)
{
int ret = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ret = ql82xx_rom_lock_d(ha);
if (ret < 0) {
- qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ ql_log(ql_log_warn, vha, 0xb011,
+ "ROM lock failed.\n");
return ret;
}
@@ -1053,8 +1074,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
qla82xx_wait_rom_busy(ha);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Error waiting for rom done\n");
+ ql_log(ql_log_warn, vha, 0xb012,
+ "Error waiting for rom done.\n");
ret = -1;
goto done_write;
}
@@ -1159,8 +1180,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
*/
if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
qla82xx_rom_fast_read(ha, 4, &n) != 0) {
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Reading crb_init area: n: %08x\n", n);
+ ql_log(ql_log_fatal, vha, 0x006e,
+ "Error Reading crb_init area: n: %08x.\n", n);
return -1;
}
@@ -1172,20 +1193,18 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
/* number of addr/value pair should not exceed 1024 enteries */
if (n >= 1024) {
- qla_printk(KERN_WARNING, ha,
- "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
- QLA2XXX_DRIVER_NAME, __func__, n);
+ ql_log(ql_log_fatal, vha, 0x0071,
+ "Card flash not initialized:n=0x%x.\n", n);
return -1;
}
- qla_printk(KERN_INFO, ha,
- "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);
+ ql_log(ql_log_info, vha, 0x0072,
+ "%d CRB init values found in ROM.\n", n);
buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
if (buf == NULL) {
- qla_printk(KERN_WARNING, ha,
- "%s: [ERROR] Unable to malloc memory.\n",
- QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_fatal, vha, 0x010c,
+ "Unable to allocate memory.\n");
return -1;
}
@@ -1236,9 +1255,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
continue;
if (off == ADDR_ERROR) {
- qla_printk(KERN_WARNING, ha,
- "%s: [ERROR] Unknown addr: 0x%08lx\n",
- QLA2XXX_DRIVER_NAME, buf[i].addr);
+ ql_log(ql_log_fatal, vha, 0x0116,
+ "Unknow addr: 0x%08lx.\n", buf[i].addr);
continue;
}
@@ -1370,7 +1388,7 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit())
dev_err(&ha->pdev->dev,
- "failed to write through agent\n");
+ "failed to write through agent.\n");
ret = -1;
break;
}
@@ -1460,7 +1478,7 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit())
dev_err(&ha->pdev->dev,
- "failed to read through agent\n");
+ "failed to read through agent.\n");
break;
}
@@ -1633,17 +1651,15 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
uint32_t len = 0;
if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
- qla_printk(KERN_WARNING, ha,
- "Failed to reserve selected regions (%s)\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
+ "Failed to reserver selected regions.\n");
goto iospace_error_exit;
}
/* Use MMIO operations for all accesses. */
if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
- qla_printk(KERN_ERR, ha,
- "region #0 not an MMIO resource (%s), aborting\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
+ "Region #0 not an MMIO resource, aborting.\n");
goto iospace_error_exit;
}
@@ -1651,9 +1667,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
ha->nx_pcibase =
(unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
if (!ha->nx_pcibase) {
- qla_printk(KERN_ERR, ha,
- "cannot remap pcibase MMIO (%s), aborting\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
+ "Cannot remap pcibase MMIO, aborting.\n");
pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
@@ -1667,9 +1682,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
(unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
(ha->pdev->devfn << 12)), 4);
if (!ha->nxdb_wr_ptr) {
- qla_printk(KERN_ERR, ha,
- "cannot remap MMIO (%s), aborting\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
+ "Cannot remap MMIO, aborting.\n");
pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
@@ -1687,6 +1701,16 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
+ "nx_pci_base=%p iobase=%p "
+ "max_req_queues=%d msix_count=%d.\n",
+ ha->nx_pcibase, ha->iobase,
+ ha->max_req_queues, ha->msix_count);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
+ "nx_pci_base=%p iobase=%p "
+ "max_req_queues=%d msix_count=%d.\n",
+ ha->nx_pcibase, ha->iobase,
+ ha->max_req_queues, ha->msix_count);
return 0;
iospace_error_exit:
@@ -1712,6 +1736,9 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
pci_set_master(ha->pdev);
ret = pci_set_mwi(ha->pdev);
ha->chip_revision = ha->pdev->revision;
+ ql_dbg(ql_dbg_init, vha, 0x0043,
+ "Chip revision:%ld.\n",
+ ha->chip_revision);
return 0;
}
@@ -1877,6 +1904,7 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
{
u32 val = 0;
int retries = 60;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
do {
read_lock(&ha->hw_lock);
@@ -1892,15 +1920,15 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
default:
break;
}
- qla_printk(KERN_WARNING, ha,
- "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
- val, retries);
+ ql_log(ql_log_info, vha, 0x00a8,
+ "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
+ val, retries);
msleep(500);
} while (--retries);
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_fatal, vha, 0x00a9,
"Cmd Peg initialization failed: 0x%x.\n", val);
val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
@@ -1915,6 +1943,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
{
u32 val = 0;
int retries = 60;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
do {
read_lock(&ha->hw_lock);
@@ -1930,17 +1959,16 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
default:
break;
}
-
- qla_printk(KERN_WARNING, ha,
- "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
- val, retries);
+ ql_log(ql_log_info, vha, 0x00ab,
+ "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
+ val, retries);
msleep(500);
} while (--retries);
- qla_printk(KERN_INFO, ha,
- "Rcv Peg initialization failed: 0x%x.\n", val);
+ ql_log(ql_log_fatal, vha, 0x00ac,
+ "Rcv Peg initializatin failed: 0x%x.\n", val);
read_lock(&ha->hw_lock);
qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
read_unlock(&ha->hw_lock);
@@ -1989,13 +2017,11 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
}
if (ha->mcp) {
- DEBUG3_11(printk(KERN_INFO "%s(%ld): "
- "Got mailbox completion. cmd=%x.\n",
- __func__, vha->host_no, ha->mcp->mb[0]));
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
} else {
- qla_printk(KERN_INFO, ha,
- "%s(%ld): MBX pointer ERROR!\n",
- __func__, vha->host_no);
+ ql_dbg(ql_dbg_async, vha, 0x5053,
+ "MBX pointer ERROR.\n");
}
}
@@ -2019,13 +2045,13 @@ qla82xx_intr_handler(int irq, void *dev_id)
int status = 0, status1 = 0;
unsigned long flags;
unsigned long iter;
- uint32_t stat;
+ uint32_t stat = 0;
uint16_t mb[4];
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2075,9 +2101,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
default:
- DEBUG2(printk("scsi(%ld): "
- " Unrecognized interrupt type (%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_async, vha, 0x5054,
+ "Unrecognized interrupt type (%d).\n",
+ stat & 0xff);
break;
}
}
@@ -2089,8 +2115,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
#ifdef QL_DEBUG_LEVEL_17
if (!irq && ha->flags.eeh_busy)
- qla_printk(KERN_WARNING, ha,
- "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
+ ql_log(ql_log_warn, vha, 0x503d,
+ "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif
@@ -2111,13 +2137,13 @@ qla82xx_msix_default(int irq, void *dev_id)
struct device_reg_82xx __iomem *reg;
int status = 0;
unsigned long flags;
- uint32_t stat;
+ uint32_t stat = 0;
uint16_t mb[4];
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2149,9 +2175,9 @@ qla82xx_msix_default(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
default:
- DEBUG2(printk("scsi(%ld): "
- " Unrecognized interrupt type (%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_async, vha, 0x5041,
+ "Unrecognized interrupt type (%d).\n",
+ stat & 0xff);
break;
}
}
@@ -2162,9 +2188,9 @@ qla82xx_msix_default(int irq, void *dev_id)
#ifdef QL_DEBUG_LEVEL_17
if (!irq && ha->flags.eeh_busy)
- qla_printk(KERN_WARNING, ha,
- "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
- status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
+ ql_log(ql_log_warn, vha, 0x5044,
+ "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
+ status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
@@ -2182,21 +2208,22 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
struct qla_hw_data *ha;
struct rsp_que *rsp;
struct device_reg_82xx __iomem *reg;
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
reg = &ha->iobase->isp82;
- spin_lock_irq(&ha->hardware_lock);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
WRT_REG_DWORD(&reg->host_int, 0);
- spin_unlock_irq(&ha->hardware_lock);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
}
@@ -2215,7 +2242,7 @@ qla82xx_poll(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return;
}
ha = rsp->hw;
@@ -2245,9 +2272,9 @@ qla82xx_poll(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
default:
- DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
- "(%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_p3p, vha, 0xb013,
+ "Unrecognized interrupt type (%d).\n",
+ stat & 0xff);
break;
}
}
@@ -2347,9 +2374,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
}
drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
- qla_printk(KERN_INFO, ha,
- "%s(%ld):drv_state = 0x%x\n",
- __func__, vha->host_no, drv_state);
+ ql_log(ql_log_info, vha, 0x00bb,
+ "drv_state = 0x%x.\n", drv_state);
qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@@ -2392,8 +2418,8 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "%s: Error during CRB Initialization\n", __func__);
+ ql_log(ql_log_fatal, vha, 0x009f,
+ "Error during CRB initialization.\n");
return QLA_FUNCTION_FAILED;
}
udelay(500);
@@ -2411,27 +2437,27 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
if (ql2xfwloadbin == 2)
goto try_blob_fw;
- qla_printk(KERN_INFO, ha,
- "Attempting to load firmware from flash\n");
+ ql_log(ql_log_info, vha, 0x00a0,
+ "Attempting to load firmware from flash.\n");
if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "Firmware loaded successfully from flash\n");
+ ql_log(ql_log_info, vha, 0x00a1,
+ "Firmware loaded successully from flash.\n");
return QLA_SUCCESS;
} else {
- qla_printk(KERN_ERR, ha,
- "Firmware load from flash failed\n");
+ ql_log(ql_log_warn, vha, 0x0108,
+ "Firmware load from flash failed.\n");
}
try_blob_fw:
- qla_printk(KERN_INFO, ha,
- "Attempting to load firmware from blob\n");
+ ql_log(ql_log_info, vha, 0x00a2,
+ "Attempting to load firmware from blob.\n");
/* Load firmware blob. */
blob = ha->hablob = qla2x00_request_firmware(vha);
if (!blob) {
- qla_printk(KERN_ERR, ha,
- "Firmware image not present.\n");
+ ql_log(ql_log_fatal, vha, 0x00a3,
+ "Firmware image not preset.\n");
goto fw_load_failed;
}
@@ -2441,20 +2467,19 @@ try_blob_fw:
/* Fallback to URI format */
if (qla82xx_validate_firmware_blob(vha,
QLA82XX_UNIFIED_ROMIMAGE)) {
- qla_printk(KERN_ERR, ha,
- "No valid firmware image found!!!");
+ ql_log(ql_log_fatal, vha, 0x00a4,
+ "No valid firmware image found.\n");
return QLA_FUNCTION_FAILED;
}
}
if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "%s: Firmware loaded successfully "
- " from binary blob\n", __func__);
+ ql_log(ql_log_info, vha, 0x00a5,
+ "Firmware loaded successfully from binary blob.\n");
return QLA_SUCCESS;
} else {
- qla_printk(KERN_ERR, ha,
- "Firmware load failed from binary blob\n");
+ ql_log(ql_log_fatal, vha, 0x00a6,
+ "Firmware load failed for binary blob.\n");
blob->fw = NULL;
blob = NULL;
goto fw_load_failed;
@@ -2486,15 +2511,15 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha,
- "%s: Error trying to start fw!\n", __func__);
+ ql_log(ql_log_fatal, vha, 0x00a7,
+ "Error trying to start fw.\n");
return QLA_FUNCTION_FAILED;
}
/* Handshake with the card before we register the devices. */
if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha,
- "%s: Error during card handshake!\n", __func__);
+ ql_log(ql_log_fatal, vha, 0x00aa,
+ "Error during card handshake.\n");
return QLA_FUNCTION_FAILED;
}
@@ -2663,8 +2688,11 @@ qla82xx_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
if (qla2x00_marker(vha, req,
- rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x300c,
+ "qla2x00_marker failed for cmd=%p.\n", cmd);
return QLA_FUNCTION_FAILED;
+ }
vha->marker_needed = 0;
}
@@ -2701,8 +2729,13 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t i;
more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
- if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
+ if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x300d,
+ "Num of DSD list %d is than %d for cmd=%p.\n",
+ more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ cmd);
goto queuing_error;
+ }
if (more_dsd_lists <= ha->gbl_dsd_avail)
goto sufficient_dsds;
@@ -2711,13 +2744,20 @@ qla82xx_start_scsi(srb_t *sp)
for (i = 0; i < more_dsd_lists; i++) {
dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
- if (!dsd_ptr)
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x300e,
+ "Failed to allocate memory for dsd_dma "
+ "for cmd=%p.\n", cmd);
goto queuing_error;
+ }
dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
if (!dsd_ptr->dsd_addr) {
kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x300f,
+ "Failed to allocate memory for dsd_addr "
+ "for cmd=%p.\n", cmd);
goto queuing_error;
}
list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
@@ -2742,17 +2782,16 @@ sufficient_dsds:
ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!sp->ctx) {
- DEBUG(printk(KERN_INFO
- "%s(%ld): failed to allocate"
- " ctx.\n", __func__, vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x3010,
+ "Failed to allocate ctx for cmd=%p.\n", cmd);
goto queuing_error;
}
memset(ctx, 0, sizeof(struct ct6_dsd));
ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
GFP_ATOMIC, &ctx->fcp_cmnd_dma);
if (!ctx->fcp_cmnd) {
- DEBUG2_3(printk("%s(%ld): failed to allocate"
- " fcp_cmnd.\n", __func__, vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x3011,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
goto queuing_error_fcp_cmnd;
}
@@ -2766,6 +2805,9 @@ sufficient_dsds:
/* SCSI command bigger than 16 bytes must be
* multiple of 4
*/
+ ql_log(ql_log_warn, vha, 0x3012,
+ "scsi cmd len %d not multiple of 4 "
+ "for cmd=%p.\n", cmd->cmd_len, cmd);
goto queuing_error_fcp_cmnd;
}
ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
@@ -2797,6 +2839,16 @@ sufficient_dsds:
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+ /* build FCP_CMND IU */
+ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
/*
* Update tagged queuing modifier -- default is TSK_SIMPLE (0).
*/
@@ -2813,16 +2865,6 @@ sufficient_dsds:
}
}
- /* build FCP_CMND IU */
- memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
- int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
- ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
- ctx->fcp_cmnd->additional_cdb_len |= 1;
- else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
- ctx->fcp_cmnd->additional_cdb_len |= 2;
-
memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
@@ -2845,7 +2887,7 @@ sufficient_dsds:
cmd_pkt->entry_status = (uint8_t) rsp->id;
} else {
struct cmd_type_7 *cmd_pkt;
- req_cnt = qla24xx_calc_iocbs(tot_dsds);
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = (uint16_t)RD_REG_DWORD_RELAXED(
&reg->req_q_out[0]);
@@ -2979,8 +3021,8 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
/* Dword reads to flash. */
for (i = 0; i < length/4; i++, faddr += 4) {
if (qla82xx_rom_fast_read(ha, faddr, &val)) {
- qla_printk(KERN_WARNING, ha,
- "Do ROM fast read failed\n");
+ ql_log(ql_log_warn, vha, 0x0106,
+ "Do ROM fast read failed.\n");
goto done_read;
}
dwptr[i] = __constant_cpu_to_le32(val);
@@ -2994,10 +3036,12 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
{
int ret;
uint32_t val;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ret = ql82xx_rom_lock_d(ha);
if (ret < 0) {
- qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ ql_log(ql_log_warn, vha, 0xb014,
+ "ROM Lock failed.\n");
return ret;
}
@@ -3013,7 +3057,8 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
}
if (qla82xx_write_disable_flash(ha) != 0)
- qla_printk(KERN_WARNING, ha, "Write disable failed\n");
+ ql_log(ql_log_warn, vha, 0xb015,
+ "Write disable failed.\n");
done_unprotect:
qla82xx_rom_unlock(ha);
@@ -3025,10 +3070,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
{
int ret;
uint32_t val;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ret = ql82xx_rom_lock_d(ha);
if (ret < 0) {
- qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ ql_log(ql_log_warn, vha, 0xb016,
+ "ROM Lock failed.\n");
return ret;
}
@@ -3040,10 +3087,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
/* LOCK all sectors */
ret = qla82xx_write_status_reg(ha, val);
if (ret < 0)
- qla_printk(KERN_WARNING, ha, "Write status register failed\n");
+ ql_log(ql_log_warn, vha, 0xb017,
+ "Write status register failed.\n");
if (qla82xx_write_disable_flash(ha) != 0)
- qla_printk(KERN_WARNING, ha, "Write disable failed\n");
+ ql_log(ql_log_warn, vha, 0xb018,
+ "Write disable failed.\n");
done_protect:
qla82xx_rom_unlock(ha);
return ret;
@@ -3053,10 +3102,12 @@ static int
qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
{
int ret = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ret = ql82xx_rom_lock_d(ha);
if (ret < 0) {
- qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ ql_log(ql_log_warn, vha, 0xb019,
+ "ROM Lock failed.\n");
return ret;
}
@@ -3066,8 +3117,8 @@ qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Error waiting for rom done\n");
+ ql_log(ql_log_warn, vha, 0xb01a,
+ "Error waiting for rom done.\n");
ret = -1;
goto done;
}
@@ -3110,10 +3161,10 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
- qla_printk(KERN_DEBUG, ha,
- "Unable to allocate memory for optrom "
- "burst write (%x KB).\n",
- OPTROM_BURST_SIZE / 1024);
+ ql_log(ql_log_warn, vha, 0xb01b,
+ "Unable to allocate memory "
+ "for optron burst write (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
}
}
@@ -3122,8 +3173,8 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
ret = qla82xx_unprotect_flash(ha);
if (ret) {
- qla_printk(KERN_WARNING, ha,
- "Unable to unprotect flash for update.\n");
+ ql_log(ql_log_warn, vha, 0xb01c,
+ "Unable to unprotect flash for update.\n");
goto write_done;
}
@@ -3133,9 +3184,9 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
ret = qla82xx_erase_sector(ha, faddr);
if (ret) {
- DEBUG9(qla_printk(KERN_ERR, ha,
- "Unable to erase sector: "
- "address=%x.\n", faddr));
+ ql_log(ql_log_warn, vha, 0xb01d,
+ "Unable to erase sector: address=%x.\n",
+ faddr);
break;
}
}
@@ -3149,12 +3200,12 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
(ha->flash_data_off | faddr),
OPTROM_BURST_DWORDS);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0xb01e,
"Unable to burst-write optrom segment "
"(%x/%x/%llx).\n", ret,
(ha->flash_data_off | faddr),
(unsigned long long)optrom_dma);
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0xb01f,
"Reverting to slow-write.\n");
dma_free_coherent(&ha->pdev->dev,
@@ -3171,16 +3222,16 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
ret = qla82xx_write_flash_dword(ha, faddr,
cpu_to_le32(*dwptr));
if (ret) {
- DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program"
- "flash address=%x data=%x.\n", __func__,
- ha->host_no, faddr, *dwptr));
+ ql_dbg(ql_dbg_p3p, vha, 0xb020,
+ "Unable to program flash address=%x data=%x.\n",
+ faddr, *dwptr);
break;
}
}
ret = qla82xx_protect_flash(ha);
if (ret)
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0xb021,
"Unable to protect flash after update.\n");
write_done:
if (optrom)
@@ -3244,9 +3295,12 @@ qla82xx_start_iocbs(srb_t *sp)
void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
if (qla82xx_rom_lock(ha))
/* Someone else is holding the lock. */
- qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");
+ ql_log(ql_log_info, vha, 0xb022,
+ "Resetting rom_lock.\n");
/*
* Either we got the lock, or someone
@@ -3313,7 +3367,8 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
dev_initialize:
/* set to DEV_INITIALIZING */
- qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
+ ql_log(ql_log_info, vha, 0x009e,
+ "HW State: INITIALIZING.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
/* Driver that sets device state to initializating sets IDC version */
@@ -3324,14 +3379,16 @@ dev_initialize:
qla82xx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ ql_log(ql_log_fatal, vha, 0x00ad,
+ "HW State: FAILED.\n");
qla82xx_clear_drv_active(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
return rval;
}
dev_ready:
- qla_printk(KERN_INFO, ha, "HW State: READY\n");
+ ql_log(ql_log_info, vha, 0x00ae,
+ "HW State: READY.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
return QLA_SUCCESS;
@@ -3376,15 +3433,15 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
/* quiescence timeout, other functions didn't ack
* changing the state to DEV_READY
*/
- qla_printk(KERN_INFO, ha,
- "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME);
- qla_printk(KERN_INFO, ha,
- "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active,
- drv_state);
+ ql_log(ql_log_info, vha, 0xb023,
+ "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_info, vha, 0xb024,
+ "DRV_ACTIVE:%d DRV_STATE:%d.\n",
+ drv_active, drv_state);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_READY);
- qla_printk(KERN_INFO, ha,
- "HW State: DEV_READY\n");
+ QLA82XX_DEV_READY);
+ ql_log(ql_log_info, vha, 0xb025,
+ "HW State: DEV_READY.\n");
qla82xx_idc_unlock(ha);
qla2x00_perform_loop_resync(vha);
qla82xx_idc_lock(ha);
@@ -3404,7 +3461,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
/* everyone acked so set the state to DEV_QUIESCENCE */
if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
- qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n");
+ ql_log(ql_log_info, vha, 0xb026,
+ "HW State: DEV_QUIESCENT.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
}
}
@@ -3441,7 +3499,8 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
/* Disable the board */
- qla_printk(KERN_INFO, ha, "Disabling the board\n");
+ ql_log(ql_log_fatal, vha, 0x00b8,
+ "Disabling the board.\n");
qla82xx_idc_lock(ha);
qla82xx_clear_drv_active(ha);
@@ -3492,8 +3551,8 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
while (drv_state != drv_active) {
if (time_after_eq(jiffies, reset_timeout)) {
- qla_printk(KERN_INFO, ha,
- "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_warn, vha, 0x00b5,
+ "Reset timeout.\n");
break;
}
qla82xx_idc_unlock(ha);
@@ -3504,12 +3563,15 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
}
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ ql_log(ql_log_info, vha, 0x00b6,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
/* Force to DEV_COLD unless someone else is starting a reset */
if (dev_state != QLA82XX_DEV_INITIALIZING) {
- qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
+ ql_log(ql_log_info, vha, 0x00b7,
+ "HW State: COLD/RE-INIT.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
}
}
@@ -3523,8 +3585,12 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
QLA82XX_PEG_ALIVE_COUNTER);
/* all 0xff, assume AER/EEH in progress, ignore */
- if (fw_heartbeat_counter == 0xffffffff)
+ if (fw_heartbeat_counter == 0xffffffff) {
+ ql_dbg(ql_dbg_timer, vha, 0x6003,
+ "FW heartbeat counter is 0xffffffff, "
+ "returning status=%d.\n", status);
return status;
+ }
if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
vha->seconds_since_last_heartbeat++;
/* FW not alive after 2 seconds */
@@ -3535,6 +3601,9 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
} else
vha->seconds_since_last_heartbeat = 0;
vha->fw_heartbeat_counter = fw_heartbeat_counter;
+ if (status)
+ ql_dbg(ql_dbg_timer, vha, 0x6004,
+ "Returning status=%d.\n", status);
return status;
}
@@ -3565,8 +3634,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
old_dev_state = dev_state;
- qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ ql_log(ql_log_info, vha, 0x009b,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -3574,9 +3645,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
while (1) {
if (time_after_eq(jiffies, dev_init_timeout)) {
- DEBUG(qla_printk(KERN_INFO, ha,
- "%s: device init failed!\n",
- QLA2XXX_DRIVER_NAME));
+ ql_log(ql_log_fatal, vha, 0x009c,
+ "Device init failed.\n");
rval = QLA_FUNCTION_FAILED;
break;
}
@@ -3586,10 +3656,11 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
old_dev_state = dev_state;
}
if (loopcount < 5) {
- qla_printk(KERN_INFO, ha,
- "2:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ?
- qdev_state[dev_state] : "Unknown");
+ ql_log(ql_log_info, vha, 0x009d,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] :
+ "Unknown");
}
switch (dev_state) {
@@ -3656,29 +3727,26 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
- qla_printk(KERN_WARNING, ha,
- "scsi(%ld) %s: Adapter reset needed!\n",
- vha->host_no, __func__);
+ ql_log(ql_log_warn, vha, 0x6001,
+ "Adapter reset needed.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
- DEBUG(qla_printk(KERN_INFO, ha,
- "scsi(%ld) %s - detected quiescence needed\n",
- vha->host_no, __func__));
+ ql_log(ql_log_warn, vha, 0x6002,
+ "Quiescent needed.\n");
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
halt_status = qla82xx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): %s, Dumping hw/fw registers:\n "
- " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n "
- " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n "
- " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n "
- " PEG_NET_4_PC: 0x%x\n",
- vha->host_no, __func__, halt_status,
+ ql_dbg(ql_dbg_timer, vha, 0x6005,
+ "dumping hw/fw registers:.\n "
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
+ " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
+ " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
+ " PEG_NET_4_PC: 0x%x.\n", halt_status,
qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
qla82xx_rd_32(ha,
QLA82XX_CRB_PEG_NET_0 + 0x3c),
@@ -3694,9 +3762,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
set_bit(ISP_UNRECOVERABLE,
&vha->dpc_flags);
} else {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): %s - detect abort needed\n",
- vha->host_no, __func__);
+ ql_log(ql_log_info, vha, 0x6006,
+ "Detect abort needed.\n");
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
}
@@ -3704,10 +3771,10 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "scsi(%ld) Due to fw hung, doing "
+ ql_log(ql_log_warn, vha, 0x6007,
+ "Due to FW hung, doing "
"premature completion of mbx "
- "command\n", vha->host_no));
+ "command.\n");
if (test_bit(MBX_INTR_WAIT,
&ha->mbx_cmd_flags))
complete(&ha->mbx_intr_comp);
@@ -3742,9 +3809,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
uint32_t dev_state;
if (vha->device_flags & DFLG_DEV_FAILED) {
- qla_printk(KERN_WARNING, ha,
- "%s(%ld): Device in failed state, "
- "Exiting.\n", __func__, vha->host_no);
+ ql_log(ql_log_warn, vha, 0x8024,
+ "Device in failed state, exiting.\n");
return QLA_SUCCESS;
}
ha->flags.isp82xx_reset_hdlr_active = 1;
@@ -3752,13 +3818,14 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
qla82xx_idc_lock(ha);
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (dev_state == QLA82XX_DEV_READY) {
- qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
+ ql_log(ql_log_info, vha, 0x8025,
+ "HW State: NEED RESET.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_NEED_RESET);
} else
- qla_printk(KERN_INFO, ha, "HW State: %s\n",
- dev_state < MAX_STATES ?
- qdev_state[dev_state] : "Unknown");
+ ql_log(ql_log_info, vha, 0x8026,
+ "Hw State: %s.\n", dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
qla82xx_idc_unlock(ha);
rval = qla82xx_device_state_handler(vha);
@@ -3777,9 +3844,9 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
vha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
if (ha->isp_abort_cnt == 0) {
- qla_printk(KERN_WARNING, ha,
- "ISP error recovery failed - "
- "board disabled\n");
+ ql_log(ql_log_warn, vha, 0x8027,
+ "ISP error recover failed - board "
+ "disabled.\n");
/*
* The next call disables the board
* completely.
@@ -3791,16 +3858,16 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
rval = QLA_SUCCESS;
} else { /* schedule another ISP abort */
ha->isp_abort_cnt--;
- DEBUG(qla_printk(KERN_INFO, ha,
- "qla%ld: ISP abort - retry remaining %d\n",
- vha->host_no, ha->isp_abort_cnt));
+ ql_log(ql_log_warn, vha, 0x8036,
+ "ISP abort - retry remaining %d.\n",
+ ha->isp_abort_cnt);
rval = QLA_FUNCTION_FAILED;
}
} else {
ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
- DEBUG(qla_printk(KERN_INFO, ha,
- "(%ld): ISP error recovery - retrying (%d) "
- "more times\n", vha->host_no, ha->isp_abort_cnt));
+ ql_dbg(ql_dbg_taskm, vha, 0x8029,
+ "ISP error recovery - retrying (%d) more times.\n",
+ ha->isp_abort_cnt);
set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
rval = QLA_FUNCTION_FAILED;
}
@@ -3872,8 +3939,8 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
break;
}
}
- DEBUG2(printk(KERN_INFO
- "%s status=%d\n", __func__, status));
+ ql_dbg(ql_dbg_p3p, vha, 0xb027,
+ "%s status=%d.\n", status);
return status;
}
@@ -3902,6 +3969,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
}
}
}
+ ql_dbg(ql_dbg_init, vha, 0x00b0,
+ "Entered %s fw_hung=%d.\n",
+ __func__, ha->flags.isp82xx_fw_hung);
/* Abort all commands gracefully if fw NOT hung */
if (!ha->flags.isp82xx_fw_hung) {
@@ -3922,13 +3992,13 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
spin_unlock_irqrestore(
&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort command failed in %s\n",
- vha->host_no, __func__);
+ ql_log(ql_log_info, vha,
+ 0x00b1,
+ "mbx abort failed.\n");
} else {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort command success in %s\n",
- vha->host_no, __func__);
+ ql_log(ql_log_info, vha,
+ 0x00b2,
+ "mbx abort success.\n");
}
spin_lock_irqsave(&ha->hardware_lock, flags);
}
@@ -3940,8 +4010,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
/* Wait for pending cmds (physical and virtual) to complete */
if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
WAIT_HOST) == QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Done wait for pending commands\n"));
+ ql_dbg(ql_dbg_init, vha, 0x00b3,
+ "Done wait for "
+ "pending commands.\n");
}
}
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f461925..4cace3f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -35,6 +35,10 @@ static struct kmem_cache *srb_cachep;
* CT6 CTX allocation cache
*/
static struct kmem_cache *ctx_cachep;
+/*
+ * error level for logging
+ */
+int ql_errlev = ql_log_all;
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
@@ -69,8 +73,17 @@ MODULE_PARM_DESC(ql2xallocfwdump,
int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
- "Option to enable extended error logging, "
- "Default is 0 - no logging. 1 - log errors.");
+ "Option to enable extended error logging,\n"
+ "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
+ "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
+ "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
+ "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
+ "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
+ "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
+ "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
+ "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
+ "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
+ "\t\tDo LOGICAL OR of the value to enable more than one level");
int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
@@ -93,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to report for target devices.");
/* Do not change the value of this after module load */
-int ql2xenabledif = 1;
+int ql2xenabledif = 0;
module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF "
- " Default is 0 - No DIF Support. 1 - Enable it");
+ " Default is 0 - No DIF Support. 1 - Enable it"
+ ", 2 - Enable DIF for all types, except Type 0.");
-int ql2xenablehba_err_chk;
+int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
- " Enable T10-CRC-DIF Error isolation by HBA"
- " Default is 0 - Error isolation disabled, 1 - Enable it");
+ " Enable T10-CRC-DIF Error isolation by HBA:\n"
+ " Default is 1.\n"
+ " 0 -- Error isolation disabled\n"
+ " 1 -- Error isolation enabled only for DIX Type 0\n"
+ " 2 -- Error isolation enabled for all Types\n");
int ql2xiidmaenable=1;
module_param(ql2xiidmaenable, int, S_IRUGO);
@@ -128,8 +145,8 @@ MODULE_PARM_DESC(ql2xmultique_tag,
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwloadbin,
- "Option to specify location from which to load ISP firmware:\n"
- " 2 -- load firmware via the request_firmware() (hotplug)\n"
+ "Option to specify location from which to load ISP firmware:.\n"
+ " 2 -- load firmware via the request_firmware() (hotplug).\n"
" interface.\n"
" 1 -- load firmware from flash.\n"
" 0 -- use default semantics.\n");
@@ -143,7 +160,7 @@ MODULE_PARM_DESC(ql2xetsenable,
int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO);
MODULE_PARM_DESC(ql2xdbwr,
- "Option to specify scheme for request queue posting\n"
+ "Option to specify scheme for request queue posting.\n"
" 0 -- Regular doorbell.\n"
" 1 -- CAMRAM doorbell (faster).\n");
@@ -168,7 +185,7 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO);
MODULE_PARM_DESC(ql2xdontresethba,
- "Option to specify reset behaviour\n"
+ "Option to specify reset behaviour.\n"
" 0 (Default) -- Reset on failure.\n"
" 1 -- Do not reset on failure.\n");
@@ -247,8 +264,11 @@ static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
/* Currently used for 82XX only. */
- if (vha->device_flags & DFLG_DEV_FAILED)
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_dbg(ql_dbg_timer, vha, 0x600d,
+ "Device in a failed state, returning.\n");
return;
+ }
mod_timer(&vha->timer, jiffies + interval * HZ);
}
@@ -273,19 +293,20 @@ static void qla2x00_sp_free_dma(srb_t *);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
GFP_KERNEL);
if (!ha->req_q_map) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for request queue ptrs\n");
+ ql_log(ql_log_fatal, vha, 0x003b,
+ "Unable to allocate memory for request queue ptrs.\n");
goto fail_req_map;
}
ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
GFP_KERNEL);
if (!ha->rsp_q_map) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for response queue ptrs\n");
+ ql_log(ql_log_fatal, vha, 0x003c,
+ "Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
set_bit(0, ha->rsp_qid_map);
@@ -349,8 +370,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
if (!(ha->fw_attributes & BIT_6)) {
- qla_printk(KERN_INFO, ha,
- "Firmware is not multi-queue capable\n");
+ ql_log(ql_log_warn, vha, 0x00d8,
+ "Firmware is not multi-queue capable.\n");
goto fail;
}
if (ql2xmultique_tag) {
@@ -359,8 +380,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
req = qla25xx_create_req_que(ha, options, 0, 0, -1,
QLA_DEFAULT_QUE_QOS);
if (!req) {
- qla_printk(KERN_WARNING, ha,
- "Can't create request queue\n");
+ ql_log(ql_log_warn, vha, 0x00e0,
+ "Failed to create request queue.\n");
goto fail;
}
ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
@@ -369,17 +390,20 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
for (ques = 1; ques < ha->max_rsp_queues; ques++) {
ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
if (!ret) {
- qla_printk(KERN_WARNING, ha,
- "Response Queue create failed\n");
+ ql_log(ql_log_warn, vha, 0x00e8,
+ "Failed to create response queue.\n");
goto fail2;
}
}
ha->flags.cpu_affinity_enabled = 1;
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "CPU affinity mode enabled, no. of response"
- " queues:%d, no. of request queues:%d\n",
- ha->max_rsp_queues, ha->max_req_queues));
+ ql_dbg(ql_dbg_multiq, vha, 0xc007,
+ "CPU affinity mode enalbed, "
+ "no. of response queues:%d no. of request queues:%d.\n",
+ ha->max_rsp_queues, ha->max_req_queues);
+ ql_dbg(ql_dbg_init, vha, 0x00e9,
+ "CPU affinity mode enalbed, "
+ "no. of response queues:%d no. of request queues:%d.\n",
+ ha->max_rsp_queues, ha->max_req_queues);
}
return 0;
fail2:
@@ -526,8 +550,11 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
struct qla_hw_data *ha = vha->hw;
sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
- if (!sp)
+ if (!sp) {
+ ql_log(ql_log_warn, vha, 0x3006,
+ "Memory allocation failed for sp.\n");
return sp;
+ }
atomic_set(&sp->ref_count, 1);
sp->fcport = fcport;
@@ -551,30 +578,43 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int rval;
if (ha->flags.eeh_busy) {
- if (ha->flags.pci_channel_io_perm_failure)
+ if (ha->flags.pci_channel_io_perm_failure) {
+ ql_dbg(ql_dbg_io, vha, 0x3001,
+ "PCI Channel IO permanent failure, exiting "
+ "cmd=%p.\n", cmd);
cmd->result = DID_NO_CONNECT << 16;
- else
+ } else {
+ ql_dbg(ql_dbg_io, vha, 0x3002,
+ "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
cmd->result = DID_REQUEUE << 16;
+ }
goto qc24_fail_command;
}
rval = fc_remote_port_chkready(rport);
if (rval) {
cmd->result = rval;
+ ql_dbg(ql_dbg_io, vha, 0x3003,
+ "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+ cmd, rval);
goto qc24_fail_command;
}
if (!vha->flags.difdix_supported &&
scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
- DEBUG2(qla_printk(KERN_ERR, ha,
- "DIF Cap Not Reg, fail DIF capable cmd's:%x\n",
- cmd->cmnd[0]));
+ ql_dbg(ql_dbg_io, vha, 0x3004,
+ "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
+ cmd);
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+ ql_dbg(ql_dbg_io, vha, 0x3005,
+ "Returning DNC, fcport_state=%d loop_state=%d.\n",
+ atomic_read(&fcport->state),
+ atomic_read(&base_vha->loop_state));
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
@@ -586,8 +626,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_host_busy;
rval = ha->isp_ops->start_scsi(sp);
- if (rval != QLA_SUCCESS)
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_io, vha, 0x3013,
+ "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
goto qc24_host_busy_free_sp;
+ }
return 0;
@@ -630,7 +673,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
int ret = QLA_SUCCESS;
if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
- DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
+ ql_dbg(ql_dbg_taskm, vha, 0x8005,
+ "Return:eh_wait.\n");
return ret;
}
@@ -723,7 +767,8 @@ qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
else
return_status = QLA_FUNCTION_FAILED;
- DEBUG2(printk("%s return_status=%d\n", __func__, return_status));
+ ql_dbg(ql_dbg_taskm, vha, 0x8019,
+ "%s return status=%d.\n", __func__, return_status);
return return_status;
}
@@ -831,10 +876,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
int wait = 0;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_taskm, vha, 0x8000,
+ "Entered %s for cmd=%p.\n", __func__, cmd);
if (!CMD_SP(cmd))
return SUCCESS;
ret = fc_block_scsi_eh(cmd);
+ ql_dbg(ql_dbg_taskm, vha, 0x8001,
+ "Return value of fc_block_scsi_eh=%d.\n", ret);
if (ret != 0)
return ret;
ret = SUCCESS;
@@ -849,37 +898,41 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return SUCCESS;
}
- DEBUG2(printk("%s(%ld): aborting sp %p from RISC.",
- __func__, vha->host_no, sp));
+ ql_dbg(ql_dbg_taskm, vha, 0x8002,
+ "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
/* Get a reference to the sp and drop the lock.*/
sp_get(sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
- DEBUG2(printk("%s(%ld): abort_command "
- "mbx failed.\n", __func__, vha->host_no));
- ret = FAILED;
+ ql_dbg(ql_dbg_taskm, vha, 0x8003,
+ "Abort command mbx failed for cmd=%p.\n", cmd);
} else {
- DEBUG3(printk("%s(%ld): abort_command "
- "mbx success.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8004,
+ "Abort command mbx success.\n");
wait = 1;
}
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qla2x00_sp_compl(ha, sp);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Did the command return during mailbox execution? */
+ if (ret == FAILED && !CMD_SP(cmd))
+ ret = SUCCESS;
/* Wait for the command to be returned. */
if (wait) {
if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n",
- vha->host_no, id, lun, ret);
+ ql_log(ql_log_warn, vha, 0x8006,
+ "Abort handler timed out for cmd=%p.\n", cmd);
ret = FAILED;
}
}
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n",
- vha->host_no, id, lun, wait, ret);
+ ql_log(ql_log_info, vha, 0x801c,
+ "Abort command issued -- %d %x.\n", wait, ret);
return ret;
}
@@ -947,40 +1000,59 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int err;
- if (!fcport)
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x8007,
+ "fcport is NULL.\n");
return FAILED;
+ }
err = fc_block_scsi_eh(cmd);
+ ql_dbg(ql_dbg_taskm, vha, 0x8008,
+ "fc_block_scsi_eh ret=%d.\n", err);
if (err != 0)
return err;
- qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
- vha->host_no, cmd->device->id, cmd->device->lun, name);
+ ql_log(ql_log_info, vha, 0x8009,
+ "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
+ cmd->device->id, cmd->device->lun, cmd);
err = 0;
- if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800a,
+ "Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
+ }
err = 1;
- if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800b,
+ "Wait for loop ready failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
+ }
err = 2;
if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
- != QLA_SUCCESS)
+ != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800c,
+ "do_reset failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
+ }
err = 3;
if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
- cmd->device->lun, type) != QLA_SUCCESS)
+ cmd->device->lun, type) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800d,
+ "wait for peding cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
+ }
- qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
- vha->host_no, cmd->device->id, cmd->device->lun, name);
+ ql_log(ql_log_info, vha, 0x800e,
+ "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
+ cmd->device->id, cmd->device->lun, cmd);
return SUCCESS;
eh_reset_failed:
- qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
- , vha->host_no, cmd->device->id, cmd->device->lun, name,
- reset_errors[err]);
+ ql_log(ql_log_info, vha, 0x800f,
+ "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
+ reset_errors[err], cmd->device->id, cmd->device->lun, cmd);
return FAILED;
}
@@ -1030,19 +1102,25 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
id = cmd->device->id;
lun = cmd->device->lun;
- if (!fcport)
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x8010,
+ "fcport is NULL.\n");
return ret;
+ }
ret = fc_block_scsi_eh(cmd);
+ ql_dbg(ql_dbg_taskm, vha, 0x8011,
+ "fc_block_scsi_eh ret=%d.\n", ret);
if (ret != 0)
return ret;
ret = FAILED;
- qla_printk(KERN_INFO, vha->hw,
- "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
+ ql_log(ql_log_info, vha, 0x8012,
+ "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
- DEBUG2(printk("%s failed:board disabled\n",__func__));
+ ql_log(ql_log_fatal, vha, 0x8013,
+ "Wait for hba online failed board disabled.\n");
goto eh_bus_reset_done;
}
@@ -1055,12 +1133,15 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
/* Flush outstanding commands. */
if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
- QLA_SUCCESS)
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8014,
+ "Wait for pending commands failed.\n");
ret = FAILED;
+ }
eh_bus_reset_done:
- qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
- (ret == FAILED) ? "failed" : "succeeded");
+ ql_log(ql_log_warn, vha, 0x802b,
+ "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
return ret;
}
@@ -1093,16 +1174,21 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
id = cmd->device->id;
lun = cmd->device->lun;
- if (!fcport)
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x8016,
+ "fcport is NULL.\n");
return ret;
+ }
ret = fc_block_scsi_eh(cmd);
+ ql_dbg(ql_dbg_taskm, vha, 0x8017,
+ "fc_block_scsi_eh ret=%d.\n", ret);
if (ret != 0)
return ret;
ret = FAILED;
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
+ ql_log(ql_log_info, vha, 0x8018,
+ "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
@@ -1137,8 +1223,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
/* failed. schedule dpc to try */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
- if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x802a,
+ "wait for hba online failed.\n");
goto eh_host_reset_lock;
+ }
}
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}
@@ -1149,7 +1238,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
ret = SUCCESS;
eh_host_reset_lock:
- qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
+ qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
(ret == FAILED) ? "failed" : "succeeded");
return ret;
@@ -1179,9 +1268,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
ret = ha->isp_ops->target_reset(fcport, 0, 0);
if (ret != QLA_SUCCESS) {
- DEBUG2_3(printk("%s(%ld): bus_reset failed: "
- "target_reset=%d d_id=%x.\n", __func__,
- vha->host_no, ret, fcport->d_id.b24));
+ ql_dbg(ql_dbg_taskm, vha, 0x802c,
+ "Bus Reset failed: Target Reset=%d "
+ "d_id=%x.\n", ret, fcport->d_id.b24);
}
}
}
@@ -1189,9 +1278,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
- DEBUG2_3(printk("%s(%ld): failed: "
- "full_login_lip=%d.\n", __func__, vha->host_no,
- ret));
+ ql_dbg(ql_dbg_taskm, vha, 0x802d,
+ "full_login_lip=%d.\n", ret);
}
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1202,8 +1290,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
if (ret != QLA_SUCCESS) {
- DEBUG2_3(printk("%s(%ld): failed: "
- "lip_reset=%d.\n", __func__, vha->host_no, ret));
+ ql_dbg(ql_dbg_taskm, vha, 0x802e,
+ "lip_reset failed (%d).\n", ret);
} else
qla2x00_wait_for_loop_ready(vha);
}
@@ -1302,17 +1390,17 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
if (!scsi_track_queue_full(sdev, qdepth))
return;
- DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
- "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
- fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
- sdev->queue_depth));
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
+ "Queue depth adjusted-down "
+ "to %d for scsi(%ld:%d:%d:%d).\n",
+ sdev->queue_depth, fcport->vha->host_no,
+ sdev->channel, sdev->id, sdev->lun);
}
static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
{
fc_port_t *fcport = sdev->hostdata;
struct scsi_qla_host *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
req = vha->req;
@@ -1327,10 +1415,11 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
else
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
- fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
- sdev->queue_depth));
+ ql_dbg(ql_dbg_io, vha, 0x302a,
+ "Queue depth adjusted-up to %d for "
+ "scsi(%ld:%d:%d:%d).\n",
+ sdev->queue_depth, fcport->vha->host_no,
+ sdev->channel, sdev->id, sdev->lun);
}
static int
@@ -1776,6 +1865,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->flags.port0 = 1;
else
ha->flags.port0 = 0;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
+ "device_type=0x%x port=%d fw_srisc_address=%p.\n",
+ ha->device_type, ha->flags.port0, ha->fw_srisc_address);
}
static int
@@ -1790,10 +1882,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
- qla_printk(KERN_WARNING, ha,
- "Failed to reserve PIO/MMIO regions (%s)\n",
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
pci_name(ha->pdev));
-
goto iospace_error_exit;
}
if (!(ha->bars & 1))
@@ -1803,39 +1894,42 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
pio = pci_resource_start(ha->pdev, 0);
if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
- qla_printk(KERN_WARNING, ha,
- "Invalid PCI I/O region size (%s)...\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
pio = 0;
}
} else {
- qla_printk(KERN_WARNING, ha,
- "region #0 not a PIO resource (%s)...\n",
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
+ "Region #0 no a PIO resource (%s).\n",
pci_name(ha->pdev));
pio = 0;
}
ha->pio_address = pio;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+ "PIO address=%p.\n",
+ ha->pio_address);
skip_pio:
/* Use MMIO operations for all accesses. */
if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
- qla_printk(KERN_ERR, ha,
- "region #1 not an MMIO resource (%s), aborting\n",
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+ "Region #1 not an MMIO resource (%s), aborting.\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
- qla_printk(KERN_ERR, ha,
- "Invalid PCI mem region size (%s), aborting\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+ "Invalid PCI mem region size (%s), aborting.\n",
+ pci_name(ha->pdev));
goto iospace_error_exit;
}
ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
if (!ha->iobase) {
- qla_printk(KERN_ERR, ha,
- "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
-
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+ "Cannot remap MMIO (%s), aborting.\n",
+ pci_name(ha->pdev));
goto iospace_error_exit;
}
@@ -1849,6 +1943,8 @@ skip_pio:
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
pci_resource_len(ha->pdev, 3));
if (ha->mqiobase) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+ "MQIO Base=%p.\n", ha->mqiobase);
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
ha->msix_count = msix;
@@ -1861,17 +1957,24 @@ skip_pio:
ha->max_req_queues = 2;
} else if (ql2xmaxqueues > 1) {
ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
- " of request queues:%d\n", ha->max_req_queues));
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
}
- qla_printk(KERN_INFO, ha,
- "MSI-X vector count: %d\n", msix);
+ ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+ "MSI-X vector count: %d.\n", msix);
} else
- qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
+ ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+ "BAR 3 not enabled.\n");
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+ "MSIX Count:%d.\n", ha->msix_count);
return (0);
iospace_error_exit:
@@ -1935,7 +2038,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
+ ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
+ "Mem only adapter.\n");
}
+ ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
+ "Bars=%d.\n", bars);
if (mem_only) {
if (pci_enable_device_mem(pdev))
@@ -1950,9 +2057,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
if (!ha) {
- DEBUG(printk("Unable to allocate memory for ha\n"));
+ ql_log_pci(ql_log_fatal, pdev, 0x0009,
+ "Unable to allocate memory for ha.\n");
goto probe_out;
}
+ ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
+ "Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
/* Clear our data area */
@@ -1974,10 +2084,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto probe_hw_failed;
- qla_printk(KERN_INFO, ha,
- "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
- ha->iobase);
-
+ ql_log_pci(ql_log_info, pdev, 0x001d,
+ "Found an ISP%04X irq %d iobase 0x%p.\n",
+ pdev->device, pdev->irq, ha->iobase);
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2078,7 +2187,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
}
-
+ ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
+ "mbx_count=%d, req_length=%d, "
+ "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
+ "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n",
+ ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
+ ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
+ ha->nvram_npiv_size);
+ ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
+ "isp_ops=%p, flash_conf_off=%d, "
+ "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
+ ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
+ ha->nvram_conf_off, ha->nvram_data_off);
mutex_init(&ha->vport_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
@@ -2088,10 +2208,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
set_bit(0, (unsigned long *) ha->vp_idx_map);
qla2x00_config_dma_addressing(ha);
+ ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
+ "64 Bit addressing is %s.\n",
+ ha->flags.enable_64bit_addressing ? "enable" :
+ "disable");
ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
if (!ret) {
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to allocate memory for adapter\n");
+ ql_log_pci(ql_log_fatal, pdev, 0x0031,
+ "Failed to allocate memory for adapter, aborting.\n");
goto probe_hw_failed;
}
@@ -2103,9 +2227,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
base_vha = qla2x00_create_host(sht, ha);
if (!base_vha) {
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to allocate memory for scsi_host\n");
-
ret = -ENOMEM;
qla2x00_mem_free(ha);
qla2x00_free_req_que(ha, req);
@@ -2132,12 +2253,16 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!IS_QLA82XX(ha))
host->sg_tablesize = QLA_SG_ALL;
}
-
+ ql_dbg(ql_dbg_init, base_vha, 0x0032,
+ "can_queue=%d, req=%p, "
+ "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
+ host->can_queue, base_vha->req,
+ base_vha->mgmt_svr_loop_id, host->sg_tablesize);
host->max_id = max_id;
host->this_id = 255;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
host->max_cmd_len = MAX_CMDSZ;
@@ -2146,6 +2271,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->transportt = qla2xxx_transport_template;
sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
+ ql_dbg(ql_dbg_init, base_vha, 0x0033,
+ "max_id=%d this_id=%d "
+	    "cmd_per_lun=%d unique_id=%d max_cmd_len=%d max_channel=%d "
+ "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
+ host->this_id, host->cmd_per_lun, host->unique_id,
+ host->max_cmd_len, host->max_channel, host->max_lun,
+ host->transportt, sht->vendor_id);
+
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
@@ -2156,9 +2289,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Alloc arrays of request and response ring ptrs */
que_init:
if (!qla2x00_alloc_queues(ha)) {
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to allocate memory for queue"
- " pointers\n");
+ ql_log(ql_log_fatal, base_vha, 0x003d,
+		    "Failed to allocate memory for queue pointers, aborting.\n");
goto probe_init_failed;
}
@@ -2186,20 +2318,33 @@ que_init:
rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
}
- if (qla2x00_initialize_adapter(base_vha)) {
- qla_printk(KERN_WARNING, ha,
- "Failed to initialize adapter\n");
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
+ "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
+ ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
+ "req->req_q_in=%p req->req_q_out=%p "
+ "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
+ req->req_q_in, req->req_q_out,
+ rsp->rsp_q_in, rsp->rsp_q_out);
+ ql_dbg(ql_dbg_init, base_vha, 0x003e,
+ "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
+ ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
+ ql_dbg(ql_dbg_init, base_vha, 0x003f,
+ "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
+ req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
- DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
- "Adapter flags %x.\n",
- base_vha->host_no, base_vha->device_flags));
+ if (qla2x00_initialize_adapter(base_vha)) {
+ ql_log(ql_log_fatal, base_vha, 0x00d6,
+ "Failed to initialize adapter - Adapter flags %x.\n",
+ base_vha->device_flags);
if (IS_QLA82XX(ha)) {
qla82xx_idc_lock(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
qla82xx_idc_unlock(ha);
- qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ ql_log(ql_log_fatal, base_vha, 0x00d7,
+ "HW State: FAILED.\n");
}
ret = -ENODEV;
@@ -2208,9 +2353,8 @@ que_init:
if (ha->mqenable) {
if (qla25xx_setup_mode(base_vha)) {
- qla_printk(KERN_WARNING, ha,
- "Can't create queues, falling back to single"
- " queue mode\n");
+ ql_log(ql_log_warn, base_vha, 0x00ec,
+ "Failed to create queues, falling back to single queue mode.\n");
goto que_init;
}
}
@@ -2222,13 +2366,15 @@ que_init:
* Startup the kernel thread for this host adapter
*/
ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
- "%s_dpc", base_vha->host_str);
+ "%s_dpc", base_vha->host_str);
if (IS_ERR(ha->dpc_thread)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to start DPC thread!\n");
+ ql_log(ql_log_fatal, base_vha, 0x00ed,
+ "Failed to start DPC thread.\n");
ret = PTR_ERR(ha->dpc_thread);
goto probe_failed;
}
+ ql_dbg(ql_dbg_init, base_vha, 0x00ee,
+ "DPC thread started successfully.\n");
skip_dpc:
list_add_tail(&base_vha->list, &ha->vp_list);
@@ -2236,18 +2382,23 @@ skip_dpc:
/* Initialized the timer */
qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
-
- DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
- base_vha->host_no, ha));
-
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
+ ql_dbg(ql_dbg_init, base_vha, 0x00ef,
+ "Started qla2x00_timer with "
+ "interval=%d.\n", WATCH_INTERVAL);
+ ql_dbg(ql_dbg_init, base_vha, 0x00f0,
+ "Detected hba at address=%p.\n",
+ ha);
+
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
+ int prot = 0;
base_vha->flags.difdix_supported = 1;
- DEBUG18(qla_printk(KERN_INFO, ha,
- "Registering for DIF/DIX type 1 and 3"
- " protection.\n"));
+ ql_dbg(ql_dbg_init, base_vha, 0x00f1,
+ "Registering for DIF/DIX type 1 and 3 protection.\n");
+ if (ql2xenabledif == 1)
+ prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(host,
- SHOST_DIF_TYPE1_PROTECTION
+ prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION
| SHOST_DIX_TYPE1_PROTECTION
@@ -2267,6 +2418,9 @@ skip_dpc:
base_vha->flags.init_done = 1;
base_vha->flags.online = 1;
+ ql_dbg(ql_dbg_init, base_vha, 0x00f2,
+ "Init done and hba is online.\n");
+
scsi_scan_host(host);
qla2x00_alloc_sysfs_attr(base_vha);
@@ -2275,14 +2429,17 @@ skip_dpc:
qla2x00_dfs_setup(base_vha);
- qla_printk(KERN_INFO, ha, "\n"
- " QLogic Fibre Channel HBA Driver: %s\n"
- " QLogic %s - %s\n"
- " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
- qla2x00_version_str, ha->model_number,
- ha->model_desc ? ha->model_desc : "", pdev->device,
- ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
- ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
+ ql_log(ql_log_info, base_vha, 0x00fa,
+	    "QLogic Fibre Channel HBA Driver: %s.\n",
+ qla2x00_version_str);
+ ql_log(ql_log_info, base_vha, 0x00fb,
+ "QLogic %s - %s.\n",
+ ha->model_number, ha->model_desc ? ha->model_desc : "");
+ ql_log(ql_log_info, base_vha, 0x00fc,
+ "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
+ pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
+ pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
+ base_vha->host_no,
ha->isp_ops->fw_version_str(base_vha, fw_str));
return 0;
@@ -2580,20 +2737,15 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
fcport->login_retry = vha->hw->login_retry_count;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- DEBUG(printk("scsi(%ld): Port login retry: "
+ ql_dbg(ql_dbg_disc, vha, 0x2067,
+ "Port login retry "
"%02x%02x%02x%02x%02x%02x%02x%02x, "
- "id = 0x%04x retry cnt=%d\n",
- vha->host_no,
- fcport->port_name[0],
- fcport->port_name[1],
- fcport->port_name[2],
- fcport->port_name[3],
- fcport->port_name[4],
- fcport->port_name[5],
- fcport->port_name[6],
- fcport->port_name[7],
- fcport->loop_id,
- fcport->login_retry));
+ "id = 0x%04x retry cnt=%d.\n",
+ fcport->port_name[0], fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7],
+ fcport->loop_id, fcport->login_retry);
}
}
@@ -2676,6 +2828,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ctx_cachep);
if (!ha->ctx_mempool)
goto fail_free_srb_mempool;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
+ "ctx_cachep=%p ctx_mempool=%p.\n",
+ ctx_cachep, ha->ctx_mempool);
}
/* Get memory for cached NVRAM */
@@ -2690,22 +2845,29 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->s_dma_pool)
goto fail_free_nvram;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
+ "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
+ ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
+
if (IS_QLA82XX(ha) || ql2xenabledif) {
ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
DSD_LIST_DMA_POOL_SIZE, 8, 0);
if (!ha->dl_dma_pool) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - dl_dma_pool\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
+ "Failed to allocate memory for dl_dma_pool.\n");
goto fail_s_dma_pool;
}
ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
FCP_CMND_DMA_POOL_SIZE, 8, 0);
if (!ha->fcp_cmnd_dma_pool) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - fcp_cmnd_dma_pool\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
+ "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
goto fail_dl_dma_pool;
}
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
+ "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
+ ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
}
/* Allocate memory for SNS commands */
@@ -2715,6 +2877,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
if (!ha->sns_cmd)
goto fail_dma_pool;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
+		    "sns_cmd=%p.\n", ha->sns_cmd);
} else {
/* Get consistent memory allocated for MS IOCB */
ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -2726,12 +2890,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
if (!ha->ct_sns)
goto fail_free_ms_iocb;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
+ "ms_iocb=%p ct_sns=%p.\n",
+ ha->ms_iocb, ha->ct_sns);
}
/* Allocate memory for request ring */
*req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
if (!*req) {
- DEBUG(printk("Unable to allocate memory for req\n"));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
+ "Failed to allocate memory for req.\n");
goto fail_req;
}
(*req)->length = req_len;
@@ -2739,14 +2907,15 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
((*req)->length + 1) * sizeof(request_t),
&(*req)->dma, GFP_KERNEL);
if (!(*req)->ring) {
- DEBUG(printk("Unable to allocate memory for req_ring\n"));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
+ "Failed to allocate memory for req_ring.\n");
goto fail_req_ring;
}
/* Allocate memory for response ring */
*rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
if (!*rsp) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for rsp\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
+ "Failed to allocate memory for rsp.\n");
goto fail_rsp;
}
(*rsp)->hw = ha;
@@ -2755,19 +2924,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
((*rsp)->length + 1) * sizeof(response_t),
&(*rsp)->dma, GFP_KERNEL);
if (!(*rsp)->ring) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for rsp_ring\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
+ "Failed to allocate memory for rsp_ring.\n");
goto fail_rsp_ring;
}
(*req)->rsp = *rsp;
(*rsp)->req = *req;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
+ "req=%p req->length=%d req->ring=%p rsp=%p "
+ "rsp->length=%d rsp->ring=%p.\n",
+ *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
+ (*rsp)->ring);
/* Allocate memory for NVRAM data for vports */
if (ha->nvram_npiv_size) {
ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
- ha->nvram_npiv_size, GFP_KERNEL);
+ ha->nvram_npiv_size, GFP_KERNEL);
if (!ha->npiv_info) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for npiv info\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
+ "Failed to allocate memory for npiv_info.\n");
goto fail_npiv_info;
}
} else
@@ -2779,6 +2953,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
goto fail_ex_init_cb;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
+ "ex_init_cb=%p.\n", ha->ex_init_cb);
}
INIT_LIST_HEAD(&ha->gbl_dsd_list);
@@ -2789,6 +2965,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
&ha->async_pd_dma);
if (!ha->async_pd)
goto fail_async_pd;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
+ "async_pd=%p.\n", ha->async_pd);
}
INIT_LIST_HEAD(&ha->vp_list);
@@ -2854,7 +3032,8 @@ fail_free_init_cb:
ha->init_cb = NULL;
ha->init_cb_dma = 0;
fail:
- DEBUG(printk("%s: Memory allocation failure\n", __func__));
+ ql_log(ql_log_fatal, NULL, 0x0030,
+ "Memory allocation failure.\n");
return -ENOMEM;
}
@@ -3003,8 +3182,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
if (host == NULL) {
- printk(KERN_WARNING
- "qla2xxx: Couldn't allocate host from scsi layer!\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
+ "Failed to allocate host from the scsi layer, aborting.\n");
goto fail;
}
@@ -3023,6 +3202,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->work_lock);
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+ ql_dbg(ql_dbg_init, vha, 0x0041,
+	    "Allocated the host=%p hw=%p vha=%p dev_name=%s.\n",
+ vha->host, vha->hw, vha,
+ dev_name(&(ha->pdev->dev)));
+
return vha;
fail:
@@ -3264,18 +3448,18 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (status == QLA_SUCCESS) {
fcport->old_loop_id = fcport->loop_id;
- DEBUG(printk("scsi(%ld): port login OK: logged "
- "in ID 0x%x\n", vha->host_no, fcport->loop_id));
+ ql_dbg(ql_dbg_disc, vha, 0x2003,
+ "Port login OK: logged in ID 0x%x.\n",
+ fcport->loop_id);
qla2x00_update_fcport(vha, fcport);
} else if (status == 1) {
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/* retry the login again */
- DEBUG(printk("scsi(%ld): Retrying"
- " %d login again loop_id 0x%x\n",
- vha->host_no, fcport->login_retry,
- fcport->loop_id));
+ ql_dbg(ql_dbg_disc, vha, 0x2007,
+ "Retrying %d login again loop_id 0x%x.\n",
+ fcport->login_retry, fcport->loop_id);
} else {
fcport->login_retry = 0;
}
@@ -3315,26 +3499,27 @@ qla2x00_do_dpc(void *data)
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
- DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
+ "DPC handler sleeping.\n");
schedule();
__set_current_state(TASK_RUNNING);
- DEBUG3(printk("qla2x00: DPC handler waking up\n"));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
+ "DPC handler waking up.\n");
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
+ "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
/* Initialization not yet finished. Don't do anything yet. */
if (!base_vha->flags.init_done)
continue;
if (ha->flags.eeh_busy) {
- DEBUG17(qla_printk(KERN_WARNING, ha,
- "qla2x00_do_dpc: dpc_flags: %lx\n",
- base_vha->dpc_flags));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
+ "eeh_busy=%d.\n", ha->flags.eeh_busy);
continue;
}
- DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
-
ha->dpc_active = 1;
if (ha->flags.mbox_busy) {
@@ -3351,8 +3536,8 @@ qla2x00_do_dpc(void *data)
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
qla82xx_idc_unlock(ha);
- qla_printk(KERN_INFO, ha,
- "HW State: FAILED\n");
+ ql_log(ql_log_info, base_vha, 0x4004,
+ "HW State: FAILED.\n");
qla82xx_device_state_handler(base_vha);
continue;
}
@@ -3360,10 +3545,8 @@ qla2x00_do_dpc(void *data)
if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
&base_vha->dpc_flags)) {
- DEBUG(printk(KERN_INFO
- "scsi(%ld): dpc: sched "
- "qla82xx_fcoe_ctx_reset ha = %p\n",
- base_vha->host_no, ha));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
+ "FCoE context reset scheduled.\n");
if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
if (qla82xx_fcoe_ctx_reset(base_vha)) {
@@ -3377,18 +3560,16 @@ qla2x00_do_dpc(void *data)
&base_vha->dpc_flags);
}
- DEBUG(printk("scsi(%ld): dpc:"
- " qla82xx_fcoe_ctx_reset end\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
+ "FCoE context reset end.\n");
}
}
if (test_and_clear_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags)) {
- DEBUG(printk("scsi(%ld): dpc: sched "
- "qla2x00_abort_isp ha = %p\n",
- base_vha->host_no, ha));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+ "ISP abort scheduled.\n");
if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
@@ -3401,8 +3582,8 @@ qla2x00_do_dpc(void *data)
&base_vha->dpc_flags);
}
- DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
+ "ISP abort end.\n");
}
if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
@@ -3411,9 +3592,8 @@ qla2x00_do_dpc(void *data)
}
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
- DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched "
- "qla2x00_quiesce_needed ha = %p\n",
- base_vha->host_no, ha));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
+ "Quiescence mode scheduled.\n");
qla82xx_device_state_handler(base_vha);
clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
if (!ha->flags.quiesce_owner) {
@@ -3423,17 +3603,20 @@ qla2x00_do_dpc(void *data)
qla82xx_clear_qsnt_ready(base_vha);
qla82xx_idc_unlock(ha);
}
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
+ "Quiescence mode end.\n");
}
if (test_and_clear_bit(RESET_MARKER_NEEDED,
&base_vha->dpc_flags) &&
(!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
- DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
- base_vha->host_no));
-
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
+ "Reset marker scheduled.\n");
qla2x00_rst_aen(base_vha);
clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
+ "Reset marker end.\n");
}
/* Retry each device up to login retry count */
@@ -3442,19 +3625,18 @@ qla2x00_do_dpc(void *data)
!test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
- DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
+ "Relogin scheduled.\n");
qla2x00_relogin(base_vha);
-
- DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
+ "Relogin end.\n");
}
if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
&base_vha->dpc_flags)) {
- DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
+ "Loop resync scheduled.\n");
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
&base_vha->dpc_flags))) {
@@ -3465,8 +3647,8 @@ qla2x00_do_dpc(void *data)
&base_vha->dpc_flags);
}
- DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
+ "Loop resync end.\n");
}
if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
@@ -3489,7 +3671,8 @@ qla2x00_do_dpc(void *data)
} /* End of while(1) */
__set_current_state(TASK_RUNNING);
- DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
+ "DPC handler exiting.\n");
/*
* Make sure that nobody tries to wake us up again.
@@ -3596,9 +3779,11 @@ void
qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
{
if (atomic_read(&sp->ref_count) == 0) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "SP reference-count to ZERO -- sp=%p\n", sp));
- DEBUG2(BUG());
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
+ "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+ sp, sp->cmd);
+ if (ql2xextended_error_logging & ql_dbg_io)
+ BUG();
return;
}
if (!atomic_dec_and_test(&sp->ref_count))
@@ -3626,6 +3811,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
struct req_que *req;
if (ha->flags.eeh_busy) {
+ ql_dbg(ql_dbg_timer, vha, 0x6000,
+ "EEH = %d, restarting timer.\n",
+ ha->flags.eeh_busy);
qla2x00_restart_timer(vha, WATCH_INTERVAL);
return;
}
@@ -3650,9 +3838,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
if (atomic_read(&vha->loop_down_timer) ==
vha->loop_down_abort_time) {
- DEBUG(printk("scsi(%ld): Loop Down - aborting the "
- "queues before time expire\n",
- vha->host_no));
+ ql_log(ql_log_info, vha, 0x6008,
+ "Loop down - aborting the queues before time expires.\n");
if (!IS_QLA2100(ha) && vha->link_down_timeout)
atomic_set(&vha->loop_state, LOOP_DEAD);
@@ -3697,10 +3884,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* if the loop has been down for 4 minutes, reinit adapter */
if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
if (!(vha->device_flags & DFLG_NO_CABLE)) {
- DEBUG(printk("scsi(%ld): Loop down - "
- "aborting ISP.\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x6009,
"Loop down - aborting ISP.\n");
if (IS_QLA82XX(ha))
@@ -3711,9 +3895,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
&vha->dpc_flags);
}
}
- DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
- vha->host_no,
- atomic_read(&vha->loop_down_timer)));
+ ql_dbg(ql_dbg_timer, vha, 0x600a,
+ "Loop down - seconds remaining %d.\n",
+ atomic_read(&vha->loop_down_timer));
}
/* Check if beacon LED needs to be blinked for physical host only */
@@ -3736,8 +3920,27 @@ qla2x00_timer(scsi_qla_host_t *vha)
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+ ql_dbg(ql_dbg_timer, vha, 0x600b,
+ "isp_abort_needed=%d loop_resync_needed=%d "
+ "fcport_update_needed=%d start_dpc=%d "
+		    "reset_marker_needed=%d.\n",
+ test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
+ test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
+ start_dpc,
+ test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
+ ql_dbg(ql_dbg_timer, vha, 0x600c,
+ "beacon_blink_needed=%d isp_unrecoverable=%d "
+ "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
+ "relogin_needed=%d.\n",
+ test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
+ test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
+ test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
+ }
qla2x00_restart_timer(vha, WATCH_INTERVAL);
}
@@ -3806,8 +4009,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
goto out;
if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
- DEBUG2(printk("scsi(%ld): Failed to load firmware image "
- "(%s).\n", vha->host_no, blob->name));
+ ql_log(ql_log_warn, vha, 0x0063,
+ "Failed to load firmware image (%s).\n", blob->name);
blob->fw = NULL;
blob = NULL;
goto out;
@@ -3836,8 +4039,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
scsi_qla_host_t *vha = pci_get_drvdata(pdev);
struct qla_hw_data *ha = vha->hw;
- DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
- state));
+ ql_dbg(ql_dbg_aer, vha, 0x9000,
+ "PCI error detected, state %x.\n", state);
switch (state) {
case pci_channel_io_normal:
@@ -3850,9 +4053,9 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "Due to pci channel io frozen, doing premature "
- "completion of mbx command\n"));
+ ql_dbg(ql_dbg_aer, vha, 0x9001,
+ "Due to pci channel io frozen, doing premature "
+ "completion of mbx command.\n");
complete(&ha->mbx_intr_comp);
}
}
@@ -3900,8 +4103,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (risc_paused) {
- qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
- "Dumping firmware!\n");
+ ql_log(ql_log_info, base_vha, 0x9003,
+ "RISC paused -- mmio_enabled, Dumping firmware.\n");
ha->isp_ops->fw_dump(base_vha, 0);
return PCI_ERS_RESULT_NEED_RESET;
@@ -3917,8 +4120,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
int fn;
struct pci_dev *other_pdev = NULL;
- DEBUG17(qla_printk(KERN_INFO, ha,
- "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no));
+ ql_dbg(ql_dbg_aer, base_vha, 0x9006,
+ "Entered %s.\n", __func__);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -3932,8 +4135,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
fn = PCI_FUNC(ha->pdev->devfn);
while (fn > 0) {
fn--;
- DEBUG17(qla_printk(KERN_INFO, ha,
- "Finding pci device at function = 0x%x\n", fn));
+ ql_dbg(ql_dbg_aer, base_vha, 0x9007,
+ "Finding pci device at function = 0x%x.\n", fn);
other_pdev =
pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
@@ -3942,9 +4145,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
if (!other_pdev)
continue;
if (atomic_read(&other_pdev->enable_cnt)) {
- DEBUG17(qla_printk(KERN_INFO, ha,
- "Found PCI func available and enabled at 0x%x\n",
- fn));
+ ql_dbg(ql_dbg_aer, base_vha, 0x9008,
+			    "Found PCI func available and enabled at 0x%x.\n",
+ fn);
pci_dev_put(other_pdev);
break;
}
@@ -3953,8 +4156,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
if (!fn) {
/* Reset owner */
- DEBUG17(qla_printk(KERN_INFO, ha,
- "This devfn is reset owner = 0x%x\n", ha->pdev->devfn));
+ ql_dbg(ql_dbg_aer, base_vha, 0x9009,
+ "This devfn is reset owner = 0x%x.\n",
+ ha->pdev->devfn);
qla82xx_idc_lock(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
@@ -3964,8 +4168,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
QLA82XX_IDC_VERSION);
drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
- DEBUG17(qla_printk(KERN_INFO, ha,
- "drv_active = 0x%x\n", drv_active));
+ ql_dbg(ql_dbg_aer, base_vha, 0x900a,
+ "drv_active = 0x%x.\n", drv_active);
qla82xx_idc_unlock(ha);
/* Reset if device is not already reset
@@ -3978,12 +4182,14 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
qla82xx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ ql_log(ql_log_info, base_vha, 0x900b,
+ "HW State: FAILED.\n");
qla82xx_clear_drv_active(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
} else {
- qla_printk(KERN_INFO, ha, "HW State: READY\n");
+ ql_log(ql_log_info, base_vha, 0x900c,
+ "HW State: READY.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
qla82xx_idc_unlock(ha);
@@ -3996,8 +4202,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
}
qla82xx_idc_unlock(ha);
} else {
- DEBUG17(qla_printk(KERN_INFO, ha,
- "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
+ ql_dbg(ql_dbg_aer, base_vha, 0x900d,
+ "This devfn is not reset owner = 0x%x.\n",
+ ha->pdev->devfn);
if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
ha->flags.isp82xx_fw_hung = 0;
@@ -4021,7 +4228,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
struct rsp_que *rsp;
int rc, retries = 10;
- DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
+ ql_dbg(ql_dbg_aer, base_vha, 0x9004,
+ "Slot Reset.\n");
/* Workaround: qla2xxx driver which access hardware earlier
* needs error state to be pci_channel_io_online.
@@ -4042,7 +4250,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
rc = pci_enable_device(pdev);
if (rc) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, base_vha, 0x9005,
"Can't re-enable PCI device after reset.\n");
goto exit_slot_reset;
}
@@ -4072,8 +4280,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
exit_slot_reset:
- DEBUG17(qla_printk(KERN_WARNING, ha,
- "slot_reset-return:ret=%x\n", ret));
+ ql_dbg(ql_dbg_aer, base_vha, 0x900e,
+ "slot_reset return %x.\n", ret);
return ret;
}
@@ -4085,13 +4293,13 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
struct qla_hw_data *ha = base_vha->hw;
int ret;
- DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
+ ql_dbg(ql_dbg_aer, base_vha, 0x900f,
+ "pci_resume.\n");
ret = qla2x00_wait_for_hba_online(base_vha);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "the device failed to resume I/O "
- "from slot/link_reset");
+ ql_log(ql_log_fatal, base_vha, 0x9002,
+ "The device failed to resume I/O from slot/link_reset.\n");
}
pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -4155,8 +4363,8 @@ qla2x00_module_init(void)
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (srb_cachep == NULL) {
- printk(KERN_ERR
- "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
+ ql_log(ql_log_fatal, NULL, 0x0001,
+		    "Unable to allocate SRB cache...Failing load!\n");
return -ENOMEM;
}
@@ -4169,13 +4377,15 @@ qla2x00_module_init(void)
fc_attach_transport(&qla2xxx_transport_functions);
if (!qla2xxx_transport_template) {
kmem_cache_destroy(srb_cachep);
+ ql_log(ql_log_fatal, NULL, 0x0002,
+		    "fc_attach_transport failed...Failing load!\n");
return -ENODEV;
}
apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
if (apidev_major < 0) {
- printk(KERN_WARNING "qla2xxx: Unable to register char device "
- "%s\n", QLA2XXX_APIDEV);
+ ql_log(ql_log_fatal, NULL, 0x0003,
+ "Unable to register char device %s.\n", QLA2XXX_APIDEV);
}
qla2xxx_transport_vport_template =
@@ -4183,16 +4393,21 @@ qla2x00_module_init(void)
if (!qla2xxx_transport_vport_template) {
kmem_cache_destroy(srb_cachep);
fc_release_transport(qla2xxx_transport_template);
+ ql_log(ql_log_fatal, NULL, 0x0004,
+		    "fc_attach_transport vport failed...Failing load!\n");
return -ENODEV;
}
-
- printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
+ ql_log(ql_log_info, NULL, 0x0005,
+ "QLogic Fibre Channel HBA Driver: %s.\n",
qla2x00_version_str);
ret = pci_register_driver(&qla2xxx_pci_driver);
if (ret) {
kmem_cache_destroy(srb_cachep);
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
+ ql_log(ql_log_fatal, NULL, 0x0006,
+		    "pci_register_driver failed...ret=%d Failing load!\n",
+ ret);
}
return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 6936476..eff1356 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -189,6 +189,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
uint16_t word;
uint32_t nv_cmd, wait_cnt;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla2x00_nv_write(ha, NVR_DATA_OUT);
qla2x00_nv_write(ha, 0);
@@ -220,8 +221,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(qla_printk(KERN_WARNING, ha,
- "NVRAM didn't go ready...\n"));
+ ql_dbg(ql_dbg_user, vha, 0x708d,
+ "NVRAM didn't go ready...\n");
break;
}
NVRAM_DELAY();
@@ -308,6 +309,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
uint16_t wprot, wprot_old;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* Clear NVRAM write protection. */
ret = QLA_FUNCTION_FAILED;
@@ -350,8 +352,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(qla_printk(KERN_WARNING, ha,
- "NVRAM didn't go ready...\n"));
+ ql_dbg(ql_dbg_user, vha, 0x708e,
+ "NVRAM didn't go ready...\n");
break;
}
NVRAM_DELAY();
@@ -371,6 +373,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
if (stat != QLA_SUCCESS)
return;
@@ -409,8 +412,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(qla_printk(KERN_WARNING, ha,
- "NVRAM didn't go ready...\n"));
+ ql_dbg(ql_dbg_user, vha, 0x708f,
+ "NVRAM didn't go ready...\n");
break;
}
NVRAM_DELAY();
@@ -607,9 +610,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
for (chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
- qla_printk(KERN_ERR, ha,
+ ql_log(ql_log_fatal, vha, 0x0045,
"Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
- qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
+ buf, sizeof(struct qla_flt_location));
return QLA_FUNCTION_FAILED;
}
@@ -618,7 +622,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
*start = (le16_to_cpu(fltl->start_hi) << 16 |
le16_to_cpu(fltl->start_lo)) >> 2;
end:
- DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
+ ql_dbg(ql_dbg_init, vha, 0x0046,
+ "FLTL[%s] = 0x%x.\n",
+ loc, *start);
return QLA_SUCCESS;
}
@@ -685,10 +691,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
if (*wptr == __constant_cpu_to_le16(0xffff))
goto no_flash_data;
if (flt->version != __constant_cpu_to_le16(1)) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
- "version=0x%x length=0x%x checksum=0x%x.\n",
+ ql_log(ql_log_warn, vha, 0x0047,
+ "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
- le16_to_cpu(flt->checksum)));
+ le16_to_cpu(flt->checksum));
goto no_flash_data;
}
@@ -696,10 +702,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
for (chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
- "version=0x%x length=0x%x checksum=0x%x.\n",
+ ql_log(ql_log_fatal, vha, 0x0048,
+ "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
- chksum));
+ le16_to_cpu(flt->checksum));
goto no_flash_data;
}
@@ -708,10 +714,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
for ( ; cnt; cnt--, region++) {
/* Store addresses as DWORD offsets. */
start = le32_to_cpu(region->start) >> 2;
-
- DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
- "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
- le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
+ ql_dbg(ql_dbg_init, vha, 0x0049,
+ "FLT[%02x]: start=0x%x "
+ "end=0x%x size=0x%x.\n", le32_to_cpu(region->code),
+ start, le32_to_cpu(region->end) >> 2,
+ le32_to_cpu(region->size));
switch (le32_to_cpu(region->code) & 0xff) {
case FLT_REG_FW:
@@ -796,12 +803,16 @@ no_flash_data:
ha->flt_region_npiv_conf = ha->flags.port0 ?
def_npiv_conf0[def] : def_npiv_conf1[def];
done:
- DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
- "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x "
- "npiv=0x%x. fcp_prio_cfg=0x%x\n", loc, ha->flt_region_boot,
- ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd,
- ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt,
- ha->flt_region_npiv_conf, ha->flt_region_fcp_prio));
+ ql_dbg(ql_dbg_init, vha, 0x004a,
+ "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
+ loc, ha->flt_region_boot,
+ ha->flt_region_fw, ha->flt_region_vpd_nvram,
+ ha->flt_region_vpd);
+ ql_dbg(ql_dbg_init, vha, 0x004b,
+	    "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prio_cfg=0x%x.\n",
+ ha->flt_region_nvram,
+ ha->flt_region_fdt, ha->flt_region_flt,
+ ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
}
static void
@@ -833,10 +844,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
cnt++)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: "
- "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0],
- le16_to_cpu(fdt->version)));
- DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt)));
+ ql_dbg(ql_dbg_init, vha, 0x004c,
+ "Inconsistent FDT detected:"
+		    " checksum=0x%x id=%c version=0x%x.\n", chksum,
+ fdt->sig[0], le16_to_cpu(fdt->version));
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
+ (uint8_t *)fdt, sizeof(*fdt));
goto no_flash_data;
}
@@ -890,11 +903,12 @@ no_flash_data:
break;
}
done:
- DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
- "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+ ql_dbg(ql_dbg_init, vha, 0x004d,
+	    "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+	    "pro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
- ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable,
- ha->fdt_block_size));
+ ha->fdt_wrt_disable, ha->fdt_block_size);
+
}
static void
@@ -919,6 +933,10 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
ha->nx_reset_timeout = le32_to_cpu(*wptr);
}
+ ql_dbg(ql_dbg_init, vha, 0x004e,
+ "nx_dev_init_timeout=%d "
+ "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
+ ha->nx_reset_timeout);
return;
}
@@ -963,17 +981,18 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
if (hdr.version == __constant_cpu_to_le16(0xffff))
return;
if (hdr.version != __constant_cpu_to_le16(1)) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
+ ql_dbg(ql_dbg_user, vha, 0x7090,
+ "Unsupported NPIV-Config "
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
- le16_to_cpu(hdr.checksum)));
+ le16_to_cpu(hdr.checksum));
return;
}
data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
if (!data) {
- DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
- "allocate memory.\n"));
+ ql_log(ql_log_warn, vha, 0x7091,
+ "Unable to allocate memory for data.\n");
return;
}
@@ -985,10 +1004,11 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
for (wptr = data, chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
+ ql_dbg(ql_dbg_user, vha, 0x7092,
+ "Inconsistent NPIV-Config "
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
- chksum));
+ le16_to_cpu(hdr.checksum));
goto done;
}
@@ -1014,21 +1034,22 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
vid.port_name = wwn_to_u64(entry->port_name);
vid.node_name = wwn_to_u64(entry->node_name);
- DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
- "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
- (unsigned long long)vid.port_name,
- (unsigned long long)vid.node_name,
- le16_to_cpu(entry->vf_id),
- entry->q_qos, entry->f_qos));
+ ql_dbg(ql_dbg_user, vha, 0x7093,
+ "NPIV[%02x]: wwpn=%llx "
+ "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name,
+ le16_to_cpu(entry->vf_id),
+ entry->q_qos, entry->f_qos);
if (i < QLA_PRECONFIG_VPORTS) {
vport = fc_vport_create(vha->host, 0, &vid);
if (!vport)
- qla_printk(KERN_INFO, ha,
- "NPIV-Config: Failed to create vport [%02x]: "
- "wwpn=%llx wwnn=%llx.\n", cnt,
- (unsigned long long)vid.port_name,
- (unsigned long long)vid.node_name);
+ ql_log(ql_log_warn, vha, 0x7094,
+ "NPIV-Config Failed to create vport [%02x]: "
+ "wwpn=%llx wwnn=%llx.\n", cnt,
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name);
}
}
done:
@@ -1127,9 +1148,10 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
- qla_printk(KERN_DEBUG, ha,
- "Unable to allocate memory for optrom burst write "
- "(%x KB).\n", OPTROM_BURST_SIZE / 1024);
+ ql_log(ql_log_warn, vha, 0x7095,
+ "Unable to allocate "
+ "memory for optrom burst write (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
}
}
@@ -1138,7 +1160,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ret = qla24xx_unprotect_flash(vha);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7096,
"Unable to unprotect flash for update.\n");
goto done;
}
@@ -1156,9 +1178,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
0xff0000) | ((fdata >> 16) & 0xff));
ret = qla24xx_erase_sector(vha, fdata);
if (ret != QLA_SUCCESS) {
- DEBUG9(qla_printk(KERN_WARNING, ha,
- "Unable to erase sector: address=%x.\n",
- faddr));
+ ql_dbg(ql_dbg_user, vha, 0x7007,
+				    "Unable to erase sector: address=%x.\n",
+ faddr);
break;
}
}
@@ -1172,12 +1194,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
flash_data_addr(ha, faddr),
OPTROM_BURST_DWORDS);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7097,
"Unable to burst-write optrom segment "
"(%x/%x/%llx).\n", ret,
flash_data_addr(ha, faddr),
(unsigned long long)optrom_dma);
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7098,
"Reverting to slow-write.\n");
dma_free_coherent(&ha->pdev->dev,
@@ -1194,9 +1216,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ret = qla24xx_write_flash_dword(ha,
flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
if (ret != QLA_SUCCESS) {
- DEBUG9(printk("%s(%ld) Unable to program flash "
- "address=%x data=%x.\n", __func__,
- vha->host_no, faddr, *dwptr));
+ ql_dbg(ql_dbg_user, vha, 0x7006,
+ "Unable to program flash address=%x data=%x.\n",
+ faddr, *dwptr);
break;
}
@@ -1211,7 +1233,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ret = qla24xx_protect_flash(vha);
if (ret != QLA_SUCCESS)
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7099,
"Unable to protect flash after update.\n");
done:
if (optrom)
@@ -1324,9 +1346,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
ret = qla24xx_write_flash_dword(ha,
nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
if (ret != QLA_SUCCESS) {
- DEBUG9(qla_printk(KERN_WARNING, ha,
+ ql_dbg(ql_dbg_user, vha, 0x709a,
"Unable to program nvram address=%x data=%x.\n",
- naddr, *dwptr));
+ naddr, *dwptr);
break;
}
}
@@ -1476,7 +1498,7 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x709b,
"Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
}
@@ -1541,7 +1563,7 @@ qla2x00_beacon_off(struct scsi_qla_host *vha)
rval = qla2x00_set_fw_options(vha, ha->fw_options);
if (rval != QLA_SUCCESS)
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x709c,
"Unable to update fw options (beacon off).\n");
return rval;
}
@@ -1616,7 +1638,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
if (qla2x00_get_fw_options(vha, ha->fw_options) !=
QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7009,
"Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
}
@@ -1670,14 +1692,14 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Unable to update fw options (beacon off).\n");
+ ql_log(ql_log_warn, vha, 0x704d,
+		    "Unable to update fw options (beacon off).\n");
return QLA_FUNCTION_FAILED;
}
if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Unable to get fw options (beacon off).\n");
+ ql_log(ql_log_warn, vha, 0x704e,
+		    "Unable to get fw options (beacon off).\n");
return QLA_FUNCTION_FAILED;
}
@@ -2389,10 +2411,9 @@ try_fast:
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
- qla_printk(KERN_DEBUG, ha,
- "Unable to allocate memory for optrom burst read "
- "(%x KB).\n", OPTROM_BURST_SIZE / 1024);
-
+ ql_log(ql_log_warn, vha, 0x00cc,
+ "Unable to allocate memory for optrom burst read (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
goto slow_read;
}
@@ -2407,12 +2428,11 @@ try_fast:
rval = qla2x00_dump_ram(vha, optrom_dma,
flash_data_addr(ha, faddr), burst);
if (rval) {
- qla_printk(KERN_WARNING, ha,
- "Unable to burst-read optrom segment "
- "(%x/%x/%llx).\n", rval,
- flash_data_addr(ha, faddr),
+ ql_log(ql_log_warn, vha, 0x00f5,
+ "Unable to burst-read optrom segment (%x/%x/%llx).\n",
+ rval, flash_data_addr(ha, faddr),
(unsigned long long)optrom_dma);
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x00f6,
"Reverting to slow-read.\n");
dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
@@ -2556,8 +2576,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
/* No signature */
- DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
- "signature.\n"));
+ ql_log(ql_log_fatal, vha, 0x0050,
+ "No matching ROM signature.\n");
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2573,8 +2593,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
/* Incorrect header. */
- DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
- "found pcir_adr=%x.\n", pcids));
+ ql_log(ql_log_fatal, vha, 0x0051,
+ "PCI data struct not found pcir_adr=%x.\n", pcids);
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2588,8 +2608,9 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x12);
ha->bios_revision[1] =
qla2x00_read_flash_byte(ha, pcids + 0x13);
- DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
- ha->bios_revision[1], ha->bios_revision[0]));
+ ql_dbg(ql_dbg_init, vha, 0x0052,
+ "Read BIOS %d.%d.\n",
+ ha->bios_revision[1], ha->bios_revision[0]);
break;
case ROM_CODE_TYPE_FCODE:
/* Open Firmware standard for PCI (FCode). */
@@ -2602,12 +2623,14 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x12);
ha->efi_revision[1] =
qla2x00_read_flash_byte(ha, pcids + 0x13);
- DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
- ha->efi_revision[1], ha->efi_revision[0]));
+ ql_dbg(ql_dbg_init, vha, 0x0053,
+ "Read EFI %d.%d.\n",
+ ha->efi_revision[1], ha->efi_revision[0]);
break;
default:
- DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
- "type %x at pcids %x.\n", code_type, pcids));
+ ql_log(ql_log_warn, vha, 0x0054,
+ "Unrecognized code type %x at pcids %x.\n",
+ code_type, pcids);
break;
}
@@ -2627,21 +2650,28 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
8);
- DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from "
- "flash:\n"));
- DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a,
+		    "Dumping fw version from flash:\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
+ (uint8_t *)dbyte, 8);
if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
dcode[2] == 0xffff && dcode[3] == 0xffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
- "revision at %x.\n", ha->flt_region_fw * 4));
+ ql_log(ql_log_warn, vha, 0x0057,
+ "Unrecognized fw revision at %x.\n",
+ ha->flt_region_fw * 4);
} else {
/* values are in big endian */
ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
+ ql_dbg(ql_dbg_init, vha, 0x0058,
+ "FW Version: "
+ "%d.%d.%d.\n", ha->fw_revision[0],
+ ha->fw_revision[1], ha->fw_revision[2]);
}
}
@@ -2683,8 +2713,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
bcode = mbuf + (pcihdr % 4);
if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
/* No signature */
- DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
- "signature.\n"));
+ ql_log(ql_log_fatal, vha, 0x0059,
+ "No matching ROM signature.\n");
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2699,8 +2729,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
bcode[0x2] != 'I' || bcode[0x3] != 'R') {
/* Incorrect header. */
- DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
- "found pcir_adr=%x.\n", pcids));
+ ql_log(ql_log_fatal, vha, 0x005a,
+ "PCI data struct not found pcir_adr=%x.\n", pcids);
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2712,26 +2742,30 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
/* Intel x86, PC-AT compatible. */
ha->bios_revision[0] = bcode[0x12];
ha->bios_revision[1] = bcode[0x13];
- DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
- ha->bios_revision[1], ha->bios_revision[0]));
+ ql_dbg(ql_dbg_init, vha, 0x005b,
+ "Read BIOS %d.%d.\n",
+ ha->bios_revision[1], ha->bios_revision[0]);
break;
case ROM_CODE_TYPE_FCODE:
/* Open Firmware standard for PCI (FCode). */
ha->fcode_revision[0] = bcode[0x12];
ha->fcode_revision[1] = bcode[0x13];
- DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n",
- ha->fcode_revision[1], ha->fcode_revision[0]));
+ ql_dbg(ql_dbg_init, vha, 0x005c,
+ "Read FCODE %d.%d.\n",
+ ha->fcode_revision[1], ha->fcode_revision[0]);
break;
case ROM_CODE_TYPE_EFI:
/* Extensible Firmware Interface (EFI). */
ha->efi_revision[0] = bcode[0x12];
ha->efi_revision[1] = bcode[0x13];
- DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
- ha->efi_revision[1], ha->efi_revision[0]));
+ ql_dbg(ql_dbg_init, vha, 0x005d,
+ "Read EFI %d.%d.\n",
+ ha->efi_revision[1], ha->efi_revision[0]);
break;
default:
- DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
- "type %x at pcids %x.\n", code_type, pcids));
+ ql_log(ql_log_warn, vha, 0x005e,
+ "Unrecognized code type %x at pcids %x.\n",
+ code_type, pcids);
break;
}
@@ -2753,13 +2787,18 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
- "revision at %x.\n", ha->flt_region_fw * 4));
+ ql_log(ql_log_warn, vha, 0x005f,
+ "Unrecognized fw revision at %x.\n",
+ ha->flt_region_fw * 4);
} else {
ha->fw_revision[0] = dcode[0];
ha->fw_revision[1] = dcode[1];
ha->fw_revision[2] = dcode[2];
ha->fw_revision[3] = dcode[3];
+ ql_dbg(ql_dbg_init, vha, 0x0060,
+ "Firmware revision %d.%d.%d.%d.\n",
+ ha->fw_revision[0], ha->fw_revision[1],
+ ha->fw_revision[2], ha->fw_revision[3]);
}
/* Check for golden firmware and get version if available */
@@ -2775,9 +2814,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "%s(%ld): Unrecognized golden fw at 0x%x.\n",
- __func__, vha->host_no, ha->flt_region_gold_fw * 4));
+ ql_log(ql_log_warn, vha, 0x0056,
+ "Unrecognized golden fw at 0x%x.\n",
+ ha->flt_region_gold_fw * 4);
return ret;
}
@@ -2843,9 +2882,9 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
if (!ha->fcp_prio_cfg) {
ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
if (!ha->fcp_prio_cfg) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for fcp priority data "
- "(%x).\n", FCP_PRIO_CFG_SIZE);
+ ql_log(ql_log_warn, vha, 0x00d5,
+			    "Unable to allocate memory for fcp priority data (%x).\n",
+ FCP_PRIO_CFG_SIZE);
return QLA_FUNCTION_FAILED;
}
}
@@ -2857,7 +2896,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
- if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0))
+ if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
goto fail;
/* read remaining FCP CMD config data from flash */
@@ -2869,7 +2908,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
fcp_prio_addr << 2, (len < max_len ? len : max_len));
/* revalidate the entire FCP priority config data, including entries */
- if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1))
+ if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1))
goto fail;
ha->flags.fcp_prio_enabled = 1;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 062c97b..13b6357 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.07.03-k"
+#define QLA2XXX_VERSION "8.03.07.07-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index 2c33ce6..0f5599e 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -1,6 +1,6 @@
config SCSI_QLA_ISCSI
tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
- depends on PCI && SCSI
+ depends on PCI && SCSI && NET
select SCSI_ISCSI_ATTRS
---help---
This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 28d9c9d..fc3f168 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -137,6 +137,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
host->host_blocked = host->max_host_blocked;
break;
case SCSI_MLQUEUE_DEVICE_BUSY:
+ case SCSI_MLQUEUE_EH_RETRY:
device->device_blocked = device->max_device_blocked;
break;
case SCSI_MLQUEUE_TARGET_BUSY:
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 8a172d4..5fbeadd 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -597,6 +597,28 @@ static DEVICE_ATTR(signalling, S_IRUGO,
show_spi_host_signalling,
store_spi_host_signalling);
+static ssize_t show_spi_host_width(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+
+ return sprintf(buf, "%s\n", shost->max_id == 16 ? "wide" : "narrow");
+}
+static DEVICE_ATTR(host_width, S_IRUGO,
+ show_spi_host_width, NULL);
+
+static ssize_t show_spi_host_hba_id(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+
+ return sprintf(buf, "%d\n", shost->this_id);
+}
+static DEVICE_ATTR(hba_id, S_IRUGO,
+ show_spi_host_hba_id, NULL);
+
#define DV_SET(x, y) \
if(i->f->set_##x) \
i->f->set_##x(sdev->sdev_target, y)
@@ -1380,6 +1402,8 @@ static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
static struct attribute *host_attributes[] = {
&dev_attr_signalling.attr,
+ &dev_attr_host_width.attr,
+ &dev_attr_hba_id.attr,
NULL
};
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index d6702e5..dc8d022 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -34,6 +34,9 @@ static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);
+/* clock disable operations are not passed on to hardware during boot */
+static int allow_disable;
+
void clk_rate_table_build(struct clk *clk,
struct cpufreq_frequency_table *freq_table,
int nr_freqs,
@@ -228,7 +231,7 @@ static void __clk_disable(struct clk *clk)
return;
if (!(--clk->usecount)) {
- if (likely(clk->ops && clk->ops->disable))
+ if (likely(allow_disable && clk->ops && clk->ops->disable))
clk->ops->disable(clk);
if (likely(clk->parent))
__clk_disable(clk->parent);
@@ -393,7 +396,7 @@ int clk_register(struct clk *clk)
{
int ret;
- if (clk == NULL || IS_ERR(clk))
+ if (IS_ERR_OR_NULL(clk))
return -EINVAL;
/*
@@ -744,3 +747,25 @@ err_out:
return err;
}
late_initcall(clk_debugfs_init);
+
+static int __init clk_late_init(void)
+{
+ unsigned long flags;
+ struct clk *clk;
+
+ /* disable all clocks with zero use count */
+ mutex_lock(&clock_list_sem);
+ spin_lock_irqsave(&clock_lock, flags);
+
+ list_for_each_entry(clk, &clock_list, node)
+ if (!clk->usecount && clk->ops && clk->ops->disable)
+ clk->ops->disable(clk);
+
+ /* from now on allow clock disable operations */
+ allow_disable = 1;
+
+ spin_unlock_irqrestore(&clock_lock, flags);
+ mutex_unlock(&clock_list_sem);
+ return 0;
+}
+late_initcall(clk_late_init);
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index f33e2dd..33b2ed4 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -186,6 +186,9 @@ static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
!defined(CONFIG_CPU_SUBTYPE_SH7709)
[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
+#if defined(CONFIG_ARCH_SH7372)
+ [IRQ_TYPE_EDGE_BOTH] = VALID(4),
+#endif
};
static int intc_set_type(struct irq_data *data, unsigned int type)
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index eba88c7..730b4a3 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2267,17 +2267,13 @@ static int __devexit
pl022_remove(struct amba_device *adev)
{
struct pl022 *pl022 = amba_get_drvdata(adev);
- int status = 0;
+
if (!pl022)
return 0;
/* Remove the queue */
- status = destroy_queue(pl022);
- if (status != 0) {
- dev_err(&adev->dev,
- "queue remove failed (%d)\n", status);
- return status;
- }
+ if (destroy_queue(pl022) != 0)
+ dev_err(&adev->dev, "queue remove failed\n");
load_ssp_default_config(pl022);
pl022_dma_remove(pl022);
free_irq(adev->irq[0], pl022);
@@ -2289,7 +2285,6 @@ pl022_remove(struct amba_device *adev)
spi_unregister_master(pl022->master);
spi_master_put(pl022->master);
amba_set_drvdata(adev, NULL);
- dev_dbg(&adev->dev, "remove succeeded\n");
return 0;
}
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
index c01c0cb..b99a11a 100644
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
@@ -812,7 +812,7 @@ int AthCreateCommandList(struct ps_cmd_packet **HciPacketList, u32 *numPackets)
for(count = 0; count < Patch_Count; count++) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count));
- kfree(RamPatch[Patch_Count].Data);
+ kfree(RamPatch[count].Data);
}
for(count = 0; count < Tag_Count; count++) {
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index 499b7a9..32ee39a 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -6205,6 +6205,7 @@ int ar6000_create_ap_interface(struct ar6_softc *ar, char *ap_ifname)
ether_setup(dev);
init_netdev(dev, ap_ifname);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
if (register_netdev(dev)) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: register_netdev failed\n"));
diff --git a/drivers/staging/brcm80211/brcmsmac/mac80211_if.h b/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
index 5711e7c..40e3d37 100644
--- a/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
@@ -24,8 +24,6 @@
#define BRCMS_SET_SHORTSLOT_OVERRIDE 146
-#include <linux/interrupt.h>
-
/* BMAC Note: High-only driver is no longer working in softirq context as it needs to block and
* sleep so perimeter lock has to be a semaphore instead of spinlock. This requires timers to be
* submitted to workqueue instead of being on kernel timer
diff --git a/drivers/staging/brcm80211/brcmsmac/otp.c b/drivers/staging/brcm80211/brcmsmac/otp.c
index 34253cf..4a70180 100644
--- a/drivers/staging/brcm80211/brcmsmac/otp.c
+++ b/drivers/staging/brcm80211/brcmsmac/otp.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/errno.h>
+#include <linux/string.h>
#include <brcm_hw_ids.h>
#include <chipcommon.h>
diff --git a/drivers/staging/brcm80211/brcmsmac/types.h b/drivers/staging/brcm80211/brcmsmac/types.h
index bbf2189..823b5e4 100644
--- a/drivers/staging/brcm80211/brcmsmac/types.h
+++ b/drivers/staging/brcm80211/brcmsmac/types.h
@@ -18,6 +18,7 @@
#define _BRCM_TYPES_H_
#include <linux/types.h>
+#include <linux/io.h>
/* Bus types */
#define SI_BUS 0 /* SOC Interconnect */
diff --git a/drivers/staging/cxd2099/Kconfig b/drivers/staging/cxd2099/Kconfig
index 9d638c3..b48aefd 100644
--- a/drivers/staging/cxd2099/Kconfig
+++ b/drivers/staging/cxd2099/Kconfig
@@ -1,9 +1,10 @@
config DVB_CXD2099
- tristate "CXD2099AR Common Interface driver"
- depends on DVB_CORE && PCI && I2C && DVB_NGENE
- ---help---
- Support for the CI module found on cineS2 DVB-S2, supported by
- the Micronas PCIe device driver (ngene).
+ tristate "CXD2099AR Common Interface driver"
+ depends on DVB_CORE && PCI && I2C
+ ---help---
+ Support for the CI module found on cards based on
+ - Micronas ngene PCIe bridge: cineS2 etc.
+ - Digital Devices PCIe bridge: Octopus series
For now, data is passed through '/dev/dvb/adapterX/sec0':
- Encrypted data must be written to 'sec0'.
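As a side note on the 'sec0' node mentioned in the help text, a minimal userspace sketch (not part of this patch; the adapter number, buffer size and error handling are illustrative assumptions) of feeding encrypted TS data to the CI module would look roughly like this:

    /* Illustrative only: write encrypted TS packets to the CI via sec0. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char ts[188 * 64];  /* encrypted TS packets, assumed already filled */
            int fd = open("/dev/dvb/adapter0/sec0", O_RDWR);

            if (fd < 0) {
                    perror("open sec0");
                    return 1;
            }
            if (write(fd, ts, sizeof(ts)) < 0)
                    perror("write sec0");
            close(fd);
            return 0;
    }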
diff --git a/drivers/staging/cxd2099/cxd2099.c b/drivers/staging/cxd2099/cxd2099.c
index 55b1c4a..1c04185 100644
--- a/drivers/staging/cxd2099/cxd2099.c
+++ b/drivers/staging/cxd2099/cxd2099.c
@@ -1,7 +1,7 @@
/*
* cxd2099.c: Driver for the CXD2099AR Common Interface Controller
*
- * Copyright (C) 2010 DigitalDevices UG
+ * Copyright (C) 2010-2011 Digital Devices GmbH
*
*
* This program is free software; you can redistribute it and/or
@@ -41,13 +41,13 @@ struct cxd {
struct dvb_ca_en50221 en;
struct i2c_adapter *i2c;
- u8 adr;
+ struct cxd2099_cfg cfg;
+
u8 regs[0x23];
u8 lastaddress;
u8 clk_reg_f;
u8 clk_reg_b;
int mode;
- u32 bitrate;
int ready;
int dr;
int slot_stat;
@@ -89,9 +89,9 @@ static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr,
u8 reg, u8 *val)
{
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
- .buf = &reg, .len = 1 },
+ .buf = &reg, .len = 1},
{.addr = adr, .flags = I2C_M_RD,
- .buf = val, .len = 1 } };
+ .buf = val, .len = 1} };
if (i2c_transfer(adapter, msgs, 2) != 2) {
printk(KERN_ERR "error in i2c_read_reg\n");
@@ -104,9 +104,9 @@ static int i2c_read(struct i2c_adapter *adapter, u8 adr,
u8 reg, u8 *data, u8 n)
{
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
- .buf = &reg, .len = 1 },
- {.addr = adr, .flags = I2C_M_RD,
- .buf = data, .len = n } };
+ .buf = &reg, .len = 1},
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = data, .len = n} };
if (i2c_transfer(adapter, msgs, 2) != 2) {
printk(KERN_ERR "error in i2c_read\n");
@@ -119,10 +119,10 @@ static int read_block(struct cxd *ci, u8 adr, u8 *data, u8 n)
{
int status;
- status = i2c_write_reg(ci->i2c, ci->adr, 0, adr);
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
if (!status) {
ci->lastaddress = adr;
- status = i2c_read(ci->i2c, ci->adr, 1, data, n);
+ status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, n);
}
return status;
}
@@ -136,24 +136,24 @@ static int read_reg(struct cxd *ci, u8 reg, u8 *val)
static int read_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
{
int status;
- u8 addr[3] = { 2, address&0xff, address>>8 };
+ u8 addr[3] = {2, address & 0xff, address >> 8};
- status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
if (!status)
- status = i2c_read(ci->i2c, ci->adr, 3, data, n);
+ status = i2c_read(ci->i2c, ci->cfg.adr, 3, data, n);
return status;
}
static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
{
int status;
- u8 addr[3] = { 2, address&0xff, address>>8 };
+ u8 addr[3] = {2, address & 0xff, address >> 8};
- status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
if (!status) {
u8 buf[256] = {3};
memcpy(buf+1, data, n);
- status = i2c_write(ci->i2c, ci->adr, buf, n+1);
+ status = i2c_write(ci->i2c, ci->cfg.adr, buf, n+1);
}
return status;
}
@@ -161,39 +161,64 @@ static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
static int read_io(struct cxd *ci, u16 address, u8 *val)
{
int status;
- u8 addr[3] = { 2, address&0xff, address>>8 };
+ u8 addr[3] = {2, address & 0xff, address >> 8};
- status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
if (!status)
- status = i2c_read(ci->i2c, ci->adr, 3, val, 1);
+ status = i2c_read(ci->i2c, ci->cfg.adr, 3, val, 1);
return status;
}
static int write_io(struct cxd *ci, u16 address, u8 val)
{
int status;
- u8 addr[3] = { 2, address&0xff, address>>8 };
- u8 buf[2] = { 3, val };
+ u8 addr[3] = {2, address & 0xff, address >> 8};
+ u8 buf[2] = {3, val};
- status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
if (!status)
- status = i2c_write(ci->i2c, ci->adr, buf, 2);
-
+ status = i2c_write(ci->i2c, ci->cfg.adr, buf, 2);
return status;
}
+#if 0
+static int read_io_data(struct cxd *ci, u8 *data, u8 n)
+{
+ int status;
+ u8 addr[3] = { 2, 0, 0 };
+
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
+ if (!status)
+ status = i2c_read(ci->i2c, ci->cfg.adr, 3, data, n);
+ return 0;
+}
+
+static int write_io_data(struct cxd *ci, u8 *data, u8 n)
+{
+ int status;
+ u8 addr[3] = {2, 0, 0};
+
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
+ if (!status) {
+ u8 buf[256] = {3};
+ memcpy(buf+1, data, n);
+ status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
+ }
+ return 0;
+}
+#endif
static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask)
{
int status;
- status = i2c_write_reg(ci->i2c, ci->adr, 0, reg);
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg);
if (!status && reg >= 6 && reg <= 8 && mask != 0xff)
- status = i2c_read_reg(ci->i2c, ci->adr, 1, &ci->regs[reg]);
- ci->regs[reg] = (ci->regs[reg]&(~mask))|val;
+ status = i2c_read_reg(ci->i2c, ci->cfg.adr, 1, &ci->regs[reg]);
+ ci->regs[reg] = (ci->regs[reg] & (~mask)) | val;
if (!status) {
ci->lastaddress = reg;
- status = i2c_write_reg(ci->i2c, ci->adr, 1, ci->regs[reg]);
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 1, ci->regs[reg]);
}
if (reg == 0x20)
ci->regs[reg] &= 0x7f;
@@ -211,11 +236,11 @@ static int write_block(struct cxd *ci, u8 adr, u8 *data, int n)
int status;
u8 buf[256] = {1};
- status = i2c_write_reg(ci->i2c, ci->adr, 0, adr);
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
if (!status) {
ci->lastaddress = adr;
- memcpy(buf+1, data, n);
- status = i2c_write(ci->i2c, ci->adr, buf, n+1);
+ memcpy(buf + 1, data, n);
+ status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
}
return status;
}
@@ -249,12 +274,16 @@ static void cam_mode(struct cxd *ci, int mode)
write_regm(ci, 0x20, 0x80, 0x80);
break;
case 0x01:
+#ifdef BUFFER_MODE
+ if (!ci->en.read_data)
+ return;
printk(KERN_INFO "enable cam buffer mode\n");
/* write_reg(ci, 0x0d, 0x00); */
/* write_reg(ci, 0x0e, 0x01); */
write_regm(ci, 0x08, 0x40, 0x40);
/* read_reg(ci, 0x12, &dummy); */
write_regm(ci, 0x08, 0x80, 0x80);
+#endif
break;
default:
break;
@@ -264,8 +293,6 @@ static void cam_mode(struct cxd *ci, int mode)
-#define CHK_ERROR(s) if ((status = s)) break
-
static int init(struct cxd *ci)
{
int status;
@@ -273,63 +300,160 @@ static int init(struct cxd *ci)
mutex_lock(&ci->lock);
ci->mode = -1;
do {
- CHK_ERROR(write_reg(ci, 0x00, 0x00));
- CHK_ERROR(write_reg(ci, 0x01, 0x00));
- CHK_ERROR(write_reg(ci, 0x02, 0x10));
- CHK_ERROR(write_reg(ci, 0x03, 0x00));
- CHK_ERROR(write_reg(ci, 0x05, 0xFF));
- CHK_ERROR(write_reg(ci, 0x06, 0x1F));
- CHK_ERROR(write_reg(ci, 0x07, 0x1F));
- CHK_ERROR(write_reg(ci, 0x08, 0x28));
- CHK_ERROR(write_reg(ci, 0x14, 0x20));
-
- CHK_ERROR(write_reg(ci, 0x09, 0x4D)); /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
- CHK_ERROR(write_reg(ci, 0x0A, 0xA7)); /* TOSTRT = 8, Mode B (gated clock), falling Edge, Serial, POL=HIGH, MSB */
-
- /* Sync detector */
- CHK_ERROR(write_reg(ci, 0x0B, 0x33));
- CHK_ERROR(write_reg(ci, 0x0C, 0x33));
-
- CHK_ERROR(write_regm(ci, 0x14, 0x00, 0x0F));
- CHK_ERROR(write_reg(ci, 0x15, ci->clk_reg_b));
- CHK_ERROR(write_regm(ci, 0x16, 0x00, 0x0F));
- CHK_ERROR(write_reg(ci, 0x17, ci->clk_reg_f));
-
- CHK_ERROR(write_reg(ci, 0x20, 0x28)); /* Integer Divider, Falling Edge, Internal Sync, */
- CHK_ERROR(write_reg(ci, 0x21, 0x00)); /* MCLKI = TICLK/8 */
- CHK_ERROR(write_reg(ci, 0x22, 0x07)); /* MCLKI = TICLK/8 */
-
-
- CHK_ERROR(write_regm(ci, 0x20, 0x80, 0x80)); /* Reset CAM state machine */
-
- CHK_ERROR(write_regm(ci, 0x03, 0x02, 02)); /* Enable IREQA Interrupt */
- CHK_ERROR(write_reg(ci, 0x01, 0x04)); /* Enable CD Interrupt */
- CHK_ERROR(write_reg(ci, 0x00, 0x31)); /* Enable TS1,Hot Swap,Slot A */
- CHK_ERROR(write_regm(ci, 0x09, 0x08, 0x08)); /* Put TS in bypass */
+ status = write_reg(ci, 0x00, 0x00);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x01, 0x00);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x02, 0x10);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x03, 0x00);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x05, 0xFF);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x06, 0x1F);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x07, 0x1F);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x08, 0x28);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x14, 0x20);
+ if (status < 0)
+ break;
+
+#if 0
+ status = write_reg(ci, 0x09, 0x4D); /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
+ if (status < 0)
+ break;
+#endif
+ status = write_reg(ci, 0x0A, 0xA7); /* TOSTRT = 8, Mode B (gated clock), falling Edge, Serial, POL=HIGH, MSB */
+ if (status < 0)
+ break;
+
+ status = write_reg(ci, 0x0B, 0x33);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x0C, 0x33);
+ if (status < 0)
+ break;
+
+ status = write_regm(ci, 0x14, 0x00, 0x0F);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x15, ci->clk_reg_b);
+ if (status < 0)
+ break;
+ status = write_regm(ci, 0x16, 0x00, 0x0F);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x17, ci->clk_reg_f);
+ if (status < 0)
+ break;
+
+ if (ci->cfg.clock_mode) {
+ if (ci->cfg.polarity) {
+ status = write_reg(ci, 0x09, 0x6f);
+ if (status < 0)
+ break;
+ } else {
+ status = write_reg(ci, 0x09, 0x6d);
+ if (status < 0)
+ break;
+ }
+ status = write_reg(ci, 0x20, 0x68);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x21, 0x00);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x22, 0x02);
+ if (status < 0)
+ break;
+ } else {
+ if (ci->cfg.polarity) {
+ status = write_reg(ci, 0x09, 0x4f);
+ if (status < 0)
+ break;
+ } else {
+ status = write_reg(ci, 0x09, 0x4d);
+ if (status < 0)
+ break;
+ }
+
+ status = write_reg(ci, 0x20, 0x28);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x21, 0x00);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x22, 0x07);
+ if (status < 0)
+ break;
+ }
+
+ status = write_regm(ci, 0x20, 0x80, 0x80);
+ if (status < 0)
+ break;
+ status = write_regm(ci, 0x03, 0x02, 0x02);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x01, 0x04);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x00, 0x31);
+ if (status < 0)
+ break;
+
+ /* Put TS in bypass */
+ status = write_regm(ci, 0x09, 0x08, 0x08);
+ if (status < 0)
+ break;
ci->cammode = -1;
-#ifdef BUFFER_MODE
cam_mode(ci, 0);
-#endif
} while (0);
mutex_unlock(&ci->lock);
return 0;
}
-
static int read_attribute_mem(struct dvb_ca_en50221 *ca,
int slot, int address)
{
struct cxd *ci = ca->data;
+#if 0
+ if (ci->amem_read) {
+ if (address <= 0 || address > 1024)
+ return -EIO;
+ return ci->amem[address];
+ }
+
+ mutex_lock(&ci->lock);
+ write_regm(ci, 0x06, 0x00, 0x05);
+ read_pccard(ci, 0, &ci->amem[0], 128);
+ read_pccard(ci, 128, &ci->amem[0], 128);
+ read_pccard(ci, 256, &ci->amem[0], 128);
+ read_pccard(ci, 384, &ci->amem[0], 128);
+ write_regm(ci, 0x06, 0x05, 0x05);
+ mutex_unlock(&ci->lock);
+ return ci->amem[address];
+#else
u8 val;
mutex_lock(&ci->lock);
set_mode(ci, 1);
read_pccard(ci, address, &val, 1);
mutex_unlock(&ci->lock);
+ /* printk(KERN_INFO "%02x:%02x\n", address,val); */
return val;
+#endif
}
-
static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot,
int address, u8 value)
{
@@ -372,6 +496,15 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
struct cxd *ci = ca->data;
mutex_lock(&ci->lock);
+#if 0
+ write_reg(ci, 0x00, 0x21);
+ write_reg(ci, 0x06, 0x1F);
+ write_reg(ci, 0x00, 0x31);
+#else
+#if 0
+ write_reg(ci, 0x06, 0x1F);
+ write_reg(ci, 0x06, 0x2F);
+#else
cam_mode(ci, 0);
write_reg(ci, 0x00, 0x21);
write_reg(ci, 0x06, 0x1F);
@@ -379,13 +512,25 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
write_regm(ci, 0x20, 0x80, 0x80);
write_reg(ci, 0x03, 0x02);
ci->ready = 0;
+#endif
+#endif
ci->mode = -1;
{
int i;
+#if 0
+ u8 val;
+#endif
for (i = 0; i < 100; i++) {
msleep(10);
+#if 0
+ read_reg(ci, 0x06, &val);
+ printk(KERN_INFO "%d:%02x\n", i, val);
+ if (!(val&0x10))
+ break;
+#else
if (ci->ready)
break;
+#endif
}
}
mutex_unlock(&ci->lock);
@@ -399,12 +544,12 @@ static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
printk(KERN_INFO "slot_shutdown\n");
mutex_lock(&ci->lock);
- /* write_regm(ci, 0x09, 0x08, 0x08); */
- write_regm(ci, 0x20, 0x80, 0x80);
- write_regm(ci, 0x06, 0x07, 0x07);
+ write_regm(ci, 0x09, 0x08, 0x08);
+ write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */
+ write_regm(ci, 0x06, 0x07, 0x07); /* Clear IO Mode */
ci->mode = -1;
mutex_unlock(&ci->lock);
- return 0; /* shutdown(ci); */
+ return 0;
}
static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
@@ -459,7 +604,6 @@ static int campoll(struct cxd *ci)
if (istat&8 && ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
ci->ready = 1;
ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
- printk(KERN_INFO "READY\n");
}
}
return 0;
@@ -510,7 +654,7 @@ static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
struct cxd *ci = ca->data;
mutex_lock(&ci->lock);
- printk(KERN_INFO "write_data %d\n", ecount);
+ printk(KERN_INFO "write_data %d\n", ecount);
write_reg(ci, 0x0d, ecount>>8);
write_reg(ci, 0x0e, ecount&0xff);
write_block(ci, 0x11, ebuf, ecount);
@@ -535,15 +679,15 @@ static struct dvb_ca_en50221 en_templ = {
};
-struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv,
+struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
+ void *priv,
struct i2c_adapter *i2c)
{
struct cxd *ci = 0;
- u32 bitrate = 62000000;
u8 val;
- if (i2c_read_reg(i2c, adr, 0, &val) < 0) {
- printk(KERN_ERR "No CXD2099 detected at %02x\n", adr);
+ if (i2c_read_reg(i2c, cfg->adr, 0, &val) < 0) {
+ printk(KERN_INFO "No CXD2099 detected at %02x\n", cfg->adr);
return 0;
}
@@ -553,21 +697,20 @@ struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv,
memset(ci, 0, sizeof(*ci));
mutex_init(&ci->lock);
+ memcpy(&ci->cfg, cfg, sizeof(struct cxd2099_cfg));
ci->i2c = i2c;
- ci->adr = adr;
ci->lastaddress = 0xff;
ci->clk_reg_b = 0x4a;
ci->clk_reg_f = 0x1b;
- ci->bitrate = bitrate;
memcpy(&ci->en, &en_templ, sizeof(en_templ));
ci->en.data = ci;
init(ci);
- printk(KERN_INFO "Attached CXD2099AR at %02x\n", ci->adr);
+ printk(KERN_INFO "Attached CXD2099AR at %02x\n", ci->cfg.adr);
return &ci->en;
}
EXPORT_SYMBOL(cxd2099_attach);
MODULE_DESCRIPTION("cxd2099");
-MODULE_AUTHOR("Ralph Metzler <rjkm@metzlerbros.de>");
+MODULE_AUTHOR("Ralph Metzler");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/cxd2099/cxd2099.h b/drivers/staging/cxd2099/cxd2099.h
index bed54ff..19c588a 100644
--- a/drivers/staging/cxd2099/cxd2099.h
+++ b/drivers/staging/cxd2099/cxd2099.h
@@ -1,7 +1,7 @@
/*
* cxd2099.h: Driver for the CXD2099AR Common Interface Controller
*
- * Copyright (C) 2010 DigitalDevices UG
+ * Copyright (C) 2010-2011 Digital Devices GmbH
*
*
* This program is free software; you can redistribute it and/or
@@ -27,11 +27,21 @@
#include <dvb_ca_en50221.h>
+struct cxd2099_cfg {
+ u32 bitrate;
+ u8 adr;
+ u8 polarity:1;
+ u8 clock_mode:1;
+};
+
#if defined(CONFIG_DVB_CXD2099) || \
- (defined(CONFIG_DVB_CXD2099_MODULE) && defined(MODULE))
-struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv, struct i2c_adapter *i2c);
+ (defined(CONFIG_DVB_CXD2099_MODULE) && defined(MODULE))
+struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
+ void *priv, struct i2c_adapter *i2c);
#else
-static inline struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv, struct i2c_adapter *i2c)
+
+static inline struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
+ void *priv, struct i2c_adapter *i2c)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/dt3155v4l/dt3155v4l.c
index fe02d22..05aa41c 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/dt3155v4l/dt3155v4l.c
@@ -22,6 +22,7 @@
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 627a98b..9e728b3 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -22,6 +22,7 @@
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
+#include <asm/io.h>
#include <asm/uaccess.h>
#include "ft1000.h"
diff --git a/drivers/staging/gma500/gem_glue.c b/drivers/staging/gma500/gem_glue.c
index 779ac1a..daac121 100644
--- a/drivers/staging/gma500/gem_glue.c
+++ b/drivers/staging/gma500/gem_glue.c
@@ -20,26 +20,6 @@
#include <drm/drmP.h>
#include <drm/drm.h>
-/**
- * Initialize an already allocated GEM object of the specified size with
- * no GEM provided backing store. Instead the caller is responsible for
- * backing the object and handling it.
- */
-int drm_gem_private_object_init(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size)
-{
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
- obj->dev = dev;
- obj->filp = NULL;
-
- kref_init(&obj->refcount);
- atomic_set(&obj->handle_count, 0);
- obj->size = size;
-
- return 0;
-}
-
void drm_gem_object_release_wrap(struct drm_gem_object *obj)
{
/* Remove the list map if one is present */
@@ -51,8 +31,7 @@ void drm_gem_object_release_wrap(struct drm_gem_object *obj)
kfree(list->map);
list->map = NULL;
}
- if (obj->filp)
- drm_gem_object_release(obj);
+ drm_gem_object_release(obj);
}
/**
diff --git a/drivers/staging/gma500/gem_glue.h b/drivers/staging/gma500/gem_glue.h
index a0f2bc4..ce5ce30 100644
--- a/drivers/staging/gma500/gem_glue.h
+++ b/drivers/staging/gma500/gem_glue.h
@@ -1,4 +1,2 @@
extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
-extern int drm_gem_private_object_init(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size);
extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c
index 02e17c9..fd211f3 100644
--- a/drivers/staging/gma500/mdfld_dsi_dbi.c
+++ b/drivers/staging/gma500/mdfld_dsi_dbi.c
@@ -711,10 +711,11 @@ struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
/* Create drm encoder object */
connector = &dsi_connector->base.base;
encoder = &dbi_output->base.base;
+ /* Review this if we ever get MIPI-HDMI bridges or similar */
drm_encoder_init(dev,
encoder,
p_funcs->encoder_funcs,
- DRM_MODE_ENCODER_MIPI);
+ DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs);
/* Attach to given connector */
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h
index dc6242c..f0fa986 100644
--- a/drivers/staging/gma500/mdfld_dsi_dbi.h
+++ b/drivers/staging/gma500/mdfld_dsi_dbi.h
@@ -42,9 +42,6 @@
#include "mdfld_dsi_output.h"
#include "mdfld_output.h"
-#define DRM_MODE_ENCODER_MIPI 5
-
-
/*
* DBI encoder which inherits from mdfld_dsi_encoder
*/
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c
index 6e03a91..e685f12 100644
--- a/drivers/staging/gma500/mdfld_dsi_dpi.c
+++ b/drivers/staging/gma500/mdfld_dsi_dpi.c
@@ -777,10 +777,15 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
/* Create drm encoder object */
connector = &dsi_connector->base.base;
encoder = &dpi_output->base.base;
+ /*
+ * On existing hardware this will be a panel of some form,
+ * if future devices also have HDMI bridges this will need
+ * revisiting
+ */
drm_encoder_init(dev,
encoder,
p_funcs->encoder_funcs,
- DRM_MODE_ENCODER_MIPI);
+ DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(encoder,
p_funcs->encoder_helper_funcs);
diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c
index 7536095..9050c0f 100644
--- a/drivers/staging/gma500/mdfld_dsi_output.c
+++ b/drivers/staging/gma500/mdfld_dsi_output.c
@@ -955,7 +955,9 @@ void mdfld_dsi_output_init(struct drm_device *dev,
psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
connector = &psb_output->base;
- drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, DRM_MODE_CONNECTOR_MIPI);
+ /* Revisit type if MIPI/HDMI bridges ever appear on Medfield */
+ drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h
index 38165e8..09e9687 100644
--- a/drivers/staging/gma500/medfield.h
+++ b/drivers/staging/gma500/medfield.h
@@ -21,8 +21,6 @@
* DEALINGS IN THE SOFTWARE.
*/
-#define DRM_MODE_ENCODER_MIPI 5
-
/* Medfield DSI controller registers */
#define MIPIA_DEVICE_READY_REG 0xb000
diff --git a/drivers/staging/gma500/mrst_hdmi.c b/drivers/staging/gma500/mrst_hdmi.c
index d6a5179..e66607e 100644
--- a/drivers/staging/gma500/mrst_hdmi.c
+++ b/drivers/staging/gma500/mrst_hdmi.c
@@ -129,7 +129,7 @@ static void wait_for_vblank(struct drm_device *dev)
{
/* FIXME: Can we do this as a sleep ? */
/* Wait for 20ms, i.e. one cycle at 50hz. */
- udelay(20000);
+ mdelay(20);
}
static void scu_busy_loop(void *scu_base)
diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h
index 72f487a..fd4732d 100644
--- a/drivers/staging/gma500/psb_drv.h
+++ b/drivers/staging/gma500/psb_drv.h
@@ -35,7 +35,6 @@
/* Append new drm mode definition here, align with libdrm definition */
#define DRM_MODE_SCALE_NO_SCALE 2
-#define DRM_MODE_CONNECTOR_MIPI 15
enum {
CHIP_PSB_8108 = 0, /* Poulsbo */
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 3612574..d286b22 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -325,7 +325,7 @@ static int blkvsc_do_operation(struct block_device_context *blkdev,
page_buf = alloc_page(GFP_KERNEL);
if (!page_buf) {
- kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
+ kmem_cache_free(blkdev->request_pool, blkvsc_req);
return -ENOMEM;
}
@@ -422,7 +422,7 @@ cleanup:
__free_page(page_buf);
- kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
+ kmem_cache_free(blkdev->request_pool, blkvsc_req);
return ret;
}
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index bf19888..cf5d15d 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -311,13 +311,17 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16203_addresses[chan->address][0];
ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16203_ERROR_ACTIVE) {
ret = adis16203_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index cfd09b3..3e2b626 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -341,13 +341,17 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16204_addresses[chan->address][0];
ret = adis16204_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16204_ERROR_ACTIVE) {
ret = adis16204_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 55f3a7b..bec1fa8 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -337,13 +337,17 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16209_addresses[chan->address][0];
ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16209_ERROR_ACTIVE) {
ret = adis16209_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index 4a4eafc..aee8b69 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -370,13 +370,17 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16240_addresses[chan->address][0];
ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16240_ERROR_ACTIVE) {
ret = adis16240_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index 05797f4..f2d43cf 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -446,13 +446,17 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16260_addresses[chan->address][0];
ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16260_ERROR_ACTIVE) {
ret = adis16260_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index 77b47f7..649d6b7 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -4,5 +4,7 @@ ToDo list (incomplete, unordered)
- add compile as module support
- move nvec devices to mfd cells?
- adjust to kernel style
-
-
+ - fix clk usage
+ should not be using clk_get_sys(), but clk_get(&pdev->dev, conn)
+ where conn is either NULL if the device only has one clock, or
+ the device specific name if it has multiple clocks.
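The clk_get() usage suggested above, sketched minimally (assumed platform driver context with a single clock, so conn is NULL; names are illustrative, not nvec code):

    /* Illustrative only: per-device clock lookup instead of clk_get_sys(). */
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            clk = clk_get(&pdev->dev, NULL);  /* NULL: the device has only one clock */
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            clk_enable(clk);
            /* ... use the device ... */
            clk_disable(clk);
            clk_put(clk);
            return 0;
    }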
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 9c0d293..c3d73f8 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -26,6 +26,7 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/interrupt.h>
#include <linux/phy.h>
#include <linux/ratelimit.h>
#include <net/dst.h>
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 0f22f0f..1a7c19a 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -42,7 +42,7 @@
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 9708254..d0e2d51 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -26,6 +26,7 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/interrupt.h>
#include <net/dst.h>
#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 6227571..b445cd6 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -38,7 +38,7 @@
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index 6766f46..4bb5fff 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -399,10 +399,7 @@ download_firmware_fail:
}
-
-
-
-
-
-
+MODULE_FIRMWARE("RTL8192U/boot.img");
+MODULE_FIRMWARE("RTL8192U/main.img");
+MODULE_FIRMWARE("RTL8192U/data.img");
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 5ff59f2..16c73fb 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -66,12 +66,6 @@ static int msi_en;
module_param(msi_en, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "enable msi");
-/* These are used to make sure the module doesn't unload before all the
- * threads have exited.
- */
-static atomic_t total_threads = ATOMIC_INIT(0);
-static DECLARE_COMPLETION(threads_gone);
-
static irqreturn_t rtsx_interrupt(int irq, void *dev_id);
/***********************************************************************
@@ -192,7 +186,7 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
/* enqueue the command and wake up the control thread */
srb->scsi_done = done;
chip->srb = srb;
- up(&(dev->sema));
+ complete(&dev->cmnd_ready);
return 0;
}
@@ -475,7 +469,7 @@ static int rtsx_control_thread(void *__dev)
current->flags |= PF_NOFREEZE;
for (;;) {
- if (down_interruptible(&dev->sema))
+ if (wait_for_completion_interruptible(&dev->cmnd_ready))
break;
/* lock the device pointers */
@@ -557,8 +551,6 @@ SkipForAbort:
mutex_unlock(&dev->dev_mutex);
} /* for (;;) */
- scsi_host_put(host);
-
/* notify the exit routine that we're actually exiting now
*
* complete()/wait_for_completion() is similar to up()/down(),
@@ -573,7 +565,7 @@ SkipForAbort:
* This is important in preemption kernels, which transfer the flow
* of execution immediately upon a complete().
*/
- complete_and_exit(&threads_gone, 0);
+ complete_and_exit(&dev->control_exit, 0);
}
@@ -581,7 +573,6 @@ static int rtsx_polling_thread(void *__dev)
{
struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
struct rtsx_chip *chip = dev->chip;
- struct Scsi_Host *host = rtsx_to_host(dev);
struct sd_info *sd_card = &(chip->sd_card);
struct xd_info *xd_card = &(chip->xd_card);
struct ms_info *ms_card = &(chip->ms_card);
@@ -621,8 +612,7 @@ static int rtsx_polling_thread(void *__dev)
mutex_unlock(&dev->dev_mutex);
}
- scsi_host_put(host);
- complete_and_exit(&threads_gone, 0);
+ complete_and_exit(&dev->polling_exit, 0);
}
/*
@@ -699,29 +689,38 @@ static void rtsx_release_resources(struct rtsx_dev *dev)
{
printk(KERN_INFO "-- %s\n", __func__);
+ /* Tell the control thread to exit. The SCSI host must
+ * already have been removed so it won't try to queue
+ * any more commands.
+ */
+ printk(KERN_INFO "-- sending exit command to thread\n");
+ complete(&dev->cmnd_ready);
+ if (dev->ctl_thread)
+ wait_for_completion(&dev->control_exit);
+ if (dev->polling_thread)
+ wait_for_completion(&dev->polling_exit);
+
+ wait_timeout(200);
+
if (dev->rtsx_resv_buf) {
- dma_free_coherent(&(dev->pci->dev), HOST_CMDS_BUF_LEN,
+ dma_free_coherent(&(dev->pci->dev), RTSX_RESV_BUF_LEN,
dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr);
dev->chip->host_cmds_ptr = NULL;
dev->chip->host_sg_tbl_ptr = NULL;
}
- pci_disable_device(dev->pci);
- pci_release_regions(dev->pci);
-
- if (dev->irq > 0) {
+ if (dev->irq > 0)
free_irq(dev->irq, (void *)dev);
- }
- if (dev->chip->msi_en) {
+ if (dev->chip->msi_en)
pci_disable_msi(dev->pci);
- }
+ if (dev->remap_addr)
+ iounmap(dev->remap_addr);
- /* Tell the control thread to exit. The SCSI host must
- * already have been removed so it won't try to queue
- * any more commands.
- */
- printk(KERN_INFO "-- sending exit command to thread\n");
- up(&dev->sema);
+ pci_disable_device(dev->pci);
+ pci_release_regions(dev->pci);
+
+ rtsx_release_chip(dev->chip);
+ kfree(dev->chip);
}
/* First stage of disconnect processing: stop all commands and remove
@@ -739,6 +738,7 @@ static void quiesce_and_remove_host(struct rtsx_dev *dev)
scsi_unlock(host);
mutex_unlock(&dev->dev_mutex);
wake_up(&dev->delay_wait);
+ wait_for_completion(&dev->scanning_done);
 /* Wait some time to let other threads exit */
wait_timeout(100);
@@ -793,8 +793,7 @@ static int rtsx_scan_thread(void *__dev)
/* Should we unbind if no devices were detected? */
}
- scsi_host_put(rtsx_to_host(dev));
- complete_and_exit(&threads_gone, 0);
+ complete_and_exit(&dev->scanning_done, 0);
}
static void rtsx_init_options(struct rtsx_chip *chip)
@@ -941,8 +940,11 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
spin_lock_init(&dev->reg_lock);
mutex_init(&(dev->dev_mutex));
- sema_init(&(dev->sema), 0);
+ init_completion(&dev->cmnd_ready);
+ init_completion(&dev->control_exit);
+ init_completion(&dev->polling_exit);
init_completion(&(dev->notify));
+ init_completion(&dev->scanning_done);
init_waitqueue_head(&dev->delay_wait);
dev->pci = pci;
@@ -992,28 +994,22 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
pci_set_master(pci);
synchronize_irq(dev->irq);
- err = scsi_add_host(host, &pci->dev);
- if (err) {
- printk(KERN_ERR "Unable to add the scsi host\n");
- goto errout;
- }
-
rtsx_init_chip(dev->chip);
/* Start up our control thread */
- th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME);
+ th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
if (IS_ERR(th)) {
printk(KERN_ERR "Unable to start control thread\n");
err = PTR_ERR(th);
goto errout;
}
+ dev->ctl_thread = th;
- /* Take a reference to the host for the control thread and
- * count it among all the threads we have launched. Then
- * start it up. */
- scsi_host_get(rtsx_to_host(dev));
- atomic_inc(&total_threads);
- wake_up_process(th);
+ err = scsi_add_host(host, &pci->dev);
+ if (err) {
+ printk(KERN_ERR "Unable to add the scsi host\n");
+ goto errout;
+ }
/* Start up the thread for delayed SCSI-device scanning */
th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
@@ -1024,28 +1020,17 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
goto errout;
}
- /* Take a reference to the host for the scanning thread and
- * count it among all the threads we have launched. Then
- * start it up. */
- scsi_host_get(rtsx_to_host(dev));
- atomic_inc(&total_threads);
wake_up_process(th);
/* Start up the thread for polling thread */
- th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling");
+ th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
if (IS_ERR(th)) {
printk(KERN_ERR "Unable to start the device-polling thread\n");
quiesce_and_remove_host(dev);
err = PTR_ERR(th);
goto errout;
}
-
- /* Take a reference to the host for the polling thread and
- * count it among all the threads we have launched. Then
- * start it up. */
- scsi_host_get(rtsx_to_host(dev));
- atomic_inc(&total_threads);
- wake_up_process(th);
+ dev->polling_thread = th;
pci_set_drvdata(pci, dev);
@@ -1108,16 +1093,6 @@ static void __exit rtsx_exit(void)
pci_unregister_driver(&driver);
- /* Don't return until all of our control and scanning threads
- * have exited. Since each thread signals threads_gone as its
- * last act, we have to call wait_for_completion the right number
- * of times.
- */
- while (atomic_read(&total_threads) > 0) {
- wait_for_completion(&threads_gone);
- atomic_dec(&total_threads);
- }
-
printk(KERN_INFO "%s module exit\n", CR_DRIVER_NAME);
}
diff --git a/drivers/staging/rts_pstor/rtsx.h b/drivers/staging/rts_pstor/rtsx.h
index 247615b..86e47c2 100644
--- a/drivers/staging/rts_pstor/rtsx.h
+++ b/drivers/staging/rts_pstor/rtsx.h
@@ -112,9 +112,16 @@ struct rtsx_dev {
/* locks */
spinlock_t reg_lock;
+ struct task_struct *ctl_thread; /* the control thread */
+ struct task_struct *polling_thread; /* the polling thread */
+
/* mutual exclusion and synchronization structures */
- struct semaphore sema; /* to sleep thread on */
+ struct completion cmnd_ready; /* to sleep thread on */
+ struct completion control_exit; /* control thread exit */
+ struct completion polling_exit; /* polling thread exit */
struct completion notify; /* thread begin/end */
+ struct completion scanning_done; /* wait for scan thread */
+
wait_queue_head_t delay_wait; /* wait during scan, reset */
struct mutex dev_mutex;
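The synchronization change in this driver (a counting semaphore plus a global thread counter replaced by per-purpose completions) follows the pattern sketched below, with simplified, illustrative names rather than the actual rts_pstor code: the producer signals new work with complete(), the worker blocks in wait_for_completion_interruptible(), and teardown waits on a dedicated exit completion instead of counting threads:

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/module.h>

    static DECLARE_COMPLETION(cmnd_ready);
    static DECLARE_COMPLETION(worker_exit);
    static struct task_struct *worker;

    static int worker_fn(void *unused)
    {
            for (;;) {
                    /* Sleep until work is signalled; a pending signal ends the loop. */
                    if (wait_for_completion_interruptible(&cmnd_ready))
                            break;
                    /* ... handle one queued command ... */
            }
            complete_and_exit(&worker_exit, 0);  /* wake teardown; never returns */
    }

    static int __init example_init(void)
    {
            worker = kthread_run(worker_fn, NULL, "example-worker");
            return IS_ERR(worker) ? PTR_ERR(worker) : 0;
    }

    static void __exit example_exit(void)
    {
            complete(&cmnd_ready);              /* tell the worker to exit */
            wait_for_completion(&worker_exit);  /* wait until it has actually exited */
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");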
diff --git a/drivers/staging/solo6x10/core.c b/drivers/staging/solo6x10/core.c
index 7677994..f974f64 100644
--- a/drivers/staging/solo6x10/core.c
+++ b/drivers/staging/solo6x10/core.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include <linux/videodev2.h>
#include "solo6x10.h"
#include "tw28.h"
diff --git a/drivers/staging/solo6x10/enc.c b/drivers/staging/solo6x10/enc.c
index 285f7f3..de50259 100644
--- a/drivers/staging/solo6x10/enc.c
+++ b/drivers/staging/solo6x10/enc.c
@@ -18,6 +18,7 @@
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include "solo6x10.h"
#include "osd-font.h"
diff --git a/drivers/staging/solo6x10/g723.c b/drivers/staging/solo6x10/g723.c
index bd8eb92..59274bf 100644
--- a/drivers/staging/solo6x10/g723.c
+++ b/drivers/staging/solo6x10/g723.c
@@ -21,6 +21,7 @@
#include <linux/mempool.h>
#include <linux/poll.h>
#include <linux/kthread.h>
+#include <linux/slab.h>
#include <linux/freezer.h>
#include <sound/core.h>
#include <sound/initval.h>
diff --git a/drivers/staging/solo6x10/p2m.c b/drivers/staging/solo6x10/p2m.c
index 5717eab..56210f0 100644
--- a/drivers/staging/solo6x10/p2m.c
+++ b/drivers/staging/solo6x10/p2m.c
@@ -18,6 +18,7 @@
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/scatterlist.h>
#include "solo6x10.h"
diff --git a/drivers/staging/solo6x10/solo6x10.h b/drivers/staging/solo6x10/solo6x10.h
index fd59b09..abee721 100644
--- a/drivers/staging/solo6x10/solo6x10.h
+++ b/drivers/staging/solo6x10/solo6x10.h
@@ -28,8 +28,9 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>
#include <media/videobuf-core.h>
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index 39dc586..940769e 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -18,13 +18,14 @@ static ssize_t speakup_file_write(struct file *fp, const char *buffer,
{
size_t count = nbytes;
const char *ptr = buffer;
- int bytes;
+ size_t bytes;
unsigned long flags;
u_char buf[256];
+
if (synth == NULL)
return -ENODEV;
while (count > 0) {
- bytes = min_t(size_t, count, sizeof(buf));
+ bytes = min(count, sizeof(buf));
if (copy_from_user(buf, ptr, bytes))
return -EFAULT;
count -= bytes;
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 589a055..3d1279c 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -209,7 +209,6 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
- omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);
omap_mcbsp_request(MCBSP_ID(clk_id));
omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
break;
diff --git a/drivers/staging/tidspbridge/include/dspbridge/host_os.h b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
index 1a38896..a2f31c6 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/host_os.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
@@ -18,7 +18,7 @@
#define _HOST_OS_H_
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/irq.h>
diff --git a/drivers/staging/tm6000/tm6000-alsa.c b/drivers/staging/tm6000/tm6000-alsa.c
index ddfd7c3..bd5fa89 100644
--- a/drivers/staging/tm6000/tm6000-alsa.c
+++ b/drivers/staging/tm6000/tm6000-alsa.c
@@ -84,7 +84,6 @@ static int _tm6000_start_audio_dma(struct snd_tm6000_card *chip)
tm6000_set_audio_bitrate(core, 48000);
-
return 0;
}
@@ -123,6 +122,7 @@ static int dsp_buffer_alloc(struct snd_pcm_substream *substream, int size)
if (substream->runtime->dma_area) {
if (substream->runtime->dma_bytes > size)
return 0;
+
dsp_buffer_free(substream);
}
@@ -152,9 +152,9 @@ static struct snd_pcm_hardware snd_tm6000_digital_hw = {
SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_CONTINUOUS,
- .rate_min = 48000,
- .rate_max = 48000,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 48000,
+ .rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.period_bytes_min = 64,
@@ -254,9 +254,7 @@ static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size)
memcpy(runtime->dma_area + buf_pos * stride, buf,
length * stride);
-#ifndef NO_PCM_LOCK
snd_pcm_stream_lock(substream);
-#endif
chip->buf_pos += length;
if (chip->buf_pos >= runtime->buffer_size)
@@ -268,9 +266,7 @@ static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size)
period_elapsed = 1;
}
-#ifndef NO_PCM_LOCK
snd_pcm_stream_unlock(substream);
-#endif
if (period_elapsed)
snd_pcm_period_elapsed(substream);
diff --git a/drivers/staging/winbond/mds_s.h b/drivers/staging/winbond/mds_s.h
index eeedf01..07d835b 100644
--- a/drivers/staging/winbond/mds_s.h
+++ b/drivers/staging/winbond/mds_s.h
@@ -3,7 +3,7 @@
#include <linux/timer.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "localpara.h"
#include "mac_structures.h"
diff --git a/drivers/staging/winbond/wb35reg_s.h b/drivers/staging/winbond/wb35reg_s.h
index eb274ff..dc79faa 100644
--- a/drivers/staging/winbond/wb35reg_s.h
+++ b/drivers/staging/winbond/wb35reg_s.h
@@ -3,7 +3,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct hw_data;
diff --git a/drivers/staging/zcache/Makefile b/drivers/staging/zcache/Makefile
index f5ec64f..60daa27 100644
--- a/drivers/staging/zcache/Makefile
+++ b/drivers/staging/zcache/Makefile
@@ -1,3 +1,3 @@
-zcache-y := tmem.o
+zcache-y := zcache-main.o tmem.o
obj-$(CONFIG_ZCACHE) += zcache.o
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
index 975e34b..1ca66ea 100644
--- a/drivers/staging/zcache/tmem.c
+++ b/drivers/staging/zcache/tmem.c
@@ -604,7 +604,7 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
struct tmem_obj *obj;
void *pampd;
bool ephemeral = is_ephemeral(pool);
- uint32_t ret = -1;
+ int ret = -1;
struct tmem_hashbucket *hb;
bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
bool lock_held = false;
diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache-main.c
index 65a81a0..a3f5162 100644
--- a/drivers/staging/zcache/zcache.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -19,6 +19,7 @@
* http://marc.info/?l=linux-mm&m=127811271605009
*/
+#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
@@ -27,6 +28,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
+#include <linux/math64.h>
#include "tmem.h"
#include "../zram/xvmalloc.h" /* if built in drivers/staging */
@@ -53,6 +55,9 @@
#define MAX_CLIENTS 16
#define LOCAL_CLIENT ((uint16_t)-1)
+
+MODULE_LICENSE("GPL");
+
struct zcache_client {
struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
struct xv_pool *xvpool;
@@ -1153,11 +1158,12 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
size_t clen;
int ret;
unsigned long count;
- struct page *page = virt_to_page(data);
+ struct page *page = (struct page *)(data);
struct zcache_client *cli = pool->client;
uint16_t client_id = get_client_id_from_client(cli);
unsigned long zv_mean_zsize;
unsigned long curr_pers_pampd_count;
+ u64 total_zsize;
if (eph) {
ret = zcache_compress(page, &cdata, &clen);
@@ -1190,8 +1196,9 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
}
/* reject if mean compression is too poor */
if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
- zv_mean_zsize = xv_get_total_size_bytes(cli->xvpool) /
- curr_pers_pampd_count;
+ total_zsize = xv_get_total_size_bytes(cli->xvpool);
+ zv_mean_zsize = div_u64(total_zsize,
+ curr_pers_pampd_count);
if (zv_mean_zsize > zv_max_mean_zsize) {
zcache_mean_compress_poor++;
goto out;
@@ -1220,7 +1227,7 @@ static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
int ret = 0;
BUG_ON(is_ephemeral(pool));
- zv_decompress(virt_to_page(data), pampd);
+ zv_decompress((struct page *)(data), pampd);
return ret;
}
@@ -1532,7 +1539,7 @@ static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
goto out;
if (!zcache_freeze && zcache_do_preload(pool) == 0) {
/* preload does preempt_disable on success */
- ret = tmem_put(pool, oidp, index, page_address(page),
+ ret = tmem_put(pool, oidp, index, (char *)(page),
PAGE_SIZE, 0, is_ephemeral(pool));
if (ret < 0) {
if (is_ephemeral(pool))
@@ -1565,7 +1572,7 @@ static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
pool = zcache_get_pool_by_id(cli_id, pool_id);
if (likely(pool != NULL)) {
if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_get(pool, oidp, index, page_address(page),
+ ret = tmem_get(pool, oidp, index, (char *)(page),
&size, 0, is_ephemeral(pool));
zcache_put_pool(pool);
}
@@ -1929,9 +1936,9 @@ __setup("nofrontswap", no_frontswap);
static int __init zcache_init(void)
{
-#ifdef CONFIG_SYSFS
int ret = 0;
+#ifdef CONFIG_SYSFS
ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
if (ret) {
pr_err("zcache: can't create sysfs\n");
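The div_u64() conversion in the hunk above exists because an open-coded division of a u64 by an unsigned long becomes a 64-by-64 divide on 32-bit builds and references __udivdi3, which the kernel does not provide; div_u64() from <linux/math64.h> performs the 64-by-32 division portably. A short illustration (my own example, not zcache code):

    #include <linux/math64.h>
    #include <linux/types.h>

    /* Mean object size = total bytes / object count, safe on 32-bit kernels. */
    static inline u32 mean_zsize(u64 total_bytes, u32 count)
    {
            return count ? (u32)div_u64(total_bytes, count) : 0;
    }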
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 5cb0f0e..b28794b 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,5 +31,6 @@ config TCM_PSCSI
source "drivers/target/loopback/Kconfig"
source "drivers/target/tcm_fc/Kconfig"
+source "drivers/target/iscsi/Kconfig"
endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 21df808..1060c7b 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -24,5 +24,5 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
# Fabric modules
obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
-
obj-$(CONFIG_TCM_FC) += tcm_fc/
+obj-$(CONFIG_ISCSI_TARGET) += iscsi/
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
new file mode 100644
index 0000000..8345fb4
--- /dev/null
+++ b/drivers/target/iscsi/Kconfig
@@ -0,0 +1,9 @@
+config ISCSI_TARGET
+ tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
+ depends on NET
+ select CRYPTO
+ select CRYPTO_CRC32C
+ select CRYPTO_CRC32C_INTEL if X86
+ help
+ Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
+ Target Mode Stack.
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
new file mode 100644
index 0000000..5b9a2cf
--- /dev/null
+++ b/drivers/target/iscsi/Makefile
@@ -0,0 +1,20 @@
+iscsi_target_mod-y += iscsi_target_parameters.o \
+ iscsi_target_seq_pdu_list.o \
+ iscsi_target_tq.o \
+ iscsi_target_auth.o \
+ iscsi_target_datain_values.o \
+ iscsi_target_device.o \
+ iscsi_target_erl0.o \
+ iscsi_target_erl1.o \
+ iscsi_target_erl2.o \
+ iscsi_target_login.o \
+ iscsi_target_nego.o \
+ iscsi_target_nodeattrib.o \
+ iscsi_target_tmr.o \
+ iscsi_target_tpg.o \
+ iscsi_target_util.o \
+ iscsi_target.o \
+ iscsi_target_configfs.o \
+ iscsi_target_stat.o
+
+obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
new file mode 100644
index 0000000..6a4ea29
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -0,0 +1,4563 @@
+/*******************************************************************************
+ * This file contains main functions related to the iSCSI Target Core Driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/crypto.h>
+#include <linux/completion.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi_device.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_configfs.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_tmr.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_stat.h"
+
+static LIST_HEAD(g_tiqn_list);
+static LIST_HEAD(g_np_list);
+static DEFINE_SPINLOCK(tiqn_lock);
+static DEFINE_SPINLOCK(np_lock);
+
+static struct idr tiqn_idr;
+struct idr sess_idr;
+struct mutex auth_id_lock;
+spinlock_t sess_idr_lock;
+
+struct iscsit_global *iscsit_global;
+
+struct kmem_cache *lio_cmd_cache;
+struct kmem_cache *lio_qr_cache;
+struct kmem_cache *lio_dr_cache;
+struct kmem_cache *lio_ooo_cache;
+struct kmem_cache *lio_r2t_cache;
+
+static int iscsit_handle_immediate_data(struct iscsi_cmd *,
+ unsigned char *buf, u32);
+static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+
+struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
+{
+ struct iscsi_tiqn *tiqn = NULL;
+
+ spin_lock(&tiqn_lock);
+ list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
+ if (!strcmp(tiqn->tiqn, buf)) {
+
+ spin_lock(&tiqn->tiqn_state_lock);
+ if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
+ tiqn->tiqn_access_count++;
+ spin_unlock(&tiqn->tiqn_state_lock);
+ spin_unlock(&tiqn_lock);
+ return tiqn;
+ }
+ spin_unlock(&tiqn->tiqn_state_lock);
+ }
+ }
+ spin_unlock(&tiqn_lock);
+
+ return NULL;
+}
+
+static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
+{
+ spin_lock(&tiqn->tiqn_state_lock);
+ if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
+ tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
+ spin_unlock(&tiqn->tiqn_state_lock);
+ return 0;
+ }
+ spin_unlock(&tiqn->tiqn_state_lock);
+
+ return -1;
+}
+
+void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
+{
+ spin_lock(&tiqn->tiqn_state_lock);
+ tiqn->tiqn_access_count--;
+ spin_unlock(&tiqn->tiqn_state_lock);
+}
+
+/*
+ * Note that IQN formatting is expected to be done in userspace, and
+ * no explicit IQN format checks are done here.
+ */
+struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
+{
+ struct iscsi_tiqn *tiqn = NULL;
+ int ret;
+
+ if (strlen(buf) >= ISCSI_IQN_LEN) {
+ pr_err("Target IQN exceeds %d bytes\n",
+ ISCSI_IQN_LEN);
+ return ERR_PTR(-EINVAL);
+ }
+
+ tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
+ if (!tiqn) {
+ pr_err("Unable to allocate struct iscsi_tiqn\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sprintf(tiqn->tiqn, "%s", buf);
+ INIT_LIST_HEAD(&tiqn->tiqn_list);
+ INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
+ spin_lock_init(&tiqn->tiqn_state_lock);
+ spin_lock_init(&tiqn->tiqn_tpg_lock);
+ spin_lock_init(&tiqn->sess_err_stats.lock);
+ spin_lock_init(&tiqn->login_stats.lock);
+ spin_lock_init(&tiqn->logout_stats.lock);
+
+ if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
+ pr_err("idr_pre_get() for tiqn_idr failed\n");
+ kfree(tiqn);
+ return ERR_PTR(-ENOMEM);
+ }
+ tiqn->tiqn_state = TIQN_STATE_ACTIVE;
+
+ spin_lock(&tiqn_lock);
+ ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
+ if (ret < 0) {
+ pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
+ spin_unlock(&tiqn_lock);
+ kfree(tiqn);
+ return ERR_PTR(ret);
+ }
+ list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+ spin_unlock(&tiqn_lock);
+
+ pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
+
+ return tiqn;
+
+}
+
+static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
+{
+ /*
+ * Wait for accesses to said struct iscsi_tiqn to end.
+ */
+ spin_lock(&tiqn->tiqn_state_lock);
+ while (tiqn->tiqn_access_count != 0) {
+ spin_unlock(&tiqn->tiqn_state_lock);
+ msleep(10);
+ spin_lock(&tiqn->tiqn_state_lock);
+ }
+ spin_unlock(&tiqn->tiqn_state_lock);
+}
+
+void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
+{
+ /*
+ * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
+ * while holding tiqn->tiqn_state_lock. This means that all subsequent
+ * attempts to access this struct iscsi_tiqn will fail from both transport
+ * fabric and control code paths.
+ */
+ if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
+ pr_err("iscsit_set_tiqn_shutdown() failed\n");
+ return;
+ }
+
+ iscsit_wait_for_tiqn(tiqn);
+
+ spin_lock(&tiqn_lock);
+ list_del(&tiqn->tiqn_list);
+ idr_remove(&tiqn_idr, tiqn->tiqn_index);
+ spin_unlock(&tiqn_lock);
+
+ pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
+ tiqn->tiqn);
+ kfree(tiqn);
+}
+
+int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+{
+ int ret;
+ /*
+ * Determine if the network portal is accepting storage traffic.
+ */
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return -1;
+ }
+ if (np->np_login_tpg) {
+ pr_err("np->np_login_tpg() is not NULL!\n");
+ spin_unlock_bh(&np->np_thread_lock);
+ return -1;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+ /*
+ * Determine if the portal group is accepting storage traffic.
+ */
+ spin_lock_bh(&tpg->tpg_state_lock);
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+ spin_unlock_bh(&tpg->tpg_state_lock);
+ return -1;
+ }
+ spin_unlock_bh(&tpg->tpg_state_lock);
+
+ /*
+ * Here we serialize access across the TIQN+TPG Tuple.
+ */
+ ret = mutex_lock_interruptible(&tpg->np_login_lock);
+ if ((ret != 0) || signal_pending(current))
+ return -1;
+
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_login_tpg = tpg;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ return 0;
+}
+
+int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+{
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_login_tpg = NULL;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ mutex_unlock(&tpg->np_login_lock);
+
+ if (tiqn)
+ iscsit_put_tiqn_for_login(tiqn);
+
+ return 0;
+}
+
+static struct iscsi_np *iscsit_get_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+{
+ struct sockaddr_in *sock_in, *sock_in_e;
+ struct sockaddr_in6 *sock_in6, *sock_in6_e;
+ struct iscsi_np *np;
+ int ip_match = 0;
+ u16 port;
+
+ spin_lock_bh(&np_lock);
+ list_for_each_entry(np, &g_np_list, np_list) {
+ spin_lock(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ spin_unlock(&np->np_thread_lock);
+ continue;
+ }
+
+ if (sockaddr->ss_family == AF_INET6) {
+ sock_in6 = (struct sockaddr_in6 *)sockaddr;
+ sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+ if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
+ (void *)&sock_in6_e->sin6_addr.in6_u,
+ sizeof(struct in6_addr)))
+ ip_match = 1;
+
+ port = ntohs(sock_in6->sin6_port);
+ } else {
+ sock_in = (struct sockaddr_in *)sockaddr;
+ sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+
+ if (sock_in->sin_addr.s_addr ==
+ sock_in_e->sin_addr.s_addr)
+ ip_match = 1;
+
+ port = ntohs(sock_in->sin_port);
+ }
+
+ if ((ip_match == 1) && (np->np_port == port) &&
+ (np->np_network_transport == network_transport)) {
+ /*
+ * Increment the np_exports reference count now to
+ * prevent iscsit_del_np() below from being called
+ * while iscsi_tpg_add_network_portal() is called.
+ */
+ np->np_exports++;
+ spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np_lock);
+ return np;
+ }
+ spin_unlock(&np->np_thread_lock);
+ }
+ spin_unlock_bh(&np_lock);
+
+ return NULL;
+}
+
+struct iscsi_np *iscsit_add_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ char *ip_str,
+ int network_transport)
+{
+ struct sockaddr_in *sock_in;
+ struct sockaddr_in6 *sock_in6;
+ struct iscsi_np *np;
+ int ret;
+ /*
+ * Locate the existing struct iscsi_np if already active..
+ */
+ np = iscsit_get_np(sockaddr, network_transport);
+ if (np)
+ return np;
+
+ np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
+ if (!np) {
+ pr_err("Unable to allocate memory for struct iscsi_np\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ np->np_flags |= NPF_IP_NETWORK;
+ if (sockaddr->ss_family == AF_INET6) {
+ sock_in6 = (struct sockaddr_in6 *)sockaddr;
+ snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
+ np->np_port = ntohs(sock_in6->sin6_port);
+ } else {
+ sock_in = (struct sockaddr_in *)sockaddr;
+		/* Bound the copy like the IPv6 case above */
+		snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
+ np->np_port = ntohs(sock_in->sin_port);
+ }
+
+ np->np_network_transport = network_transport;
+ spin_lock_init(&np->np_thread_lock);
+ init_completion(&np->np_restart_comp);
+ INIT_LIST_HEAD(&np->np_list);
+
+ ret = iscsi_target_setup_login_socket(np, sockaddr);
+ if (ret != 0) {
+ kfree(np);
+ return ERR_PTR(ret);
+ }
+
+ np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
+ if (IS_ERR(np->np_thread)) {
+ pr_err("Unable to create kthread: iscsi_np\n");
+ ret = PTR_ERR(np->np_thread);
+ kfree(np);
+ return ERR_PTR(ret);
+ }
+ /*
+ * Increment the np_exports reference count now to prevent
+ * iscsit_del_np() below from being run while a new call to
+ * iscsi_tpg_add_network_portal() for a matching iscsi_np is
+ * active. We don't need to hold np->np_thread_lock at this
+ * point because iscsi_np has not been added to g_np_list yet.
+ */
+ np->np_exports = 1;
+
+ spin_lock_bh(&np_lock);
+ list_add_tail(&np->np_list, &g_np_list);
+ spin_unlock_bh(&np_lock);
+
+ pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
+ np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
+ "TCP" : "SCTP");
+
+ return np;
+}
+
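+/*
+ * Kick a network portal's login thread back through its accept loop:
+ * mark the thread state as RESET, send SIGINT to break it out of a
+ * blocking socket call, and wait on np_restart_comp for the thread to
+ * come back around.
+ */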
+int iscsit_reset_np_thread(
+ struct iscsi_np *np,
+ struct iscsi_tpg_np *tpg_np,
+ struct iscsi_portal_group *tpg)
+{
+ spin_lock_bh(&np->np_thread_lock);
+ if (tpg && tpg_np) {
+ /*
+ * The reset operation need only be performed when the
+ * passed struct iscsi_portal_group has a login in progress
+ * to one of the network portals.
+ */
+ if (tpg_np->tpg_np->np_login_tpg != tpg) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+ }
+ }
+ if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+ }
+ np->np_thread_state = ISCSI_NP_THREAD_RESET;
+
+ if (np->np_thread) {
+ spin_unlock_bh(&np->np_thread_lock);
+ send_sig(SIGINT, np->np_thread, 1);
+ wait_for_completion(&np->np_restart_comp);
+ spin_lock_bh(&np->np_thread_lock);
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ return 0;
+}
+
+int iscsit_del_np_comm(struct iscsi_np *np)
+{
+ if (!np->np_socket)
+ return 0;
+
+ /*
+	 * Some network transports allocate their own struct sock->file, so
+	 * see if we need to free any additional allocated resources.
+ */
+ if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+ kfree(np->np_socket->file);
+ np->np_socket->file = NULL;
+ }
+
+ sock_release(np->np_socket);
+ return 0;
+}
+
+int iscsit_del_np(struct iscsi_np *np)
+{
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_exports--;
+ if (np->np_exports) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+ }
+ np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ if (np->np_thread) {
+ /*
+ * We need to send the signal to wakeup Linux/Net
+ * which may be sleeping in sock_accept()..
+ */
+ send_sig(SIGINT, np->np_thread, 1);
+ kthread_stop(np->np_thread);
+ }
+ iscsit_del_np_comm(np);
+
+ spin_lock_bh(&np_lock);
+ list_del(&np->np_list);
+ spin_unlock_bh(&np_lock);
+
+ pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
+ np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
+ "TCP" : "SCTP");
+
+ kfree(np);
+ return 0;
+}
+
+static int __init iscsi_target_init_module(void)
+{
+ int ret = 0;
+
+ pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
+
+ iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
+ if (!iscsit_global) {
+ pr_err("Unable to allocate memory for iscsit_global\n");
+		return -ENOMEM;
+ }
+ mutex_init(&auth_id_lock);
+ spin_lock_init(&sess_idr_lock);
+ idr_init(&tiqn_idr);
+ idr_init(&sess_idr);
+
+ ret = iscsi_target_register_configfs();
+ if (ret < 0)
+ goto out;
+
+ ret = iscsi_thread_set_init();
+ if (ret < 0)
+ goto configfs_out;
+
+ if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
+ TARGET_THREAD_SET_COUNT) {
+ pr_err("iscsi_allocate_thread_sets() returned"
+ " unexpected value!\n");
+ goto ts_out1;
+ }
+
+ lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
+ sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
+ 0, NULL);
+ if (!lio_cmd_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_cmd_cache\n");
+ goto ts_out2;
+ }
+
+ lio_qr_cache = kmem_cache_create("lio_qr_cache",
+ sizeof(struct iscsi_queue_req),
+ __alignof__(struct iscsi_queue_req), 0, NULL);
+ if (!lio_qr_cache) {
+		pr_err("Unable to kmem_cache_create() for"
+ " lio_qr_cache\n");
+ goto cmd_out;
+ }
+
+ lio_dr_cache = kmem_cache_create("lio_dr_cache",
+ sizeof(struct iscsi_datain_req),
+ __alignof__(struct iscsi_datain_req), 0, NULL);
+ if (!lio_dr_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_dr_cache\n");
+ goto qr_out;
+ }
+
+ lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
+ sizeof(struct iscsi_ooo_cmdsn),
+ __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
+ if (!lio_ooo_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_ooo_cache\n");
+ goto dr_out;
+ }
+
+ lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
+ sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
+ 0, NULL);
+ if (!lio_r2t_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_r2t_cache\n");
+ goto ooo_out;
+ }
+
+ if (iscsit_load_discovery_tpg() < 0)
+ goto r2t_out;
+
+ return ret;
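+	/*
+	 * Error unwind: each label below releases the resources set up
+	 * before the step that failed, in reverse order of initialization.
+	 */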
+r2t_out:
+ kmem_cache_destroy(lio_r2t_cache);
+ooo_out:
+ kmem_cache_destroy(lio_ooo_cache);
+dr_out:
+ kmem_cache_destroy(lio_dr_cache);
+qr_out:
+ kmem_cache_destroy(lio_qr_cache);
+cmd_out:
+ kmem_cache_destroy(lio_cmd_cache);
+ts_out2:
+ iscsi_deallocate_thread_sets();
+ts_out1:
+ iscsi_thread_set_free();
+configfs_out:
+ iscsi_target_deregister_configfs();
+out:
+ kfree(iscsit_global);
+ return -ENOMEM;
+}
+
+static void __exit iscsi_target_cleanup_module(void)
+{
+ iscsi_deallocate_thread_sets();
+ iscsi_thread_set_free();
+ iscsit_release_discovery_tpg();
+ kmem_cache_destroy(lio_cmd_cache);
+ kmem_cache_destroy(lio_qr_cache);
+ kmem_cache_destroy(lio_dr_cache);
+ kmem_cache_destroy(lio_ooo_cache);
+ kmem_cache_destroy(lio_r2t_cache);
+
+ iscsi_target_deregister_configfs();
+
+ kfree(iscsit_global);
+}
+
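+/*
+ * Queue an ISCSI_OP_REJECT response carrying a copy of the offending
+ * PDU header, then wait on reject_comp for the response path to finish
+ * with it. Returns -1 when fail_conn is set (or on error) so the caller
+ * can fail the connection.
+ */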
+int iscsit_add_reject(
+ u8 reason,
+ int fail_conn,
+ unsigned char *buf,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd;
+ struct iscsi_reject *hdr;
+ int ret;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return -1;
+
+ cmd->iscsi_opcode = ISCSI_OP_REJECT;
+ if (fail_conn)
+ cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
+
+ hdr = (struct iscsi_reject *) cmd->pdu;
+ hdr->reason = reason;
+
+ cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!cmd->buf_ptr) {
+ pr_err("Unable to allocate memory for cmd->buf_ptr\n");
+ iscsit_release_cmd(cmd);
+ return -1;
+ }
+ memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ cmd->i_state = ISTATE_SEND_REJECT;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ ret = wait_for_completion_interruptible(&cmd->reject_comp);
+ if (ret != 0)
+ return -1;
+
+ return (!fail_conn) ? 0 : -1;
+}
+
+int iscsit_add_reject_from_cmd(
+ u8 reason,
+ int fail_conn,
+ int add_to_conn,
+ unsigned char *buf,
+ struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn;
+ struct iscsi_reject *hdr;
+ int ret;
+
+ if (!cmd->conn) {
+ pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
+ cmd->init_task_tag);
+ return -1;
+ }
+ conn = cmd->conn;
+
+ cmd->iscsi_opcode = ISCSI_OP_REJECT;
+ if (fail_conn)
+ cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
+
+ hdr = (struct iscsi_reject *) cmd->pdu;
+ hdr->reason = reason;
+
+ cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!cmd->buf_ptr) {
+ pr_err("Unable to allocate memory for cmd->buf_ptr\n");
+ iscsit_release_cmd(cmd);
+ return -1;
+ }
+ memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
+
+ if (add_to_conn) {
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ }
+
+ cmd->i_state = ISTATE_SEND_REJECT;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ ret = wait_for_completion_interruptible(&cmd->reject_comp);
+ if (ret != 0)
+ return -1;
+
+ return (!fail_conn) ? 0 : -1;
+}
+
+/*
+ * Map some portion of the allocated scatterlist to an iovec, suitable for
+ * kernel sockets to copy data in/out. This handles both pages and slab-allocated
+ * buffers, since we have been tricky and mapped t_mem_sg to the buffer in
+ * either case (see iscsit_alloc_buffs)
+ */
+static int iscsit_map_iovec(
+ struct iscsi_cmd *cmd,
+ struct kvec *iov,
+ u32 data_offset,
+ u32 data_length)
+{
+ u32 i = 0;
+ struct scatterlist *sg;
+ unsigned int page_off;
+
+ /*
+ * We have a private mapping of the allocated pages in t_mem_sg.
+ * At this point, we also know each contains a page.
+ */
+ sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE];
+ page_off = (data_offset % PAGE_SIZE);
+
+ cmd->first_data_sg = sg;
+ cmd->first_data_sg_off = page_off;
+
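+	/*
+	 * Build one kvec entry per (partial) page: kmap() each backing
+	 * page and clamp the first segment by page_off; subsequent
+	 * segments start at offset zero within their page.
+	 */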
+ while (data_length) {
+ u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+
+ iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
+ iov[i].iov_len = cur_len;
+
+ data_length -= cur_len;
+ page_off = 0;
+ sg = sg_next(sg);
+ i++;
+ }
+
+ cmd->kmapped_nents = i;
+
+ return i;
+}
+
+static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
+{
+ u32 i;
+ struct scatterlist *sg;
+
+ sg = cmd->first_data_sg;
+
+ for (i = 0; i < cmd->kmapped_nents; i++)
+ kunmap(sg_page(&sg[i]));
+}
+
+static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
+{
+ struct iscsi_cmd *cmd;
+
+ conn->exp_statsn = exp_statsn;
+
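+	/*
+	 * Any command that has already sent its status and whose StatSN
+	 * is now acknowledged by ExpStatSN can be retired through the
+	 * immediate queue.
+	 */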
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ spin_lock(&cmd->istate_lock);
+ if ((cmd->i_state == ISTATE_SENT_STATUS) &&
+ (cmd->stat_sn < exp_statsn)) {
+ cmd->i_state = ISTATE_REMOVE;
+ spin_unlock(&cmd->istate_lock);
+ iscsit_add_cmd_to_immediate_queue(cmd, conn,
+ cmd->i_state);
+ continue;
+ }
+ spin_unlock(&cmd->istate_lock);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+}
+
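+/*
+ * Pre-allocate the kvec array used to move this command's payload:
+ * one entry per data scatterlist element (or a single entry when there
+ * is no data), plus TRANSPORT_IOV_DATA_BUFFER spare slots that the
+ * datapath uses for things like padding and DataDigest kvecs.
+ */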
+static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
+{
+ u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
+ cmd->se_cmd.t_data_nents;
+
+ iov_count += TRANSPORT_IOV_DATA_BUFFER;
+
+ cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
+ if (!cmd->iov_data) {
+ pr_err("Unable to allocate cmd->iov_data\n");
+ return -ENOMEM;
+ }
+
+ cmd->orig_iov_data_count = iov_count;
+ return 0;
+}
+
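+/*
+ * Allocate zeroed pages for the SCSI payload, describe them with a
+ * private scatterlist kept in cmd->t_mem_sg (used by iscsit_map_iovec),
+ * hand the same scatterlist to the SE core via
+ * transport_generic_map_mem_to_cmd(), and allocate the iovecs.
+ */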
+static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
+{
+ struct scatterlist *sgl;
+ u32 length = cmd->se_cmd.data_length;
+ int nents = DIV_ROUND_UP(length, PAGE_SIZE);
+ int i = 0, ret;
+ /*
+ * If no SCSI payload is present, allocate the default iovecs used for
+ * iSCSI PDU Header
+ */
+ if (!length)
+ return iscsit_allocate_iovecs(cmd);
+
+ sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL);
+ if (!sgl)
+ return -ENOMEM;
+
+ sg_init_table(sgl, nents);
+
+ while (length) {
+ int buf_size = min_t(int, length, PAGE_SIZE);
+ struct page *page;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto page_alloc_failed;
+
+ sg_set_page(&sgl[i], page, buf_size, 0);
+
+ length -= buf_size;
+ i++;
+ }
+
+ cmd->t_mem_sg = sgl;
+ cmd->t_mem_sg_nents = nents;
+
+ /* BIDI ops not supported */
+
+ /* Tell the core about our preallocated memory */
+ transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0);
+ /*
+ * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd
+ * so that cmd->se_cmd.t_tasks_se_num has been set.
+ */
+ ret = iscsit_allocate_iovecs(cmd);
+ if (ret < 0)
+ goto page_alloc_failed;
+
+ return 0;
+
+page_alloc_failed:
+	/* Free only the pages that were successfully allocated (0..i-1) */
+	while (i > 0)
+		__free_page(sg_page(&sgl[--i]));
+	kfree(sgl);
+	cmd->t_mem_sg = NULL;
+	cmd->t_mem_sg_nents = 0;
+	return -ENOMEM;
+}
+
+static int iscsit_handle_scsi_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
+ int dump_immediate_data = 0, send_check_condition = 0, payload_length;
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_scsi_req *hdr;
+
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->cmd_pdus++;
+ if (conn->sess->se_sess->se_node_acl) {
+ spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ conn->sess->se_sess->se_node_acl->num_cmds++;
+ spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ }
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+
+ hdr = (struct iscsi_scsi_req *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->data_length = be32_to_cpu(hdr->data_length);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ /* FIXME; Add checks for AdditionalHeaderSegment */
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
+ !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+ pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
+ " not set. Bad iSCSI Initiator.\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
+ /*
+ * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
+		 * that adds support for RESERVE/RELEASE. There is a bug in
+		 * this new functionality that sets the R/W bits when neither
+		 * CDB carries any READ or WRITE data payloads.
+ */
+ if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
+ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+ goto done;
+ }
+
+ pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ " set when Expected Data Transfer Length is 0 for"
+ " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+done:
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
+ !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
+ pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
+ " MUST be set if Expected Data Transfer Length is not 0."
+ " Bad iSCSI Initiator\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
+ pr_err("Bidirectional operations not supported!\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ pr_err("Illegally set Immediate Bit in iSCSI Initiator"
+ " Scsi Command PDU.\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if (payload_length && !conn->sess->sess_ops->ImmediateData) {
+ pr_err("ImmediateData=No but DataSegmentLength=%u,"
+ " protocol error.\n", payload_length);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if ((hdr->data_length == payload_length) &&
+ (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
+ pr_err("Expected Data Transfer Length and Length of"
+ " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
+			" bit is not set, protocol error.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > hdr->data_length) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " EDTL: %u, protocol error.\n", payload_length,
+ hdr->data_length);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " MaxRecvDataSegmentLength: %u, protocol error.\n",
+ payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " FirstBurstLength: %u, protocol error.\n",
+ payload_length, conn->sess->sess_ops->FirstBurstLength);
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
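+	/*
+	 * Map the iSCSI R/W flags onto the SE data direction: WRITE means
+	 * DMA_TO_DEVICE, READ means DMA_FROM_DEVICE, neither means
+	 * DMA_NONE.
+	 */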
+ data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
+ (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
+ DMA_NONE;
+
+ cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction,
+ (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK));
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
+ buf, conn);
+
+ pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
+ " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
+ hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
+
+ cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
+ cmd->i_state = ISTATE_NEW_CMD;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ cmd->immediate_data = (payload_length) ? 1 : 0;
+ cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
+ if (cmd->unsolicited_data)
+ cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
+
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
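+	/*
+	 * For READs, hand out a Target Transfer Tag from the per-session
+	 * counter, skipping the reserved value 0xFFFFFFFF; WRITEs start
+	 * out with the reserved tag.
+	 */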
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ spin_lock_bh(&conn->sess->ttt_lock);
+ cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+ cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ spin_unlock_bh(&conn->sess->ttt_lock);
+ } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->first_burst_len = payload_length;
+
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ struct iscsi_datain_req *dr;
+
+ dr = iscsit_allocate_datain_req();
+ if (!dr)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+
+ iscsit_attach_datain_req(cmd, dr);
+ }
+
+ /*
+ * The CDB is going to an se_device_t.
+ */
+ ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb,
+ get_unaligned_le64(&hdr->lun));
+ if (ret < 0) {
+ if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
+ pr_debug("Responding to non-acl'ed,"
+ " non-existent or non-exported iSCSI LUN:"
+ " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
+ }
+ if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+
+ send_check_condition = 1;
+ goto attach_cmd;
+ }
+ /*
+ * The Initiator Node has access to the LUN (the addressing method
+ * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to
+ * allocate 1->N transport tasks (depending on sector count and
+	 * maximum request size the physical HBA(s) can handle).
+ */
+ transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
+ if (transport_ret == -ENOMEM) {
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+ } else if (transport_ret == -EINVAL) {
+ /*
+ * Unsupported SAM Opcode. CHECK_CONDITION will be sent
+ * in iscsit_execute_cmd() during the CmdSN OOO Execution
+		 * Mechanism.
+ */
+ send_check_condition = 1;
+ } else {
+ if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+ }
+
+attach_cmd:
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ /*
+ * Check if we need to delay processing because of ALUA
+ * Active/NonOptimized primary access state..
+ */
+ core_alua_check_nonop_delay(&cmd->se_cmd);
+ /*
+ * Allocate and setup SGL used with transport_generic_map_mem_to_cmd().
+ * also call iscsit_allocate_iovecs()
+ */
+ ret = iscsit_alloc_buffs(cmd);
+ if (ret < 0)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+ /*
+ * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
+ * the Immediate Bit is not set, and no Immediate
+ * Data is attached.
+ *
+ * A PDU/CmdSN carrying Immediate Data can only
+ * be processed after the DataCRC has passed.
+ * If the DataCRC fails, the CmdSN MUST NOT
+ * be acknowledged. (See below)
+ */
+ if (!cmd->immediate_data) {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ /*
+ * If no Immediate Data is attached, it's OK to return now.
+ */
+ if (!cmd->immediate_data) {
+ if (send_check_condition)
+ return 0;
+
+ if (cmd->unsolicited_data) {
+ iscsit_set_dataout_sequence_values(cmd);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, cmd->conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ }
+
+ return 0;
+ }
+
+ /*
+ * Early CHECK_CONDITIONs never make it to the transport processing
+ * thread. They are processed in CmdSN order by
+ * iscsit_check_received_cmdsn() below.
+ */
+ if (send_check_condition) {
+ immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ dump_immediate_data = 1;
+ goto after_immediate_data;
+ }
+ /*
+ * Call directly into transport_generic_new_cmd() to perform
+ * the backend memory allocation.
+ */
+ ret = transport_generic_new_cmd(&cmd->se_cmd);
+ if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+ immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ dump_immediate_data = 1;
+ goto after_immediate_data;
+ }
+
+ immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length);
+after_immediate_data:
+ if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
+ /*
+ * A PDU/CmdSN carrying Immediate Data passed
+ * DataCRC, check against ExpCmdSN/MaxCmdSN if
+ * Immediate Bit is not set.
+ */
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ /*
+ * Special case for Unsupported SAM WRITE Opcodes
+ * and ImmediateData=Yes.
+ */
+ if (dump_immediate_data) {
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return -1;
+ } else if (cmd->unsolicited_data) {
+ iscsit_set_dataout_sequence_values(cmd);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, cmd->conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ }
+
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+
+ } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
+ /*
+ * Immediate Data failed DataCRC and ERL>=1,
+ * silently drop this PDU and let the initiator
+ * plug the CmdSN gap.
+ *
+ * FIXME: Send Unsolicited NOPIN with reserved
+ * TTT here to help the initiator figure out
+ * the missing CmdSN, although they should be
+ * intelligent enough to determine the missing
+ * CmdSN and issue a retry to plug the sequence.
+ */
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
+ return -1;
+
+ return 0;
+}
+
+static u32 iscsit_do_crypto_hash_sg(
+ struct hash_desc *hash,
+ struct iscsi_cmd *cmd,
+ u32 data_offset,
+ u32 data_length,
+ u32 padding,
+ u8 *pad_bytes)
+{
+ u32 data_crc;
+ u32 i;
+ struct scatterlist *sg;
+ unsigned int page_off;
+
+ crypto_hash_init(hash);
+
+ sg = cmd->first_data_sg;
+ page_off = cmd->first_data_sg_off;
+
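+	/*
+	 * Walk the payload scatterlist segments into the DataDigest hash
+	 * (CRC32C), clamping the first segment's length by the recorded
+	 * intra-page offset, then fold in any pad bytes via a one-entry
+	 * scatterlist below.
+	 */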
+ i = 0;
+ while (data_length) {
+ u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
+
+ crypto_hash_update(hash, &sg[i], cur_len);
+
+ data_length -= cur_len;
+ page_off = 0;
+ i++;
+ }
+
+ if (padding) {
+ struct scatterlist pad_sg;
+
+ sg_init_one(&pad_sg, pad_bytes, padding);
+ crypto_hash_update(hash, &pad_sg, padding);
+ }
+ crypto_hash_final(hash, (u8 *) &data_crc);
+
+ return data_crc;
+}
+
+static void iscsit_do_crypto_hash_buf(
+ struct hash_desc *hash,
+ unsigned char *buf,
+ u32 payload_length,
+ u32 padding,
+ u8 *pad_bytes,
+ u8 *data_crc)
+{
+ struct scatterlist sg;
+
+ crypto_hash_init(hash);
+
+ sg_init_one(&sg, (u8 *)buf, payload_length);
+ crypto_hash_update(hash, &sg, payload_length);
+
+ if (padding) {
+ sg_init_one(&sg, pad_bytes, padding);
+ crypto_hash_update(hash, &sg, padding);
+ }
+ crypto_hash_final(hash, data_crc);
+}
+
+static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+{
+ int iov_ret, ooo_cmdsn = 0, ret;
+ u8 data_crc_failed = 0;
+ u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
+ u32 rx_size = 0, payload_length;
+ struct iscsi_cmd *cmd = NULL;
+ struct se_cmd *se_cmd;
+ struct iscsi_data *hdr;
+ struct kvec *iov;
+ unsigned long flags;
+
+ hdr = (struct iscsi_data *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ hdr->datasn = be32_to_cpu(hdr->datasn);
+ hdr->offset = be32_to_cpu(hdr->offset);
+
+ if (!payload_length) {
+ pr_err("DataOUT payload is ZERO, protocol error.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ /* iSCSI write */
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->rx_data_octets += payload_length;
+ if (conn->sess->se_sess->se_node_acl) {
+ spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
+ spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ }
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " MaxRecvDataSegmentLength: %u\n", payload_length,
+ conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
+ payload_length);
+ if (!cmd)
+ return 0;
+
+ pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
+ " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+ hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
+ payload_length, conn->cid);
+
+ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+ pr_err("Command ITT: 0x%08x received DataOUT after"
+ " last DataOUT received, dumping payload\n",
+ cmd->init_task_tag);
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+ }
+
+ if (cmd->data_direction != DMA_TO_DEVICE) {
+ pr_err("Command ITT: 0x%08x received DataOUT for a"
+ " NON-WRITE command.\n", cmd->init_task_tag);
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+ se_cmd = &cmd->se_cmd;
+ iscsit_mod_dataout_timer(cmd);
+
+ if ((hdr->offset + payload_length) > cmd->data_length) {
+ pr_err("DataOut Offset: %u, Length %u greater than"
+ " iSCSI Command EDTL %u, protocol error.\n",
+ hdr->offset, payload_length, cmd->data_length);
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+ 1, 0, buf, cmd);
+ }
+
+ if (cmd->unsolicited_data) {
+ int dump_unsolicited_data = 0;
+
+ if (conn->sess->sess_ops->InitialR2T) {
+ pr_err("Received unexpected unsolicited data"
+ " while InitialR2T=Yes, protocol error.\n");
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
+ return -1;
+ }
+ /*
+ * Special case for dealing with Unsolicited DataOUT
+ * and Unsupported SAM WRITE Opcodes and SE resource allocation
+ * failures;
+ */
+
+ /* Something's amiss if we're not in WRITE_PENDING state... */
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
+ (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+ dump_unsolicited_data = 1;
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ if (dump_unsolicited_data) {
+ /*
+ * Check if a delayed TASK_ABORTED status needs to
+ * be sent now if the ISCSI_FLAG_CMD_FINAL has been
+			 * received with the unsolicited data out.
+ */
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+ iscsit_stop_dataout_timer(cmd);
+
+ transport_check_aborted_status(se_cmd,
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+ }
+ } else {
+ /*
+ * For the normal solicited data path:
+ *
+ * Check for a delayed TASK_ABORTED status and dump any
+ * incoming data out payload if one exists. Also, when the
+ * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
+ * data out sequence, we decrement outstanding_r2ts. Once
+ * outstanding_r2ts reaches zero, go ahead and send the delayed
+ * TASK_ABORTED status.
+ */
+ if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+ if (--cmd->outstanding_r2ts < 1) {
+ iscsit_stop_dataout_timer(cmd);
+ transport_check_aborted_status(
+ se_cmd, 1);
+ }
+
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+ }
+ }
+ /*
+	 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
+ * within-command recovery checks before receiving the payload.
+ */
+ ret = iscsit_check_pre_dataout(cmd, buf);
+ if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)
+ return 0;
+ else if (ret == DATAOUT_CANNOT_RECOVER)
+ return -1;
+
+ rx_size += payload_length;
+ iov = &cmd->iov_data[0];
+
+ iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length);
+ if (iov_ret < 0)
+ return -1;
+
+ iov_count += iov_ret;
+
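+	/*
+	 * iSCSI data segments are padded out to a 4-byte boundary;
+	 * (-len) & 3 gives the 0..3 pad bytes needed, e.g. a 5 byte
+	 * payload needs 3 bytes of padding.
+	 */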
+ padding = ((-payload_length) & 3);
+ if (padding != 0) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = padding;
+ rx_size += padding;
+ pr_debug("Receiving %u padding bytes.\n", padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iov[iov_count].iov_base = &checksum;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+
+ iscsit_unmap_iovec(cmd);
+
+ if (rx_got != rx_size)
+ return -1;
+
+ if (conn->conn_ops->DataDigest) {
+ u32 data_crc;
+
+ data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+ hdr->offset, payload_length, padding,
+ cmd->pad_bytes);
+
+ if (checksum != data_crc) {
+ pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
+ " DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
+ " does not match computed 0x%08x\n",
+ hdr->itt, hdr->offset, payload_length,
+ hdr->datasn, checksum, data_crc);
+ data_crc_failed = 1;
+ } else {
+ pr_debug("Got CRC32C DataDigest 0x%08x for"
+ " %u bytes of Data Out\n", checksum,
+ payload_length);
+ }
+ }
+ /*
+ * Increment post receive data and CRC values or perform
+ * within-command recovery.
+ */
+ ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed);
+ if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY))
+ return 0;
+ else if (ret == DATAOUT_SEND_R2T) {
+ iscsit_set_dataout_sequence_values(cmd);
+ iscsit_build_r2ts_for_cmd(cmd, conn, 0);
+ } else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
+ /*
+ * Handle extra special case for out of order
+ * Unsolicited Data Out.
+ */
+ spin_lock_bh(&cmd->istate_lock);
+ ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+
+ iscsit_stop_dataout_timer(cmd);
+ return (!ooo_cmdsn) ? transport_generic_handle_data(
+ &cmd->se_cmd) : 0;
+ } else /* DATAOUT_CANNOT_RECOVER */
+ return -1;
+
+ return 0;
+}
+
+static int iscsit_handle_nop_out(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ unsigned char *ping_data = NULL;
+ int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
+ u32 checksum, data_crc, padding = 0, payload_length;
+ u64 lun;
+ struct iscsi_cmd *cmd = NULL;
+ struct kvec *iov = NULL;
+ struct iscsi_nopout *hdr;
+
+ hdr = (struct iscsi_nopout *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ lun = get_unaligned_le64(&hdr->lun);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
+ " not set, protocol error.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
+ " greater than MaxRecvDataSegmentLength: %u, protocol"
+ " error.\n", payload_length,
+ conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
+ (hdr->itt == 0xFFFFFFFF) ? "Response" : "Request",
+ hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
+ payload_length);
+ /*
+	 * This is not a response to an Unsolicited NopIN, which means
+	 * it can either be a NOPOUT ping request (with a valid ITT),
+	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
+	 * Either way, make sure we allocate a struct iscsi_cmd, as both
+ * can contain ping data.
+ */
+ if (hdr->ttt == 0xFFFFFFFF) {
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return iscsit_add_reject(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
+ cmd->i_state = ISTATE_SEND_NOPIN;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
+ 1 : 0);
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->data_direction = DMA_NONE;
+ }
+
+ if (payload_length && (hdr->ttt == 0xFFFFFFFF)) {
+ rx_size = payload_length;
+ ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
+ if (!ping_data) {
+ pr_err("Unable to allocate memory for"
+ " NOPOUT ping data.\n");
+ ret = -1;
+ goto out;
+ }
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = ping_data;
+ iov[niov++].iov_len = payload_length;
+
+ padding = ((-payload_length) & 3);
+ if (padding != 0) {
+ pr_debug("Receiving %u additional bytes"
+ " for padding.\n", padding);
+ iov[niov].iov_base = &cmd->pad_bytes;
+ iov[niov++].iov_len = padding;
+ rx_size += padding;
+ }
+ if (conn->conn_ops->DataDigest) {
+ iov[niov].iov_base = &checksum;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
+ if (rx_got != rx_size) {
+ ret = -1;
+ goto out;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ ping_data, payload_length,
+ padding, cmd->pad_bytes,
+ (u8 *)&data_crc);
+
+ if (checksum != data_crc) {
+ pr_err("Ping data CRC32C DataDigest"
+ " 0x%08x does not match computed 0x%08x\n",
+ checksum, data_crc);
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " NOPOUT Ping DataCRC failure while in"
+ " ERL=0.\n");
+ ret = -1;
+ goto out;
+ } else {
+ /*
+ * Silently drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_debug("Dropping NOPOUT"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ ret = 0;
+ goto out;
+ }
+ } else {
+ pr_debug("Got CRC32C DataDigest"
+ " 0x%08x for %u bytes of ping data.\n",
+ checksum, payload_length);
+ }
+ }
+
+ ping_data[payload_length] = '\0';
+ /*
+ * Attach ping data to struct iscsi_cmd->buf_ptr.
+ */
+ cmd->buf_ptr = (void *)ping_data;
+ cmd->buf_ptr_size = payload_length;
+
+ pr_debug("Got %u bytes of NOPOUT ping"
+ " data.\n", payload_length);
+ pr_debug("Ping Data: \"%s\"\n", ping_data);
+ }
+
+ if (hdr->itt != 0xFFFFFFFF) {
+ if (!cmd) {
+ pr_err("Checking CmdSN for NOPOUT,"
+ " but cmd is NULL!\n");
+ return -1;
+ }
+ /*
+		 * Initiator is expecting a NopIN ping reply.
+ */
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ iscsit_add_cmd_to_response_queue(cmd, conn,
+ cmd->i_state);
+ return 0;
+ }
+
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ ret = 0;
+ goto ping_out;
+ }
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+
+ return 0;
+ }
+
+ if (hdr->ttt != 0xFFFFFFFF) {
+ /*
+		 * This was a response to an unsolicited NOPIN ping.
+ */
+ cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
+ if (!cmd)
+ return -1;
+
+ iscsit_stop_nopin_response_timer(conn);
+
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ iscsit_start_nopin_timer(conn);
+ } else {
+ /*
+		 * Initiator is not expecting a NOPIN in response.
+ * Just ignore for now.
+ *
+ * iSCSI v19-91 10.18
+ * "A NOP-OUT may also be used to confirm a changed
+ * ExpStatSN if another PDU will not be available
+ * for a long time."
+ */
+ ret = 0;
+ goto out;
+ }
+
+ return 0;
+out:
+ if (cmd)
+ iscsit_release_cmd(cmd);
+ping_out:
+ kfree(ping_data);
+ return ret;
+}
+
+static int iscsit_handle_task_mgt_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_cmd *cmd;
+ struct se_tmr_req *se_tmr;
+ struct iscsi_tmr_req *tmr_req;
+ struct iscsi_tm *hdr;
+ u32 payload_length;
+ int out_of_order_cmdsn = 0;
+ int ret;
+ u8 function;
+
+ hdr = (struct iscsi_tm *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->rtt = be32_to_cpu(hdr->rtt);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
+ hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
+ hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+ function = hdr->flags;
+
+ pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
+ " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
+ " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
+ hdr->rtt, hdr->refcmdsn, conn->cid);
+
+ if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
+ ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
+ (hdr->rtt != ISCSI_RESERVED_TAG))) {
+ pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
+ hdr->rtt = ISCSI_RESERVED_TAG;
+ }
+
+ if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
+ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ pr_err("Task Management Request TASK_REASSIGN not"
+ " issued as immediate command, bad iSCSI Initiator"
+			" implementation\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+ if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
+ (hdr->refcmdsn != ISCSI_RESERVED_TAG))
+ hdr->refcmdsn = ISCSI_RESERVED_TAG;
+
+ cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
+ cmd->i_state = ISTATE_SEND_TASKMGTRSP;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ se_tmr = cmd->se_cmd.se_tmr_req;
+ tmr_req = cmd->tmr_req;
+ /*
+ * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
+ */
+ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+ ret = iscsit_get_lun_for_tmr(cmd,
+ get_unaligned_le64(&hdr->lun));
+ if (ret < 0) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
+ goto attach;
+ }
+ }
+
+ switch (function) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
+ if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ goto attach;
+ }
+ break;
+ case ISCSI_TM_FUNC_ABORT_TASK_SET:
+ case ISCSI_TM_FUNC_CLEAR_ACA:
+ case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ break;
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
+ goto attach;
+ }
+ break;
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
+ goto attach;
+ }
+ break;
+ case ISCSI_TM_FUNC_TASK_REASSIGN:
+ se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
+ /*
+ * Perform sanity checks on the ExpDataSN only if the
+ * TASK_REASSIGN was successful.
+ */
+ if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
+ break;
+
+ if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
+ buf, cmd);
+ break;
+ default:
+ pr_err("Unknown TMR function: 0x%02x, protocol"
+ " error.\n", function);
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
+ goto attach;
+ }
+
+ if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
+ (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+ se_tmr->call_transport = 1;
+attach:
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+ out_of_order_cmdsn = 1;
+ else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ return 0;
+ } else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+ }
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ if (out_of_order_cmdsn)
+ return 0;
+ /*
+ * Found the referenced task, send to transport for processing.
+ */
+ if (se_tmr->call_transport)
+ return transport_generic_handle_tmr(&cmd->se_cmd);
+
+ /*
+ * Could not find the referenced LUN, task, or Task Management
+ * command not authorized or supported. Change state and
+ * let the tx_thread send the response.
+ *
+ * For connection recovery, this is also the default action for
+ * TMR TASK_REASSIGN.
+ */
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+/* #warning FIXME: Support Text Command parameters besides SendTargets */
+static int iscsit_handle_text_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ char *text_ptr, *text_in;
+ int cmdsn_ret, niov = 0, rx_got, rx_size;
+ u32 checksum = 0, data_crc = 0, payload_length;
+ u32 padding = 0, pad_bytes = 0, text_length = 0;
+ struct iscsi_cmd *cmd;
+ struct kvec iov[3];
+ struct iscsi_text *hdr;
+
+ hdr = (struct iscsi_text *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("Unable to accept text parameter length: %u"
+			" greater than MaxRecvDataSegmentLength %u.\n",
+ payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
+ " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
+ hdr->exp_statsn, payload_length);
+
+ rx_size = text_length = payload_length;
+ if (text_length) {
+ text_in = kzalloc(text_length, GFP_KERNEL);
+ if (!text_in) {
+ pr_err("Unable to allocate memory for"
+ " incoming text parameters\n");
+ return -1;
+ }
+
+ memset(iov, 0, 3 * sizeof(struct kvec));
+ iov[niov].iov_base = text_in;
+ iov[niov++].iov_len = text_length;
+
+ padding = ((-payload_length) & 3);
+ if (padding != 0) {
+ iov[niov].iov_base = &pad_bytes;
+ iov[niov++].iov_len = padding;
+ rx_size += padding;
+ pr_debug("Receiving %u additional bytes"
+ " for padding.\n", padding);
+ }
+ if (conn->conn_ops->DataDigest) {
+ iov[niov].iov_base = &checksum;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &iov[0], niov, rx_size);
+ if (rx_got != rx_size) {
+ kfree(text_in);
+ return -1;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ text_in, text_length,
+ padding, (u8 *)&pad_bytes,
+ (u8 *)&data_crc);
+
+ if (checksum != data_crc) {
+ pr_err("Text data CRC32C DataDigest"
+ " 0x%08x does not match computed"
+ " 0x%08x\n", checksum, data_crc);
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Text Data digest failure while in"
+ " ERL=0.\n");
+ kfree(text_in);
+ return -1;
+ } else {
+ /*
+ * Silently drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_debug("Dropping Text"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ kfree(text_in);
+ return 0;
+ }
+ } else {
+ pr_debug("Got CRC32C DataDigest"
+ " 0x%08x for %u bytes of text data.\n",
+ checksum, text_length);
+ }
+ }
+ text_in[text_length - 1] = '\0';
+ pr_debug("Successfully read %d bytes of text"
+ " data.\n", text_length);
+
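+		/*
+		 * Only the "SendTargets=All" form is accepted here; any
+		 * other Text key or SendTargets value fails the request
+		 * (see the #warning below).
+		 */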
+ if (strncmp("SendTargets", text_in, 11) != 0) {
+ pr_err("Received Text Data that is not"
+ " SendTargets, cannot continue.\n");
+ kfree(text_in);
+ return -1;
+ }
+ text_ptr = strchr(text_in, '=');
+ if (!text_ptr) {
+ pr_err("No \"=\" separator found in Text Data,"
+ " cannot continue.\n");
+ kfree(text_in);
+ return -1;
+ }
+ if (strncmp("=All", text_ptr, 4) != 0) {
+ pr_err("Unable to locate All value for"
+ " SendTargets key, cannot continue.\n");
+ kfree(text_in);
+ return -1;
+ }
+/*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */
+ kfree(text_in);
+ }
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_TEXT;
+ cmd->i_state = ISTATE_SEND_TEXTRSP;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->data_direction = DMA_NONE;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+
+ return 0;
+ }
+
+ return iscsit_execute_cmd(cmd, 0);
+}
+
+int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_conn *conn_p;
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Received logout request CLOSESESSION on CID: %hu"
+ " for SID: %u.\n", conn->cid, conn->sess->sid);
+
+ atomic_set(&sess->session_logout, 1);
+ atomic_set(&conn->conn_logout_remove, 1);
+ conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
+
+ iscsit_inc_conn_usage_count(conn);
+ iscsit_inc_session_usage_count(sess);
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
+ if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
+ continue;
+
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+ conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_conn *l_conn;
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Received logout request CLOSECONNECTION for CID:"
+ " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
+
+ /*
+ * A Logout Request with a CLOSECONNECTION reason code for a CID
+ * can arrive on a connection with a differing CID.
+ */
+ if (conn->cid == cmd->logout_cid) {
+ spin_lock_bh(&conn->state_lock);
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+ conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+
+ atomic_set(&conn->conn_logout_remove, 1);
+ conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_unlock_bh(&conn->state_lock);
+ } else {
+ /*
+ * Handle all different cid CLOSECONNECTION requests in
+		 * iscsit_logout_post_handler_diffcid() so as to give enough
+		 * time for any non-immediate command's CmdSN to be
+ * acknowledged on the connection in question.
+ *
+ * Here we simply make sure the CID is still around.
+ */
+ l_conn = iscsit_get_conn_from_cid(sess,
+ cmd->logout_cid);
+ if (!l_conn) {
+ cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
+ iscsit_add_cmd_to_response_queue(cmd, conn,
+ cmd->i_state);
+ return 0;
+ }
+
+ iscsit_dec_conn_usage_count(l_conn);
+ }
+
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
+ " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
+
+ if (sess->sess_ops->ErrorRecoveryLevel != 2) {
+ pr_err("Received Logout Request REMOVECONNFORRECOVERY"
+ " while ERL!=2.\n");
+ cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ if (conn->cid == cmd->logout_cid) {
+ pr_err("Received Logout Request REMOVECONNFORRECOVERY"
+ " with CID: %hu on CID: %hu, implementation error.\n",
+ cmd->logout_cid, conn->cid);
+ cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+static int iscsit_handle_logout_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ int cmdsn_ret, logout_remove = 0;
+ u8 reason_code = 0;
+ struct iscsi_cmd *cmd;
+ struct iscsi_logout *hdr;
+ struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
+
+ hdr = (struct iscsi_logout *) buf;
+ reason_code = (hdr->flags & 0x7f);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->cid = be16_to_cpu(hdr->cid);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ if (tiqn) {
+ spin_lock(&tiqn->logout_stats.lock);
+ if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
+ tiqn->logout_stats.normal_logouts++;
+ else
+ tiqn->logout_stats.abnormal_logouts++;
+ spin_unlock(&tiqn->logout_stats.lock);
+ }
+
+ pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
+ " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
+ hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
+ hdr->cid, conn->cid);
+
+ if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
+ pr_err("Received logout request on connection that"
+ " is not in logged in state, ignoring request.\n");
+ return 0;
+ }
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
+ buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
+ cmd->i_state = ISTATE_SEND_LOGOUTRSP;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->logout_cid = hdr->cid;
+ cmd->logout_reason = reason_code;
+ cmd->data_direction = DMA_NONE;
+
+ /*
+ * We need to sleep in these cases (by returning 1) until the Logout
+ * Response gets sent in the tx thread.
+ */
+ if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
+ ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
+ (hdr->cid == conn->cid)))
+ logout_remove = 1;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ /*
+ * Immediate commands are executed, well, immediately.
+ * Non-Immediate Logout Commands are executed in CmdSN order.
+ */
+ if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ int ret = iscsit_execute_cmd(cmd, 0);
+
+ if (ret < 0)
+ return ret;
+ } else {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ logout_remove = 0;
+ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+ }
+
+ return logout_remove;
+}
+
+static int iscsit_handle_snack(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ u32 unpacked_lun;
+ u64 lun;
+ struct iscsi_snack *hdr;
+
+ hdr = (struct iscsi_snack *) buf;
+ hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+ lun = get_unaligned_le64(&hdr->lun);
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ hdr->begrun = be32_to_cpu(hdr->begrun);
+ hdr->runlength = be32_to_cpu(hdr->runlength);
+
+ pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
+ " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
+ " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
+ hdr->begrun, hdr->runlength, conn->cid);
+
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Initiator sent SNACK request while in"
+ " ErrorRecoveryLevel=0.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+ /*
+ * SNACK_DATA and SNACK_R2T are both 0, so check which function to
+ * call from inside iscsi_send_recovery_datain_or_r2t().
+ */
+ switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
+ case 0:
+ return iscsit_handle_recovery_datain_or_r2t(conn, buf,
+ hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
+ case ISCSI_FLAG_SNACK_TYPE_STATUS:
+ return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
+ hdr->begrun, hdr->runlength);
+ case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
+ return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun,
+ hdr->runlength);
+ case ISCSI_FLAG_SNACK_TYPE_RDATA:
+ /* FIXME: Support R-Data SNACK */
+ pr_err("R-Data SNACK Not Supported.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ default:
+ pr_err("Unknown SNACK type 0x%02x, protocol"
+ " error.\n", hdr->flags & 0x0f);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ return 0;
+}
+
+static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+ if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+ wait_for_completion_interruptible_timeout(
+ &conn->rx_half_close_comp,
+ ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
+ }
+}
+
+static int iscsit_handle_immediate_data(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u32 length)
+{
+ int iov_ret, rx_got = 0, rx_size = 0;
+ u32 checksum, iov_count = 0, padding = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct kvec *iov;
+
+ iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
+ if (iov_ret < 0)
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+
+ rx_size = length;
+ iov_count = iov_ret;
+ iov = &cmd->iov_data[0];
+
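+	/*
+	 * iSCSI data segments are padded to a 4-byte boundary (RFC 3720);
+	 * ((-length) & 3) yields the pad size, e.g. length == 13 -> 3 pad bytes.
+	 */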
+ padding = ((-length) & 3);
+ if (padding != 0) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = padding;
+ rx_size += padding;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iov[iov_count].iov_base = &checksum;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+
+ iscsit_unmap_iovec(cmd);
+
+ if (rx_got != rx_size) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ u32 data_crc;
+
+ data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+ cmd->write_data_done, length, padding,
+ cmd->pad_bytes);
+
+ if (checksum != data_crc) {
+ pr_err("ImmediateData CRC32C DataDigest 0x%08x"
+ " does not match computed 0x%08x\n", checksum,
+ data_crc);
+
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Immediate Data digest failure while"
+ " in ERL=0.\n");
+ iscsit_add_reject_from_cmd(
+ ISCSI_REASON_DATA_DIGEST_ERROR,
+ 1, 0, buf, cmd);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ } else {
+ iscsit_add_reject_from_cmd(
+ ISCSI_REASON_DATA_DIGEST_ERROR,
+ 0, 0, buf, cmd);
+ return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
+ }
+ } else {
+ pr_debug("Got CRC32C DataDigest 0x%08x for"
+ " %u bytes of Immediate Data\n", checksum,
+ length);
+ }
+ }
+
+ cmd->write_data_done += length;
+
+ if (cmd->write_data_done == cmd->data_length) {
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+ }
+
+ return IMMEDIATE_DATA_NORMAL_OPERATION;
+}
+
+/*
+ * Called with sess->conn_lock held.
+ */
+/* #warning iscsi_build_conn_drop_async_message() only sends out on connections
+   with an active network interface */
+static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd;
+ struct iscsi_conn *conn_p;
+
+ /*
+	 * Only send an Asynchronous Message on connections whose network
+	 * interface is still functional.
+ */
+ list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
+ if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
+ iscsit_inc_conn_usage_count(conn_p);
+ break;
+ }
+ }
+
+ if (!conn_p)
+ return;
+
+ cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
+ if (!cmd) {
+ iscsit_dec_conn_usage_count(conn_p);
+ return;
+ }
+
+ cmd->logout_cid = conn->cid;
+ cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
+ cmd->i_state = ISTATE_SEND_ASYNCMSG;
+
+ spin_lock_bh(&conn_p->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list);
+ spin_unlock_bh(&conn_p->cmd_lock);
+
+ iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
+ iscsit_dec_conn_usage_count(conn_p);
+}
+
+static int iscsit_send_conn_drop_async_message(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_async *hdr;
+
+ cmd->tx_size = ISCSI_HDR_LEN;
+ cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
+
+ hdr = (struct iscsi_async *) cmd->pdu;
+ hdr->opcode = ISCSI_OP_ASYNC_EVENT;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ cmd->init_task_tag = 0xFFFFFFFF;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
+ hdr->param1 = cpu_to_be16(cmd->logout_cid);
+ hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
+ hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
+
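+	/*
+	 * With HeaderDigest=CRC32C negotiated, the 32-bit digest is computed
+	 * over the 48-byte BHS and placed directly after the header in
+	 * cmd->pdu, growing the transmit size by ISCSI_CRC_LEN.
+	 */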
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ cmd->tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest to"
+ " Async Message 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = cmd->tx_size;
+ cmd->iov_misc_count = 1;
+
+ pr_debug("Sending Connection Dropped Async Message StatSN:"
+ " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
+ cmd->logout_cid, conn->cid);
+ return 0;
+}
+
+static int iscsit_send_data_in(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int *eodr)
+{
+ int iov_ret = 0, set_statsn = 0;
+ u32 iov_count = 0, tx_size = 0;
+ struct iscsi_datain datain;
+ struct iscsi_datain_req *dr;
+ struct iscsi_data_rsp *hdr;
+ struct kvec *iov;
+
+ memset(&datain, 0, sizeof(struct iscsi_datain));
+ dr = iscsit_get_datain_values(cmd, &datain);
+ if (!dr) {
+ pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
+ cmd->init_task_tag);
+ return -1;
+ }
+
+ /*
+ * Be paranoid and double check the logic for now.
+ */
+ if ((datain.offset + datain.length) > cmd->data_length) {
+ pr_err("Command ITT: 0x%08x, datain.offset: %u and"
+ " datain.length: %u exceeds cmd->data_length: %u\n",
+ cmd->init_task_tag, datain.offset, datain.length,
+ cmd->data_length);
+ return -1;
+ }
+
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->tx_data_octets += datain.length;
+ if (conn->sess->se_sess->se_node_acl) {
+ spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
+ spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ }
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+ /*
+	 * Special case for successful execution w/ both DATAIN
+	 * and Sense Data.
+ */
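+	/*
+	 * When status is carried separately (sense data present), the
+	 * DATA_STATUS flag is cleared so a SCSI Response PDU follows.
+	 * Otherwise StatSN is only advanced when this DataIN completes the
+	 * sequence; intermediate PDUs carry 0xFFFFFFFF in the StatSN field.
+	 */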
+ if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
+ (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
+ datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
+ else {
+ if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
+ (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ cmd->stat_sn = conn->stat_sn++;
+ set_statsn = 1;
+ } else if (dr->dr_complete ==
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
+ set_statsn = 1;
+ }
+
+ hdr = (struct iscsi_data_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
+ hdr->flags = datain.flags;
+ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+ if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ }
+ }
+ hton24(hdr->dlength, datain.length);
+ if (hdr->flags & ISCSI_FLAG_DATA_ACK)
+ int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+ (struct scsi_lun *)&hdr->lun);
+ else
+ put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ?
+ cpu_to_be32(cmd->targ_xfer_tag) :
+ 0xFFFFFFFF;
+ hdr->statsn = (set_statsn) ? cpu_to_be32(cmd->stat_sn) :
+ 0xFFFFFFFF;
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->datasn = cpu_to_be32(datain.data_sn);
+ hdr->offset = cpu_to_be32(datain.offset);
+
+ iov = &cmd->iov_data[0];
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching CRC32 HeaderDigest"
+ " for DataIN PDU 0x%08x\n", *header_digest);
+ }
+
+ iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length);
+ if (iov_ret < 0)
+ return -1;
+
+ iov_count += iov_ret;
+ tx_size += datain.length;
+
+ cmd->padding = ((-datain.length) & 3);
+ if (cmd->padding) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = cmd->padding;
+ tx_size += cmd->padding;
+
+ pr_debug("Attaching %u padding bytes\n",
+ cmd->padding);
+ }
+ if (conn->conn_ops->DataDigest) {
+ cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
+ datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attached CRC32C DataDigest %d bytes, crc"
+ " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
+ }
+
+ cmd->iov_data_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
+ " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+ cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
+ ntohl(hdr->offset), datain.length, conn->cid);
+
+ if (dr->dr_complete) {
+ *eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
+ 2 : 1;
+ iscsit_free_datain_req(cmd, dr);
+ }
+
+ return 0;
+}
+
+static int iscsit_send_logout_response(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int niov = 0, tx_size;
+ struct iscsi_conn *logout_conn = NULL;
+ struct iscsi_conn_recovery *cr = NULL;
+ struct iscsi_session *sess = conn->sess;
+ struct kvec *iov;
+ struct iscsi_logout_rsp *hdr;
+ /*
+ * The actual shutting down of Sessions and/or Connections
+ * for CLOSESESSION and CLOSECONNECTION Logout Requests
+	 * is done in iscsit_logout_post_handler().
+ */
+ switch (cmd->logout_reason) {
+ case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+ pr_debug("iSCSI session logout successful, setting"
+ " logout response to ISCSI_LOGOUT_SUCCESS.\n");
+ cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+ break;
+ case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+ if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
+ break;
+ /*
+ * For CLOSECONNECTION logout requests carrying
+ * a matching logout CID -> local CID, the reference
+ * for the local CID will have been incremented in
+ * iscsi_logout_closeconnection().
+ *
+ * For CLOSECONNECTION logout requests carrying
+ * a different CID than the connection it arrived
+ * on, the connection responding to cmd->logout_cid
+ * is stopped in iscsit_logout_post_handler_diffcid().
+ */
+
+ pr_debug("iSCSI CID: %hu logout on CID: %hu"
+ " successful.\n", cmd->logout_cid, conn->cid);
+ cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+ break;
+ case ISCSI_LOGOUT_REASON_RECOVERY:
+ if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
+ (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
+ break;
+ /*
+ * If the connection is still active from our point of view
+ * force connection recovery to occur.
+ */
+ logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
+ cmd->logout_cid);
+		if (logout_conn) {
+ iscsit_connection_reinstatement_rcfr(logout_conn);
+ iscsit_dec_conn_usage_count(logout_conn);
+ }
+
+ cr = iscsit_get_inactive_connection_recovery_entry(
+ conn->sess, cmd->logout_cid);
+ if (!cr) {
+ pr_err("Unable to locate CID: %hu for"
+ " REMOVECONNFORRECOVERY Logout Request.\n",
+ cmd->logout_cid);
+ cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
+ break;
+ }
+
+ iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
+
+ pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
+ " for recovery for CID: %hu on CID: %hu successful.\n",
+ cmd->logout_cid, conn->cid);
+ cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+ break;
+ default:
+ pr_err("Unknown cmd->logout_reason: 0x%02x\n",
+ cmd->logout_reason);
+ return -1;
+ }
+
+ tx_size = ISCSI_HDR_LEN;
+ hdr = (struct iscsi_logout_rsp *)cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_LOGOUT_RSP;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->response = cmd->logout_response;
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = cmd->pdu;
+ iov[niov++].iov_len = ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest to"
+ " Logout Response 0x%08x\n", *header_digest);
+ }
+ cmd->iov_misc_count = niov;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
+ " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
+ cmd->init_task_tag, cmd->stat_sn, hdr->response,
+ cmd->logout_cid, conn->cid);
+
+ return 0;
+}
+
+/*
+ * Unsolicited NOPIN, either requesting a response or not.
+ */
+static int iscsit_send_unsolicited_nopin(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int want_response)
+{
+ int tx_size = ISCSI_HDR_LEN;
+ struct iscsi_nopin *hdr;
+
+ hdr = (struct iscsi_nopin *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_NOOP_IN;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ cmd->stat_sn = conn->stat_sn;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest to"
+ " NopIN 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = tx_size;
+ cmd->iov_misc_count = 1;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
+ " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
+
+ return 0;
+}
+
+static int iscsit_send_nopin_response(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int niov = 0, tx_size;
+ u32 padding = 0;
+ struct kvec *iov;
+ struct iscsi_nopin *hdr;
+
+ tx_size = ISCSI_HDR_LEN;
+ hdr = (struct iscsi_nopin *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_NOOP_IN;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, cmd->buf_ptr_size);
+ put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = cmd->pdu;
+ iov[niov++].iov_len = ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest"
+ " to NopIn 0x%08x\n", *header_digest);
+ }
+
+ /*
+ * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
+ * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
+ */
+ if (cmd->buf_ptr_size) {
+ iov[niov].iov_base = cmd->buf_ptr;
+ iov[niov++].iov_len = cmd->buf_ptr_size;
+ tx_size += cmd->buf_ptr_size;
+
+ pr_debug("Echoing back %u bytes of ping"
+ " data.\n", cmd->buf_ptr_size);
+
+ padding = ((-cmd->buf_ptr_size) & 3);
+ if (padding != 0) {
+ iov[niov].iov_base = &cmd->pad_bytes;
+ iov[niov++].iov_len = padding;
+ tx_size += padding;
+ pr_debug("Attaching %u additional"
+ " padding bytes.\n", padding);
+ }
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ cmd->buf_ptr, cmd->buf_ptr_size,
+ padding, (u8 *)&cmd->pad_bytes,
+ (u8 *)&cmd->data_crc);
+
+ iov[niov].iov_base = &cmd->data_crc;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attached DataDigest for %u"
+ " bytes of ping data, CRC 0x%08x\n",
+ cmd->buf_ptr_size, cmd->data_crc);
+ }
+ }
+
+ cmd->iov_misc_count = niov;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
+ " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
+ cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
+
+ return 0;
+}
+
+int iscsit_send_r2t(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int tx_size = 0;
+ struct iscsi_r2t *r2t;
+ struct iscsi_r2t_rsp *hdr;
+
+ r2t = iscsit_get_r2t_from_list(cmd);
+ if (!r2t)
+ return -1;
+
+ hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_R2T;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+ (struct scsi_lun *)&hdr->lun);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
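+	/*
+	 * Allocate the next Target Transfer Tag under ttt_lock; 0xFFFFFFFF is
+	 * the reserved TTT value, so skip it when the counter wraps to it.
+	 */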
+ spin_lock_bh(&conn->sess->ttt_lock);
+ r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ if (r2t->targ_xfer_tag == 0xFFFFFFFF)
+ r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ spin_unlock_bh(&conn->sess->ttt_lock);
+ hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
+ hdr->statsn = cpu_to_be32(conn->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
+ hdr->data_offset = cpu_to_be32(r2t->offset);
+ hdr->data_length = cpu_to_be32(r2t->xfer_len);
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for R2T"
+ " PDU 0x%08x\n", *header_digest);
+ }
+
+ pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
+ " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
+ (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
+ r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
+ r2t->offset, r2t->xfer_len, conn->cid);
+
+ cmd->iov_misc_count = 1;
+ cmd->tx_size = tx_size;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ r2t->sent_r2t = 1;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+/*
+ * type 0: Normal Operation.
+ * type 1: Called from Storage Transport.
+ * type 2: Called from iscsi_task_reassign_complete_write() for
+ * connection recovery.
+ */
+int iscsit_build_r2ts_for_cmd(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int type)
+{
+ int first_r2t = 1;
+ u32 offset = 0, xfer_len = 0;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return 0;
+ }
+
+ if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2))
+ if (cmd->r2t_offset < cmd->write_data_done)
+ cmd->r2t_offset = cmd->write_data_done;
+
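+	/*
+	 * Queue R2Ts until MaxOutstandingR2T are outstanding.  With
+	 * DataSequenceInOrder each R2T requests up to MaxBurstLength bytes
+	 * starting at r2t_offset (the first recovery R2T only asks for the
+	 * remainder of the current burst); otherwise offset and length come
+	 * from the pre-built sequence list.
+	 */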
+ while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ offset = cmd->r2t_offset;
+
+ if (first_r2t && (type == 2)) {
+ xfer_len = ((offset +
+ (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len) >
+ cmd->data_length) ?
+ (cmd->data_length - offset) :
+ (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len));
+ } else {
+ xfer_len = ((offset +
+ conn->sess->sess_ops->MaxBurstLength) >
+ cmd->data_length) ?
+ (cmd->data_length - offset) :
+ conn->sess->sess_ops->MaxBurstLength;
+ }
+ cmd->r2t_offset += xfer_len;
+
+ if (cmd->r2t_offset == cmd->data_length)
+ cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+ } else {
+ struct iscsi_seq *seq;
+
+ seq = iscsit_get_seq_holder_for_r2t(cmd);
+ if (!seq) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ offset = seq->offset;
+ xfer_len = seq->xfer_len;
+
+ if (cmd->seq_send_order == cmd->seq_count)
+ cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+ }
+ cmd->outstanding_r2ts++;
+ first_r2t = 0;
+
+ if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
+ break;
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+static int iscsit_send_status(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ u8 iov_count = 0, recovery;
+ u32 padding = 0, tx_size = 0;
+ struct iscsi_scsi_rsp *hdr;
+ struct kvec *iov;
+
+ recovery = (cmd->i_state != ISTATE_SEND_STATUS);
+ if (!recovery)
+ cmd->stat_sn = conn->stat_sn++;
+
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->rsp_pdus++;
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+
+ hdr = (struct iscsi_scsi_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ }
+ hdr->response = cmd->iscsi_response;
+ hdr->cmd_status = cmd->se_cmd.scsi_status;
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ /*
+ * Attach SENSE DATA payload to iSCSI Response PDU
+ */
+ if (cmd->se_cmd.sense_buffer &&
+ ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+ (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
+ padding = -(cmd->se_cmd.scsi_sense_length) & 3;
+ hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length);
+ iov[iov_count].iov_base = cmd->se_cmd.sense_buffer;
+ iov[iov_count++].iov_len =
+ (cmd->se_cmd.scsi_sense_length + padding);
+ tx_size += cmd->se_cmd.scsi_sense_length;
+
+ if (padding) {
+ memset(cmd->se_cmd.sense_buffer +
+ cmd->se_cmd.scsi_sense_length, 0, padding);
+ tx_size += padding;
+ pr_debug("Adding %u bytes of padding to"
+ " SENSE.\n", padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ cmd->se_cmd.sense_buffer,
+ (cmd->se_cmd.scsi_sense_length + padding),
+ 0, NULL, (u8 *)&cmd->data_crc);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching CRC32 DataDigest for"
+ " SENSE, %u bytes CRC 0x%08x\n",
+ (cmd->se_cmd.scsi_sense_length + padding),
+ cmd->data_crc);
+ }
+
+ pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
+ " Response PDU\n",
+ cmd->se_cmd.scsi_sense_length);
+ }
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for Response"
+ " PDU 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
+ " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
+ (!recovery) ? "" : "Recovery ", cmd->init_task_tag,
+ cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid);
+
+ return 0;
+}
+
+static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
+{
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ return ISCSI_TMF_RSP_COMPLETE;
+ case TMR_TASK_DOES_NOT_EXIST:
+ return ISCSI_TMF_RSP_NO_TASK;
+ case TMR_LUN_DOES_NOT_EXIST:
+ return ISCSI_TMF_RSP_NO_LUN;
+ case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+ return ISCSI_TMF_RSP_NOT_SUPPORTED;
+ case TMR_FUNCTION_AUTHORIZATION_FAILED:
+ return ISCSI_TMF_RSP_AUTH_FAILED;
+ case TMR_FUNCTION_REJECTED:
+ default:
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+}
+
+static int iscsit_send_task_mgt_rsp(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+ struct iscsi_tm_rsp *hdr;
+ u32 tx_size = 0;
+
+ hdr = (struct iscsi_tm_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for Task"
+ " Mgmt Response PDU 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc_count = 1;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built Task Management Response ITT: 0x%08x,"
+ " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
+ cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
+
+ return 0;
+}
+
+static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+{
+ char *payload = NULL;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tiqn *tiqn;
+ struct iscsi_tpg_np *tpg_np;
+ int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+ unsigned char buf[256];
+
+ buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ?
+ 32768 : conn->conn_ops->MaxRecvDataSegmentLength;
+
+ memset(buf, 0, 256);
+
+ payload = kzalloc(buffer_len, GFP_KERNEL);
+ if (!payload) {
+ pr_err("Unable to allocate memory for sendtargets"
+ " response.\n");
+ return -ENOMEM;
+ }
+
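+	/*
+	 * Build the SendTargets response: a NULL-terminated "TargetName=<iqn>"
+	 * key for each TIQN, followed by "TargetAddress=<addr>:<port>,<tpgt>"
+	 * keys for every active portal (the len += 1 accounts for the NULL
+	 * separator between text keys).
+	 */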
+ spin_lock(&tiqn_lock);
+ list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
+ len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+ memcpy((void *)payload + payload_len, buf, len);
+ payload_len += len;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+ spin_lock(&tpg->tpg_state_lock);
+ if ((tpg->tpg_state == TPG_STATE_FREE) ||
+ (tpg->tpg_state == TPG_STATE_INACTIVE)) {
+ spin_unlock(&tpg->tpg_state_lock);
+ continue;
+ }
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
+ tpg_np_list) {
+ len = sprintf(buf, "TargetAddress="
+ "%s%s%s:%hu,%hu",
+ (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
+ "[" : "", tpg_np->tpg_np->np_ip,
+ (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
+ "]" : "", tpg_np->tpg_np->np_port,
+ tpg->tpgt);
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+ spin_unlock(&tpg->tpg_np_lock);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+ memcpy((void *)payload + payload_len, buf, len);
+ payload_len += len;
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+eob:
+ if (end_of_buf)
+ break;
+ }
+ spin_unlock(&tiqn_lock);
+
+ cmd->buf_ptr = payload;
+
+ return payload_len;
+}
+
+/*
+ * FIXME: Add support for F_BIT and C_BIT when the length is longer than
+ * MaxRecvDataSegmentLength.
+ */
+static int iscsit_send_text_rsp(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_text_rsp *hdr;
+ struct kvec *iov;
+ u32 padding = 0, tx_size = 0;
+ int text_length, iov_count = 0;
+
+ text_length = iscsit_build_sendtargets_response(cmd);
+ if (text_length < 0)
+ return text_length;
+
+ padding = ((-text_length) & 3);
+ if (padding != 0) {
+ memset(cmd->buf_ptr + text_length, 0, padding);
+ pr_debug("Attaching %u additional bytes for"
+ " padding.\n", padding);
+ }
+
+ hdr = (struct iscsi_text_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_TEXT_RSP;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, text_length);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ iov[iov_count].iov_base = cmd->buf_ptr;
+ iov[iov_count++].iov_len = text_length + padding;
+
+ tx_size += (ISCSI_HDR_LEN + text_length + padding);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for"
+ " Text Response PDU 0x%08x\n", *header_digest);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ cmd->buf_ptr, (text_length + padding),
+ 0, NULL, (u8 *)&cmd->data_crc);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching DataDigest for %u bytes of text"
+ " data, CRC 0x%08x\n", (text_length + padding),
+ cmd->data_crc);
+ }
+
+ cmd->iov_misc_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
+ " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
+ text_length, conn->cid);
+ return 0;
+}
+
+static int iscsit_send_reject(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ u32 iov_count = 0, tx_size = 0;
+ struct iscsi_reject *hdr;
+ struct kvec *iov;
+
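+	/*
+	 * A Reject PDU carries the offending 48-byte PDU header as its data
+	 * segment (hence dlength == ISCSI_HDR_LEN); it is assumed to have
+	 * been saved into cmd->buf_ptr by the reject setup path.
+	 */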
+ hdr = (struct iscsi_reject *) cmd->pdu;
+ hdr->opcode = ISCSI_OP_REJECT;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, ISCSI_HDR_LEN);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ iov[iov_count].iov_base = cmd->buf_ptr;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+
+ tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for"
+ " REJECT PDU 0x%08x\n", *header_digest);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)&cmd->data_crc);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 DataDigest for REJECT"
+ " PDU 0x%08x\n", cmd->data_crc);
+ }
+
+ cmd->iov_misc_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
+ " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
+
+ return 0;
+}
+
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+ if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+ wait_for_completion_interruptible_timeout(
+ &conn->tx_half_close_comp,
+ ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
+ }
+}
+
+#ifdef CONFIG_SMP
+
+void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+{
+ struct iscsi_thread_set *ts = conn->thread_set;
+ int ord, cpu;
+ /*
+ * thread_id is assigned from iscsit_global->ts_bitmap from
+ * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
+ *
+	 * Here we use thread_id to determine which CPU this
+ * iSCSI connection's iscsi_thread_set will be scheduled to
+ * execute upon.
+ */
+ ord = ts->thread_id % cpumask_weight(cpu_online_mask);
+#if 0
+ pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from"
+ " thread_id: %d\n", ord, ts->thread_id);
+#endif
+ for_each_online_cpu(cpu) {
+ if (ord-- == 0) {
+ cpumask_set_cpu(cpu, conn->conn_cpumask);
+ return;
+ }
+ }
+ /*
+	 * This should never be reached.
+ */
+ dump_stack();
+ cpumask_setall(conn->conn_cpumask);
+}
+
+static inline void iscsit_thread_check_cpumask(
+ struct iscsi_conn *conn,
+ struct task_struct *p,
+ int mode)
+{
+ char buf[128];
+ /*
+ * mode == 1 signals iscsi_target_tx_thread() usage.
+ * mode == 0 signals iscsi_target_rx_thread() usage.
+ */
+ if (mode == 1) {
+ if (!conn->conn_tx_reset_cpumask)
+ return;
+ conn->conn_tx_reset_cpumask = 0;
+ } else {
+ if (!conn->conn_rx_reset_cpumask)
+ return;
+ conn->conn_rx_reset_cpumask = 0;
+ }
+ /*
+ * Update the CPU mask for this single kthread so that
+ * both TX and RX kthreads are scheduled to run on the
+ * same CPU.
+ */
+ memset(buf, 0, 128);
+ cpumask_scnprintf(buf, 128, conn->conn_cpumask);
+#if 0
+ pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():"
+ " %s for %s\n", buf, p->comm);
+#endif
+ set_cpus_allowed_ptr(p, conn->conn_cpumask);
+}
+
+#else
+
+void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+{
+ return;
+}
+
+#define iscsit_thread_check_cpumask(X, Y, Z) ({})
+#endif /* CONFIG_SMP */
+
+int iscsi_target_tx_thread(void *arg)
+{
+ u8 state;
+ int eodr = 0;
+ int ret = 0;
+ int sent_status = 0;
+ int use_misc = 0;
+ int map_sg = 0;
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_conn *conn;
+ struct iscsi_queue_req *qr = NULL;
+ struct se_cmd *se_cmd;
+ struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+
+restart:
+ conn = iscsi_tx_thread_pre_handler(ts);
+ if (!conn)
+ goto out;
+
+ eodr = map_sg = ret = sent_status = use_misc = 0;
+
+ while (!kthread_should_stop()) {
+ /*
+ * Ensure that both TX and RX per connection kthreads
+ * are scheduled to run on the same CPU.
+ */
+ iscsit_thread_check_cpumask(conn, current, 1);
+
+ schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+
+ if ((ts->status == ISCSI_THREAD_SET_RESET) ||
+ signal_pending(current))
+ goto transport_err;
+
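+		/*
+		 * Drain the immediate queue (R2Ts, NopIns, command removals)
+		 * first, then the response queue; a set check_immediate_queue
+		 * flag pulls us back to the immediate queue between responses.
+		 */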
+get_immediate:
+ qr = iscsit_get_cmd_from_immediate_queue(conn);
+ if (qr) {
+ atomic_set(&conn->check_immediate_queue, 0);
+ cmd = qr->cmd;
+ state = qr->state;
+ kmem_cache_free(lio_qr_cache, qr);
+
+ spin_lock_bh(&cmd->istate_lock);
+ switch (state) {
+ case ISTATE_SEND_R2T:
+ spin_unlock_bh(&cmd->istate_lock);
+ ret = iscsit_send_r2t(cmd, conn);
+ break;
+ case ISTATE_REMOVE:
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ /*
+				 * Determine if a struct se_cmd is associated with
+ * this struct iscsi_cmd.
+ */
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
+ !(cmd->tmr_req))
+ iscsit_release_cmd(cmd);
+ else
+ transport_generic_free_cmd(&cmd->se_cmd,
+ 1, 0);
+ goto get_immediate;
+ case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_mod_nopin_response_timer(conn);
+ ret = iscsit_send_unsolicited_nopin(cmd,
+ conn, 1);
+ break;
+ case ISTATE_SEND_NOPIN_NO_RESPONSE:
+ spin_unlock_bh(&cmd->istate_lock);
+ ret = iscsit_send_unsolicited_nopin(cmd,
+ conn, 0);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag, state,
+ conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+ if (ret < 0) {
+ conn->tx_immediate_queue = 0;
+ goto transport_err;
+ }
+
+ if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
+ conn->tx_immediate_queue = 0;
+ iscsit_tx_thread_wait_for_tcp(conn);
+ goto transport_err;
+ }
+
+ spin_lock_bh(&cmd->istate_lock);
+ switch (state) {
+ case ISTATE_SEND_R2T:
+ spin_unlock_bh(&cmd->istate_lock);
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ break;
+ case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE;
+ spin_unlock_bh(&cmd->istate_lock);
+ break;
+ case ISTATE_SEND_NOPIN_NO_RESPONSE:
+ cmd->i_state = ISTATE_SENT_STATUS;
+ spin_unlock_bh(&cmd->istate_lock);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ state, conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+ goto get_immediate;
+ } else
+ conn->tx_immediate_queue = 0;
+
+get_response:
+ qr = iscsit_get_cmd_from_response_queue(conn);
+ if (qr) {
+ cmd = qr->cmd;
+ state = qr->state;
+ kmem_cache_free(lio_qr_cache, qr);
+
+ spin_lock_bh(&cmd->istate_lock);
+check_rsp_state:
+ switch (state) {
+ case ISTATE_SEND_DATAIN:
+ spin_unlock_bh(&cmd->istate_lock);
+ ret = iscsit_send_data_in(cmd, conn,
+ &eodr);
+ map_sg = 1;
+ break;
+ case ISTATE_SEND_STATUS:
+ case ISTATE_SEND_STATUS_RECOVERY:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_status(cmd, conn);
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_logout_response(cmd, conn);
+ break;
+ case ISTATE_SEND_ASYNCMSG:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_conn_drop_async_message(
+ cmd, conn);
+ break;
+ case ISTATE_SEND_NOPIN:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_nopin_response(cmd, conn);
+ break;
+ case ISTATE_SEND_REJECT:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_reject(cmd, conn);
+ break;
+ case ISTATE_SEND_TASKMGTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_task_mgt_rsp(cmd, conn);
+ if (ret != 0)
+ break;
+ ret = iscsit_tmr_post_handler(cmd, conn);
+ if (ret != 0)
+ iscsit_fall_back_to_erl0(conn->sess);
+ break;
+ case ISTATE_SEND_TEXTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_text_rsp(cmd, conn);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ state, conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+ if (ret < 0) {
+ conn->tx_response_queue = 0;
+ goto transport_err;
+ }
+
+ se_cmd = &cmd->se_cmd;
+
+ if (map_sg && !conn->conn_ops->IFMarker) {
+ if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
+ conn->tx_response_queue = 0;
+ iscsit_tx_thread_wait_for_tcp(conn);
+ iscsit_unmap_iovec(cmd);
+ goto transport_err;
+ }
+ } else {
+ if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) {
+ conn->tx_response_queue = 0;
+ iscsit_tx_thread_wait_for_tcp(conn);
+ iscsit_unmap_iovec(cmd);
+ goto transport_err;
+ }
+ }
+ map_sg = 0;
+ iscsit_unmap_iovec(cmd);
+
+ spin_lock_bh(&cmd->istate_lock);
+ switch (state) {
+ case ISTATE_SEND_DATAIN:
+ if (!eodr)
+ goto check_rsp_state;
+
+ if (eodr == 1) {
+ cmd->i_state = ISTATE_SENT_LAST_DATAIN;
+ sent_status = 1;
+ eodr = use_misc = 0;
+ } else if (eodr == 2) {
+ cmd->i_state = state =
+ ISTATE_SEND_STATUS;
+ sent_status = 0;
+ eodr = use_misc = 0;
+ goto check_rsp_state;
+ }
+ break;
+ case ISTATE_SEND_STATUS:
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ case ISTATE_SEND_ASYNCMSG:
+ case ISTATE_SEND_NOPIN:
+ case ISTATE_SEND_STATUS_RECOVERY:
+ case ISTATE_SEND_TEXTRSP:
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ case ISTATE_SEND_REJECT:
+ use_misc = 0;
+ if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
+ cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
+ spin_unlock_bh(&cmd->istate_lock);
+ complete(&cmd->reject_comp);
+ goto transport_err;
+ }
+ complete(&cmd->reject_comp);
+ break;
+ case ISTATE_SEND_TASKMGTRSP:
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ if (!iscsit_logout_post_handler(cmd, conn))
+ goto restart;
+ spin_lock_bh(&cmd->istate_lock);
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ cmd->i_state, conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+
+ if (sent_status) {
+ cmd->i_state = ISTATE_SENT_STATUS;
+ sent_status = 0;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (atomic_read(&conn->check_immediate_queue))
+ goto get_immediate;
+
+ goto get_response;
+ } else
+ conn->tx_response_queue = 0;
+ }
+
+transport_err:
+ iscsit_take_action_for_connection_exit(conn);
+ goto restart;
+out:
+ return 0;
+}
+
+int iscsi_target_rx_thread(void *arg)
+{
+ int ret;
+ u8 buffer[ISCSI_HDR_LEN], opcode;
+ u32 checksum = 0, digest = 0;
+ struct iscsi_conn *conn = NULL;
+ struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ struct kvec iov;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+
+restart:
+ conn = iscsi_rx_thread_pre_handler(ts);
+ if (!conn)
+ goto out;
+
+ while (!kthread_should_stop()) {
+ /*
+ * Ensure that both TX and RX per connection kthreads
+ * are scheduled to run on the same CPU.
+ */
+ iscsit_thread_check_cpumask(conn, current, 0);
+
+ memset(buffer, 0, ISCSI_HDR_LEN);
+ memset(&iov, 0, sizeof(struct kvec));
+
+ iov.iov_base = buffer;
+ iov.iov_len = ISCSI_HDR_LEN;
+
+ ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
+ if (ret != ISCSI_HDR_LEN) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ goto transport_err;
+ }
+
+ /*
+ * Set conn->bad_hdr for use with REJECT PDUs.
+ */
+ memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN);
+
+ if (conn->conn_ops->HeaderDigest) {
+ iov.iov_base = &digest;
+ iov.iov_len = ISCSI_CRC_LEN;
+
+ ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+ if (ret != ISCSI_CRC_LEN) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ goto transport_err;
+ }
+
+ iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ buffer, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)&checksum);
+
+ if (digest != checksum) {
+ pr_err("HeaderDigest CRC32C failed,"
+ " received 0x%08x, computed 0x%08x\n",
+ digest, checksum);
+ /*
+ * Set the PDU to 0xff so it will intentionally
+				 * hit the default case in the switch below.
+ */
+ memset(buffer, 0xff, ISCSI_HDR_LEN);
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->conn_digest_errors++;
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+ } else {
+ pr_debug("Got HeaderDigest CRC32C"
+ " 0x%08x\n", checksum);
+ }
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+ goto transport_err;
+
+ opcode = buffer[0] & ISCSI_OPCODE_MASK;
+
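+		/*
+		 * A Discovery session (SessionType=Discovery) may only carry
+		 * Text and Logout requests; reject anything else.
+		 */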
+ if (conn->sess->sess_ops->SessionType &&
+ ((!(opcode & ISCSI_OP_TEXT)) ||
+ (!(opcode & ISCSI_OP_LOGOUT)))) {
+ pr_err("Received illegal iSCSI Opcode: 0x%02x"
+ " while in Discovery Session, rejecting.\n", opcode);
+ iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buffer, conn);
+ goto transport_err;
+ }
+
+ switch (opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_SCSI_DATA_OUT:
+ if (iscsit_handle_data_out(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ if (iscsit_handle_nop_out(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_TEXT:
+ if (iscsit_handle_text_cmd(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_LOGOUT:
+ ret = iscsit_handle_logout_cmd(conn, buffer);
+ if (ret > 0) {
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP * HZ);
+ goto transport_err;
+ } else if (ret < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_SNACK:
+ if (iscsit_handle_snack(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ default:
+ pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
+ opcode);
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Cannot recover from unknown"
+ " opcode while ERL=0, closing iSCSI connection"
+ ".\n");
+ goto transport_err;
+ }
+ if (!conn->conn_ops->OFMarker) {
+ pr_err("Unable to recover from unknown"
+ " opcode while OFMarker=No, closing iSCSI"
+ " connection.\n");
+ goto transport_err;
+ }
+ if (iscsit_recover_from_unknown_opcode(conn) < 0) {
+ pr_err("Unable to recover from unknown"
+ " opcode, closing iSCSI connection.\n");
+ goto transport_err;
+ }
+ break;
+ }
+ }
+
+transport_err:
+ if (!signal_pending(current))
+ atomic_set(&conn->transport_failed, 1);
+ iscsit_take_action_for_connection_exit(conn);
+ goto restart;
+out:
+ return 0;
+}
+
+static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
+ struct iscsi_session *sess = conn->sess;
+ struct se_cmd *se_cmd;
+ /*
+ * We expect this function to only ever be called from either RX or TX
+ * thread context via iscsit_close_connection() once the other context
+	 * has been reset and has returned to its sleeping pre-handler state.
+ */
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) {
+
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ iscsit_increment_maxcmdsn(cmd, sess);
+ se_cmd = &cmd->se_cmd;
+ /*
+ * Special cases for active iSCSI TMR, and
+ * transport_lookup_cmd_lun() failing from
+ * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd().
+ */
+ if (cmd->tmr_req && se_cmd->transport_wait_for_tasks)
+ se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
+ else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
+ transport_release_cmd(se_cmd);
+ else
+ iscsit_release_cmd(cmd);
+
+ spin_lock_bh(&conn->cmd_lock);
+ continue;
+ }
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_increment_maxcmdsn(cmd, sess);
+ se_cmd = &cmd->se_cmd;
+
+ if (se_cmd->transport_wait_for_tasks)
+ se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
+
+ spin_lock_bh(&conn->cmd_lock);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+}
+
+static void iscsit_stop_timers_for_cmds(
+ struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+}
+
+int iscsit_close_connection(
+ struct iscsi_conn *conn)
+{
+ int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Closing iSCSI connection CID %hu on SID:"
+ " %u\n", conn->cid, sess->sid);
+ /*
+	 * Always complete conn_logout_comp just in case the RX Thread is sleeping
+ * and the logout response never got sent because the connection
+ * failed.
+ */
+ complete(&conn->conn_logout_comp);
+
+ iscsi_release_thread_set(conn);
+
+ iscsit_stop_timers_for_cmds(conn);
+ iscsit_stop_nopin_response_timer(conn);
+ iscsit_stop_nopin_timer(conn);
+ iscsit_free_queue_reqs_for_conn(conn);
+
+ /*
+	 * During Connection recovery, drop unacknowledged out-of-order
+	 * commands for this connection, and prepare the other commands
+	 * for reallegiance to a new connection.
+	 *
+	 * During normal operation, clear the out-of-order commands (but
+	 * do not free the struct iscsi_ooo_cmdsn's) and release all
+ * struct iscsi_cmds.
+ */
+ if (atomic_read(&conn->connection_recovery)) {
+ iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
+ iscsit_prepare_cmds_for_realligance(conn);
+ } else {
+ iscsit_clear_ooo_cmdsns_for_conn(conn);
+ iscsit_release_commands_from_conn(conn);
+ }
+
+ /*
+ * Handle decrementing session or connection usage count if
+ * a logout response was not able to be sent because the
+ * connection failed. Fall back to Session Recovery here.
+ */
+ if (atomic_read(&conn->conn_logout_remove)) {
+ if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
+ iscsit_dec_conn_usage_count(conn);
+ iscsit_dec_session_usage_count(sess);
+ }
+ if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
+ iscsit_dec_conn_usage_count(conn);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ atomic_set(&sess->session_reinstatement, 0);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+ }
+
+ spin_lock_bh(&sess->conn_lock);
+ list_del(&conn->conn_list);
+
+ /*
+ * Attempt to let the Initiator know this connection failed by
+	 * sending a Connection Dropped Async Message on another
+ * active connection.
+ */
+ if (atomic_read(&conn->connection_recovery))
+ iscsit_build_conn_drop_async_message(conn);
+
+ spin_unlock_bh(&sess->conn_lock);
+
+ /*
+ * If connection reinstatement is being performed on this connection,
+	 * complete conn_wait_comp, which is being waited on
+	 * in iscsit_cause_connection_reinstatement().
+ */
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
+ spin_unlock_bh(&conn->state_lock);
+ complete(&conn->conn_wait_comp);
+ wait_for_completion(&conn->conn_post_wait_comp);
+ spin_lock_bh(&conn->state_lock);
+ }
+
+ /*
+ * If connection reinstatement is being performed on this connection
+	 * by receiving a REMOVECONNFORRECOVERY logout request, complete
+	 * conn_wait_rcfr_comp, which is being waited on in
+	 * iscsit_connection_reinstatement_rcfr().
+ */
+ if (atomic_read(&conn->connection_wait_rcfr)) {
+ spin_unlock_bh(&conn->state_lock);
+ complete(&conn->conn_wait_rcfr_comp);
+ wait_for_completion(&conn->conn_post_wait_comp);
+ spin_lock_bh(&conn->state_lock);
+ }
+ atomic_set(&conn->connection_reinstatement, 1);
+ spin_unlock_bh(&conn->state_lock);
+
+ /*
+ * If any other processes are accessing this connection pointer we
+ * must wait until they have completed.
+ */
+ iscsit_check_conn_usage_count(conn);
+
+ if (conn->conn_rx_hash.tfm)
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ if (conn->conn_tx_hash.tfm)
+ crypto_free_hash(conn->conn_tx_hash.tfm);
+
+ if (conn->conn_cpumask)
+ free_cpumask_var(conn->conn_cpumask);
+
+ kfree(conn->conn_ops);
+ conn->conn_ops = NULL;
+
+ if (conn->sock) {
+ if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+ kfree(conn->sock->file);
+ conn->sock->file = NULL;
+ }
+ sock_release(conn->sock);
+ }
+ conn->thread_set = NULL;
+
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+ conn->conn_state = TARG_CONN_STATE_FREE;
+ kfree(conn);
+
+ spin_lock_bh(&sess->conn_lock);
+ atomic_dec(&sess->nconn);
+ pr_debug("Decremented iSCSI connection count to %hu from node:"
+ " %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ /*
+	 * Make sure that if one connection fails in a non-ERL=2 iSCSI
+	 * Session, they all fail.
+ */
+ if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
+ !atomic_read(&sess->session_logout))
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+
+ /*
+ * If this was not the last connection in the session, and we are
+ * performing session reinstatement or falling back to ERL=0, call
+ * iscsit_stop_session() without sleeping to shutdown the other
+ * active connections.
+ */
+ if (atomic_read(&sess->nconn)) {
+ if (!atomic_read(&sess->session_reinstatement) &&
+ !atomic_read(&sess->session_fall_back_to_erl0)) {
+ spin_unlock_bh(&sess->conn_lock);
+ return 0;
+ }
+ if (!atomic_read(&sess->session_stop_active)) {
+ atomic_set(&sess->session_stop_active, 1);
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_stop_session(sess, 0, 0);
+ return 0;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+ return 0;
+ }
+
+ /*
+ * If this was the last connection in the session and one of the
+ * following is occurring:
+ *
+	 * Session Reinstatement is not being performed and we are falling back
+	 * to ERL=0, so call iscsit_close_session().
+ *
+ * Session Logout was requested. iscsit_close_session() will be called
+ * elsewhere.
+ *
+	 * Session Continuation is not being performed, start the Time2Retain
+	 * handler and check if sleep_on_sess_wait_comp is active.
+ */
+ if (!atomic_read(&sess->session_reinstatement) &&
+ atomic_read(&sess->session_fall_back_to_erl0)) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_close_session(sess);
+
+ return 0;
+ } else if (atomic_read(&sess->session_logout)) {
+ pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+ sess->session_state = TARG_SESS_STATE_FREE;
+ spin_unlock_bh(&sess->conn_lock);
+
+ if (atomic_read(&sess->sleep_on_sess_wait_comp))
+ complete(&sess->session_wait_comp);
+
+ return 0;
+ } else {
+ pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+ sess->session_state = TARG_SESS_STATE_FAILED;
+
+ if (!atomic_read(&sess->session_continuation)) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_start_time2retain_handler(sess);
+ } else
+ spin_unlock_bh(&sess->conn_lock);
+
+ if (atomic_read(&sess->sleep_on_sess_wait_comp))
+ complete(&sess->session_wait_comp);
+
+ return 0;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ return 0;
+}
+
+int iscsit_close_session(struct iscsi_session *sess)
+{
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+ if (atomic_read(&sess->nconn)) {
+ pr_err("%d connection(s) still exist for iSCSI session"
+ " to %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ BUG();
+ }
+
+ spin_lock_bh(&se_tpg->session_lock);
+ atomic_set(&sess->session_logout, 1);
+ atomic_set(&sess->session_reinstatement, 1);
+ iscsit_stop_time2retain_timer(sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ /*
+	 * transport_deregister_session_configfs() will clear the
+	 * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process
+	 * context may set it again via __transport_register_session() in
+	 * iscsi_post_login_handler() after iscsit_stop_session()
+	 * completes in iscsi_np context.
+ */
+ transport_deregister_session_configfs(sess->se_sess);
+
+ /*
+ * If any other processes are accessing this session pointer we must
+ * wait until they have completed. If we are in an interrupt (the
+	 * time2retain handler) and there is an active session usage count, we
+ * restart the timer and exit.
+ */
+ if (!in_interrupt()) {
+ if (iscsit_check_session_usage_count(sess) == 1)
+ iscsit_stop_session(sess, 1, 1);
+ } else {
+ if (iscsit_check_session_usage_count(sess) == 2) {
+ atomic_set(&sess->session_logout, 0);
+ iscsit_start_time2retain_handler(sess);
+ return 0;
+ }
+ }
+
+ transport_deregister_session(sess->se_sess);
+
+ if (sess->sess_ops->ErrorRecoveryLevel == 2)
+ iscsit_free_connection_recovery_entires(sess);
+
+ iscsit_free_all_ooo_cmdsns(sess);
+
+ spin_lock_bh(&se_tpg->session_lock);
+ pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+ sess->session_state = TARG_SESS_STATE_FREE;
+ pr_debug("Released iSCSI session from node: %s\n",
+ sess->sess_ops->InitiatorName);
+ tpg->nsessions--;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_nsessions--;
+
+ pr_debug("Decremented number of active iSCSI Sessions on"
+ " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
+
+ spin_lock(&sess_idr_lock);
+ idr_remove(&sess_idr, sess->session_index);
+ spin_unlock(&sess_idr_lock);
+
+ kfree(sess->sess_ops);
+ sess->sess_ops = NULL;
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ kfree(sess);
+ return 0;
+}
+
+static void iscsit_logout_post_handler_closesession(
+ struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+ iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+
+ iscsit_dec_conn_usage_count(conn);
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+ iscsit_close_session(sess);
+}
+
+static void iscsit_logout_post_handler_samecid(
+ struct iscsi_conn *conn)
+{
+ iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+ iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+
+ iscsit_cause_connection_reinstatement(conn, 1);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+static void iscsit_logout_post_handler_diffcid(
+ struct iscsi_conn *conn,
+ u16 cid)
+{
+ struct iscsi_conn *l_conn;
+ struct iscsi_session *sess = conn->sess;
+
+ if (!sess)
+ return;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
+ if (l_conn->cid == cid) {
+ iscsit_inc_conn_usage_count(l_conn);
+ break;
+ }
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ if (!l_conn)
+ return;
+
+ if (l_conn->sock)
+ l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
+
+ spin_lock_bh(&l_conn->state_lock);
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+ l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+ spin_unlock_bh(&l_conn->state_lock);
+
+ iscsit_cause_connection_reinstatement(l_conn, 1);
+ iscsit_dec_conn_usage_count(l_conn);
+}
+
+/*
+ * Return of 0 causes the TX thread to restart.
+ */
+static int iscsit_logout_post_handler(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int ret = 0;
+
+ switch (cmd->logout_reason) {
+ case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ iscsit_logout_post_handler_closesession(conn);
+ break;
+ }
+ ret = 0;
+ break;
+ case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+ if (conn->cid == cmd->logout_cid) {
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ iscsit_logout_post_handler_samecid(conn);
+ break;
+ }
+ ret = 0;
+ } else {
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ iscsit_logout_post_handler_diffcid(conn,
+ cmd->logout_cid);
+ break;
+ case ISCSI_LOGOUT_CID_NOT_FOUND:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ break;
+ }
+ ret = 1;
+ }
+ break;
+ case ISCSI_LOGOUT_REASON_RECOVERY:
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ case ISCSI_LOGOUT_CID_NOT_FOUND:
+ case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ break;
+ }
+ ret = 1;
+ break;
+ default:
+ break;
+
+ }
+ return ret;
+}
+
+void iscsit_fail_session(struct iscsi_session *sess)
+{
+ struct iscsi_conn *conn;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
+ conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+ sess->session_state = TARG_SESS_STATE_FAILED;
+}
+
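+/*
+ * Force reinstatement of every connection in the session, wait until the
+ * final connection has been released, then tear the session down via
+ * iscsit_close_session().
+ */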
+int iscsit_free_session(struct iscsi_session *sess)
+{
+ u16 conn_count = atomic_read(&sess->nconn);
+ struct iscsi_conn *conn, *conn_tmp = NULL;
+ int is_last;
+
+ spin_lock_bh(&sess->conn_lock);
+ atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
+ list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+ conn_list) {
+ if (conn_count == 0)
+ break;
+
+ if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+ is_last = 1;
+ } else {
+ iscsit_inc_conn_usage_count(conn_tmp);
+ is_last = 0;
+ }
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_cause_connection_reinstatement(conn, 1);
+ spin_lock_bh(&sess->conn_lock);
+
+ iscsit_dec_conn_usage_count(conn);
+ if (is_last == 0)
+ iscsit_dec_conn_usage_count(conn_tmp);
+
+ conn_count--;
+ }
+
+ if (atomic_read(&sess->nconn)) {
+ spin_unlock_bh(&sess->conn_lock);
+ wait_for_completion(&sess->session_wait_comp);
+ } else
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsit_close_session(sess);
+ return 0;
+}
+
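+/*
+ * Force connection reinstatement for every connection in the session.
+ * When session_sleep is set, block until the last connection has been
+ * released; the connection_sleep flag is propagated to
+ * iscsit_cause_connection_reinstatement() for each connection.
+ */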
+void iscsit_stop_session(
+ struct iscsi_session *sess,
+ int session_sleep,
+ int connection_sleep)
+{
+ u16 conn_count = atomic_read(&sess->nconn);
+ struct iscsi_conn *conn, *conn_tmp = NULL;
+ int is_last;
+
+ spin_lock_bh(&sess->conn_lock);
+ if (session_sleep)
+ atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
+ if (connection_sleep) {
+ list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+ conn_list) {
+ if (conn_count == 0)
+ break;
+
+ if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+ is_last = 1;
+ } else {
+ iscsit_inc_conn_usage_count(conn_tmp);
+ is_last = 0;
+ }
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_cause_connection_reinstatement(conn, 1);
+ spin_lock_bh(&sess->conn_lock);
+
+ iscsit_dec_conn_usage_count(conn);
+ if (is_last == 0)
+ iscsit_dec_conn_usage_count(conn_tmp);
+ conn_count--;
+ }
+ } else {
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
+ iscsit_cause_connection_reinstatement(conn, 0);
+ }
+
+ if (session_sleep && atomic_read(&sess->nconn)) {
+ spin_unlock_bh(&sess->conn_lock);
+ wait_for_completion(&sess->session_wait_comp);
+ } else
+ spin_unlock_bh(&sess->conn_lock);
+}
+
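+/*
+ * Release the active sessions on a portal group. Without @force the call
+ * fails with -1 if any sessions exist; sessions that are already logging
+ * out, falling back to ERL=0 or whose Time2Retain timer has expired are
+ * skipped.
+ */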
+int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+{
+ struct iscsi_session *sess;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct se_session *se_sess, *se_sess_tmp;
+ int session_count = 0;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ if (tpg->nsessions && !force) {
+ spin_unlock_bh(&se_tpg->session_lock);
+ return -1;
+ }
+
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+ sess_list) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
+ continue;
+ }
+ atomic_set(&sess->session_reinstatement, 1);
+ spin_unlock(&sess->conn_lock);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ iscsit_free_session(sess);
+ spin_lock_bh(&se_tpg->session_lock);
+
+ session_count++;
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ pr_debug("Released %d iSCSI Session(s) from Target Portal"
+ " Group: %hu\n", session_count, tpg->tpgt);
+ return 0;
+}
+
+MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
+MODULE_VERSION("4.1.x");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iscsi_target_init_module);
+module_exit(iscsi_target_cleanup_module);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
new file mode 100644
index 0000000..5db2dde
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -0,0 +1,42 @@
+#ifndef ISCSI_TARGET_H
+#define ISCSI_TARGET_H
+
+extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
+extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
+extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
+extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
+extern void iscsit_del_tiqn(struct iscsi_tiqn *);
+extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
+ char *, int);
+extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
+ struct iscsi_portal_group *);
+extern int iscsit_del_np(struct iscsi_np *);
+extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
+extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
+extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
+extern int iscsi_target_tx_thread(void *);
+extern int iscsi_target_rx_thread(void *);
+extern int iscsit_close_connection(struct iscsi_conn *);
+extern int iscsit_close_session(struct iscsi_session *);
+extern void iscsit_fail_session(struct iscsi_session *);
+extern int iscsit_free_session(struct iscsi_session *);
+extern void iscsit_stop_session(struct iscsi_session *, int, int);
+extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
+
+extern struct iscsit_global *iscsit_global;
+extern struct target_fabric_configfs *lio_target_fabric_configfs;
+
+extern struct kmem_cache *lio_dr_cache;
+extern struct kmem_cache *lio_ooo_cache;
+extern struct kmem_cache *lio_cmd_cache;
+extern struct kmem_cache *lio_qr_cache;
+extern struct kmem_cache *lio_r2t_cache;
+
+#endif /*** ISCSI_TARGET_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
new file mode 100644
index 0000000..11fd743
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -0,0 +1,490 @@
+/*******************************************************************************
+ * This file houses the main functions for the iSCSI CHAP support
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/scatterlist.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_auth.h"
+
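+/*
+ * Convert a two-character ASCII hex pair (e.g. "3f") into its binary
+ * byte value. The input is assumed to contain valid hexadecimal digits.
+ */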
+static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2])
+{
+ unsigned char result = 0;
+ /*
+ * MSB
+ */
+ if ((val[0] >= 'a') && (val[0] <= 'f'))
+ result = ((val[0] - 'a' + 10) & 0xf) << 4;
+ else
+ if ((val[0] >= 'A') && (val[0] <= 'F'))
+ result = ((val[0] - 'A' + 10) & 0xf) << 4;
+ else /* digit */
+ result = ((val[0] - '0') & 0xf) << 4;
+ /*
+ * LSB
+ */
+ if ((val[1] >= 'a') && (val[1] <= 'f'))
+ result |= ((val[1] - 'a' + 10) & 0xf);
+ else
+ if ((val[1] >= 'A') && (val[1] <= 'F'))
+ result |= ((val[1] - 'A' + 10) & 0xf);
+ else /* digit */
+ result |= ((val[1] - '0') & 0xf);
+
+ return result;
+}
+
+static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
+{
+ int i, j = 0;
+
+ for (i = 0; i < len; i += 2) {
+ dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]);
+ }
+
+ dst[j] = '\0';
+ return j;
+}
+
+static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
+{
+ int i;
+
+ for (i = 0; i < src_len; i++) {
+ sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
+ }
+}
+
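+/*
+ * Fill the buffer with random bytes, assembling each output byte from
+ * three separate get_random_bytes() draws (3 + 3 + 2 bits).
+ */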
+static void chap_set_random(char *data, int length)
+{
+ long r;
+ unsigned n;
+
+ while (length > 0) {
+ get_random_bytes(&r, sizeof(long));
+ r = r ^ (r >> 8);
+ r = r ^ (r >> 4);
+ n = r & 0x7;
+
+ get_random_bytes(&r, sizeof(long));
+ r = r ^ (r >> 8);
+ r = r ^ (r >> 5);
+ n = (n << 3) | (r & 0x7);
+
+ get_random_bytes(&r, sizeof(long));
+ r = r ^ (r >> 8);
+ r = r ^ (r >> 5);
+ n = (n << 2) | (r & 0x3);
+
+ *data++ = n;
+ length--;
+ }
+}
+
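+/*
+ * Generate a random binary challenge in chap->challenge and append the
+ * corresponding "CHAP_C=0x<hex>" key/value pair (NULL terminated) to
+ * c_str, advancing *c_len accordingly.
+ */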
+static void chap_gen_challenge(
+ struct iscsi_conn *conn,
+ int caller,
+ char *c_str,
+ unsigned int *c_len)
+{
+ unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+
+ memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
+
+ chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+ CHAP_CHALLENGE_LENGTH);
+ /*
+ * Set CHAP_C, and copy the generated challenge into c_str.
+ */
+ *c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
+ *c_len += 1;
+
+ pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
+ challenge_asciihex);
+}
+
+
+static struct iscsi_chap *chap_server_open(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ const char *a_str,
+ char *aic_str,
+ unsigned int *aic_len)
+{
+ struct iscsi_chap *chap;
+
+ if (!(auth->naf_flags & NAF_USERID_SET) ||
+ !(auth->naf_flags & NAF_PASSWORD_SET)) {
+ pr_err("CHAP user or password not set for"
+ " Initiator ACL\n");
+ return NULL;
+ }
+
+ conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL);
+ if (!conn->auth_protocol)
+ return NULL;
+
+ chap = (struct iscsi_chap *) conn->auth_protocol;
+ /*
+ * We only support the MD5 digest algorithm (CHAP_A=5) presently.
+ */
+ if (strncmp(a_str, "CHAP_A=5", 8)) {
+ pr_err("CHAP_A is not MD5.\n");
+ return NULL;
+ }
+ pr_debug("[server] Got CHAP_A=5\n");
+ /*
+ * Send back CHAP_A set to MD5.
+ */
+ *aic_len = sprintf(aic_str, "CHAP_A=5");
+ *aic_len += 1;
+ chap->digest_type = CHAP_DIGEST_MD5;
+ pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+ /*
+ * Set Identifier.
+ */
+ chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
+ *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
+ *aic_len += 1;
+ pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
+ /*
+ * Generate Challenge.
+ */
+ chap_gen_challenge(conn, 1, aic_str, aic_len);
+
+ return chap;
+}
+
+static void chap_close(struct iscsi_conn *conn)
+{
+ kfree(conn->auth_protocol);
+ conn->auth_protocol = NULL;
+}
+
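+/*
+ * Verify the initiator's CHAP_N/CHAP_R against the locally computed
+ * MD5(id + password + challenge) and, when mutual authentication is
+ * enabled, answer the initiator's CHAP_I/CHAP_C with the target's own
+ * CHAP_N/CHAP_R. Returns 0 on success and -1 on any failure.
+ */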
+static int chap_server_compute_md5(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ char *nr_in_ptr,
+ char *nr_out_ptr,
+ unsigned int *nr_out_len)
+{
+ char *endptr;
+ unsigned char id, digest[MD5_SIGNATURE_SIZE];
+ unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
+ unsigned char identifier[10], *challenge = NULL;
+ unsigned char *challenge_binhex = NULL;
+ unsigned char client_digest[MD5_SIGNATURE_SIZE];
+ unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+ struct scatterlist sg;
+ int auth_ret = -1, ret, challenge_len;
+
+ memset(identifier, 0, 10);
+ memset(chap_n, 0, MAX_CHAP_N_SIZE);
+ memset(chap_r, 0, MAX_RESPONSE_LENGTH);
+ memset(digest, 0, MD5_SIGNATURE_SIZE);
+ memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
+ memset(client_digest, 0, MD5_SIGNATURE_SIZE);
+ memset(server_digest, 0, MD5_SIGNATURE_SIZE);
+
+ challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!challenge) {
+ pr_err("Unable to allocate challenge buffer\n");
+ goto out;
+ }
+
+ challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!challenge_binhex) {
+ pr_err("Unable to allocate challenge_binhex buffer\n");
+ goto out;
+ }
+ /*
+ * Extract CHAP_N.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
+ &type) < 0) {
+ pr_err("Could not find CHAP_N.\n");
+ goto out;
+ }
+ if (type == HEX) {
+ pr_err("CHAP_N is not in text format.\n");
+ goto out;
+ }
+
+ if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
+ pr_err("CHAP_N values do not match!\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_N=%s\n", chap_n);
+ /*
+ * Extract CHAP_R.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
+ &type) < 0) {
+ pr_err("Could not find CHAP_R.\n");
+ goto out;
+ }
+ if (type != HEX) {
+ pr_err("CHAP_R is not in hex format.\n");
+ goto out;
+ }
+
+ pr_debug("[server] Got CHAP_R=%s\n", chap_r);
+ chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
+
+ tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("Unable to allocate struct crypto_hash\n");
+ goto out;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ ret = crypto_hash_init(&desc);
+ if (ret < 0) {
+ pr_err("crypto_hash_init() failed\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)&chap->id, 1);
+ ret = crypto_hash_update(&desc, &sg, 1);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for id\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
+ ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for password\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
+ ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for challenge\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ ret = crypto_hash_final(&desc, server_digest);
+ if (ret < 0) {
+ pr_err("crypto_hash_final() failed for server digest\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+ crypto_free_hash(tfm);
+
+ chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+ pr_debug("[server] MD5 Server Digest: %s\n", response);
+
+ if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
+ pr_debug("[server] MD5 Digests do not match!\n\n");
+ goto out;
+ } else
+ pr_debug("[server] MD5 Digests match, CHAP authentication"
+ " successful.\n\n");
+ /*
+ * One way authentication has succeeded, return now if mutual
+ * authentication is not enabled.
+ */
+ if (!auth->authenticate_target) {
+ kfree(challenge);
+ kfree(challenge_binhex);
+ return 0;
+ }
+ /*
+ * Get CHAP_I.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
+ pr_err("Could not find CHAP_I.\n");
+ goto out;
+ }
+
+ if (type == HEX)
+ id = (unsigned char)simple_strtoul((char *)&identifier[2],
+ &endptr, 0);
+ else
+ id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
+ /*
+ * RFC 1994 says the Identifier is no more than one octet (8 bits).
+ */
+ pr_debug("[server] Got CHAP_I=%d\n", id);
+ /*
+ * Get CHAP_C.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
+ challenge, &type) < 0) {
+ pr_err("Could not find CHAP_C.\n");
+ goto out;
+ }
+
+ if (type != HEX) {
+ pr_err("CHAP_C is not in hex format.\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_C=%s\n", challenge);
+ challenge_len = chap_string_to_hex(challenge_binhex, challenge,
+ strlen(challenge));
+ if (!challenge_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ /*
+ * Generate CHAP_N and CHAP_R for mutual authentication.
+ */
+ tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("Unable to allocate struct crypto_hash\n");
+ goto out;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ ret = crypto_hash_init(&desc);
+ if (ret < 0) {
+ pr_err("crypto_hash_init() failed\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)&id, 1);
+ ret = crypto_hash_update(&desc, &sg, 1);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for id\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)auth->password_mutual,
+ strlen(auth->password_mutual));
+ ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for"
+ " password_mutual\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+ /*
+ * Include the received challenge, now converted to binary, in the
+ * mutual authentication digest.
+ */
+ sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
+ ret = crypto_hash_update(&desc, &sg, challenge_len);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for ma challenge\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ ret = crypto_hash_final(&desc, digest);
+ if (ret < 0) {
+ pr_err("crypto_hash_final() failed for ma digest\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+ crypto_free_hash(tfm);
+ /*
+ * Generate CHAP_N and CHAP_R.
+ */
+ *nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
+ *nr_out_len += 1;
+ pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
+ /*
+ * Convert the response from binary hex to ascii hex.
+ */
+ chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+ *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
+ response);
+ *nr_out_len += 1;
+ pr_debug("[server] Sending CHAP_R=0x%s\n", response);
+ auth_ret = 0;
+out:
+ kfree(challenge);
+ kfree(challenge_binhex);
+ return auth_ret;
+}
+
+static int chap_got_response(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ char *nr_in_ptr,
+ char *nr_out_ptr,
+ unsigned int *nr_out_len)
+{
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+
+ switch (chap->digest_type) {
+ case CHAP_DIGEST_MD5:
+ if (chap_server_compute_md5(conn, auth, nr_in_ptr,
+ nr_out_ptr, nr_out_len) < 0)
+ return -1;
+ return 0;
+ default:
+ pr_err("Unknown CHAP digest type %d!\n",
+ chap->digest_type);
+ return -1;
+ }
+}
+
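+/*
+ * Server side CHAP state machine: the first call opens the CHAP context
+ * and emits CHAP_A/CHAP_I/CHAP_C, the second call validates the
+ * initiator's response and generates the mutual response if required.
+ * Returns 0 to continue the exchange, 1 on successful authentication
+ * and 2 on failure.
+ */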
+u32 chap_main_loop(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ char *in_text,
+ char *out_text,
+ int *in_len,
+ int *out_len)
+{
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+
+ if (!chap) {
+ chap = chap_server_open(conn, auth, in_text, out_text, out_len);
+ if (!chap)
+ return 2;
+ chap->chap_state = CHAP_STAGE_SERVER_AIC;
+ return 0;
+ } else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
+ convert_null_to_semi(in_text, *in_len);
+ if (chap_got_response(conn, auth, in_text, out_text,
+ out_len) < 0) {
+ chap_close(conn);
+ return 2;
+ }
+ if (auth->authenticate_target)
+ chap->chap_state = CHAP_STAGE_SERVER_NR;
+ else
+ *out_len = 0;
+ chap_close(conn);
+ return 1;
+ }
+
+ return 2;
+}
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
new file mode 100644
index 0000000..2f463c0
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -0,0 +1,31 @@
+#ifndef _ISCSI_CHAP_H_
+#define _ISCSI_CHAP_H_
+
+#define CHAP_DIGEST_MD5 5
+#define CHAP_DIGEST_SHA 6
+
+#define CHAP_CHALLENGE_LENGTH 16
+#define CHAP_CHALLENGE_STR_LEN 4096
+#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
+#define MAX_CHAP_N_SIZE 512
+
+#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
+
+#define CHAP_STAGE_CLIENT_A 1
+#define CHAP_STAGE_SERVER_AIC 2
+#define CHAP_STAGE_CLIENT_NR 3
+#define CHAP_STAGE_CLIENT_NRIC 4
+#define CHAP_STAGE_SERVER_NR 5
+
+extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
+ int *, int *);
+
+struct iscsi_chap {
+ unsigned char digest_type;
+ unsigned char id;
+ unsigned char challenge[CHAP_CHALLENGE_LENGTH];
+ unsigned int authenticate_target;
+ unsigned int chap_state;
+} ____cacheline_aligned;
+
+#endif /*** _ISCSI_CHAP_H_ ***/
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
new file mode 100644
index 0000000..f1643db
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -0,0 +1,1882 @@
+/*******************************************************************************
+ * This file contains the configfs implementation for iSCSI Target mode
+ * from the LIO-Target Project.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/configfs.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_fabric_lib.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_nodeattrib.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_stat.h"
+#include "iscsi_target_configfs.h"
+
+struct target_fabric_configfs *lio_target_fabric_configfs;
+
+struct lio_target_configfs_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(void *, char *);
+ ssize_t (*store)(void *, const char *, size_t);
+};
+
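+/*
+ * Resolve the struct iscsi_portal_group behind a configfs TPG item and
+ * take a reference via iscsit_get_tpg(); callers drop it again with
+ * iscsit_put_tpg().
+ */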
+struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
+ struct config_item *item,
+ struct iscsi_tiqn **tiqn_out)
+{
+ struct se_portal_group *se_tpg = container_of(to_config_group(item),
+ struct se_portal_group, tpg_group);
+ struct iscsi_portal_group *tpg =
+ (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
+ int ret;
+
+ if (!tpg) {
+ pr_err("Unable to locate struct iscsi_portal_group "
+ "pointer\n");
+ return NULL;
+ }
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return NULL;
+
+ *tiqn_out = tpg->tpg_tiqn;
+ return tpg;
+}
+
+/* Start items for lio_target_portal_cit */
+
+static ssize_t lio_target_np_show_sctp(
+ struct se_tpg_np *se_tpg_np,
+ char *page)
+{
+ struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+ struct iscsi_tpg_np, se_tpg_np);
+ struct iscsi_tpg_np *tpg_np_sctp;
+ ssize_t rb;
+
+ tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
+ if (tpg_np_sctp)
+ rb = sprintf(page, "1\n");
+ else
+ rb = sprintf(page, "0\n");
+
+ return rb;
+}
+
+static ssize_t lio_target_np_store_sctp(
+ struct se_tpg_np *se_tpg_np,
+ const char *page,
+ size_t count)
+{
+ struct iscsi_np *np;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+ struct iscsi_tpg_np, se_tpg_np);
+ struct iscsi_tpg_np *tpg_np_sctp = NULL;
+ char *endptr;
+ u32 op;
+ int ret;
+
+ op = simple_strtoul(page, &endptr, 0);
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %u\n", op);
+ return -EINVAL;
+ }
+ np = tpg_np->tpg_np;
+ if (!np) {
+ pr_err("Unable to locate struct iscsi_np from"
+ " struct iscsi_tpg_np\n");
+ return -EINVAL;
+ }
+
+ tpg = tpg_np->tpg;
+ if (iscsit_get_tpg(tpg) < 0)
+ return -EINVAL;
+
+ if (op) {
+ /*
+ * Use existing np->np_sockaddr for SCTP network portal reference
+ */
+ tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
+ np->np_ip, tpg_np, ISCSI_SCTP_TCP);
+ if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
+ goto out;
+ } else {
+ tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
+ if (!tpg_np_sctp)
+ goto out;
+
+ ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
+ if (ret < 0)
+ goto out;
+ }
+
+ iscsit_put_tpg(tpg);
+ return count;
+out:
+ iscsit_put_tpg(tpg);
+ return -EINVAL;
+}
+
+TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_portal_attrs[] = {
+ &lio_target_np_sctp.attr,
+ NULL,
+};
+
+/* Stop items for lio_target_portal_cit */
+
+/* Start items for lio_target_np_cit */
+
+#define MAX_PORTAL_LEN 256
+
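+/*
+ * Parse the portal directory name as "<IPv4 address>:<port>" or
+ * "[<IPv6 address>]:<port>" and register a new (TCP) network portal
+ * for this TPG.
+ */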
+struct se_tpg_np *lio_target_call_addnptotpg(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ char *str, *str2, *ip_str, *port_str;
+ struct __kernel_sockaddr_storage sockaddr;
+ struct sockaddr_in *sock_in;
+ struct sockaddr_in6 *sock_in6;
+ unsigned long port;
+ int ret;
+ char buf[MAX_PORTAL_LEN + 1];
+
+ if (strlen(name) > MAX_PORTAL_LEN) {
+ pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
+ (int)strlen(name), MAX_PORTAL_LEN);
+ return ERR_PTR(-EOVERFLOW);
+ }
+ memset(buf, 0, MAX_PORTAL_LEN + 1);
+ snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
+
+ memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
+
+ str = strstr(buf, "[");
+ if (str) {
+ const char *end;
+
+ str2 = strstr(str, "]");
+ if (!str2) {
+ pr_err("Unable to locate trailing \"]\""
+ " in IPv6 iSCSI network portal address\n");
+ return ERR_PTR(-EINVAL);
+ }
+ str++; /* Skip over leading "[" */
+ *str2 = '\0'; /* Terminate the IPv6 address */
+ str2++; /* Skip over the "]" */
+ port_str = strstr(str2, ":");
+ if (!port_str) {
+ pr_err("Unable to locate \":port\""
+ " in IPv6 iSCSI network portal address\n");
+ return ERR_PTR(-EINVAL);
+ }
+ *port_str = '\0'; /* Terminate string for IP */
+ port_str++; /* Skip over ":" */
+
+ ret = strict_strtoul(port_str, 0, &port);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ sock_in6 = (struct sockaddr_in6 *)&sockaddr;
+ sock_in6->sin6_family = AF_INET6;
+ sock_in6->sin6_port = htons((unsigned short)port);
+ ret = in6_pton(str, IPV6_ADDRESS_SPACE,
+ (void *)&sock_in6->sin6_addr.in6_u, -1, &end);
+ if (ret <= 0) {
+ pr_err("in6_pton returned: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ str = ip_str = &buf[0];
+ port_str = strstr(ip_str, ":");
+ if (!port_str) {
+ pr_err("Unable to locate \":port\""
+ " in IPv4 iSCSI network portal address\n");
+ return ERR_PTR(-EINVAL);
+ }
+ *port_str = '\0'; /* Terminate string for IP */
+ port_str++; /* Skip over ":" */
+
+ ret = strict_strtoul(port_str, 0, &port);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ sock_in = (struct sockaddr_in *)&sockaddr;
+ sock_in->sin_family = AF_INET;
+ sock_in->sin_port = htons((unsigned short)port);
+ sock_in->sin_addr.s_addr = in_aton(ip_str);
+ }
+ tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return ERR_PTR(-EINVAL);
+
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu"
+ " PORTAL: %s\n",
+ config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+ tpg->tpgt, name);
+ /*
+ * Assume ISCSI_TCP by default. Other network portals for other
+ * iSCSI fabrics:
+ *
+ * Traditional iSCSI over SCTP (initial support)
+ * iSER/TCP (TODO, hardware available)
+ * iSER/SCTP (TODO, software emulation with osc-iwarp)
+ * iSER/IB (TODO, hardware available)
+ *
+ * can be enabled with attributes under
+ * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
+ *
+ */
+ tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
+ ISCSI_TCP);
+ if (IS_ERR(tpg_np)) {
+ iscsit_put_tpg(tpg);
+ return ERR_CAST(tpg_np);
+ }
+ pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
+
+ iscsit_put_tpg(tpg);
+ return &tpg_np->se_tpg_np;
+}
+
+static void lio_target_call_delnpfromtpg(
+ struct se_tpg_np *se_tpg_np)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ struct se_portal_group *se_tpg;
+ int ret;
+
+ tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np);
+ tpg = tpg_np->tpg;
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return;
+
+ se_tpg = &tpg->tpg_se_tpg;
+ pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
+ " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+ tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
+
+ ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
+ if (ret < 0)
+ goto out;
+
+ pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n");
+out:
+ iscsit_put_tpg(tpg);
+}
+
+/* End items for lio_target_np_cit */
+
+/* Start items for lio_target_nacl_attrib_cit */
+
+#define DEF_NACL_ATTRIB(name) \
+static ssize_t iscsi_nacl_attrib_show_##name( \
+ struct se_node_acl *se_nacl, \
+ char *page) \
+{ \
+ struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
+ se_node_acl); \
+ \
+ return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
+} \
+ \
+static ssize_t iscsi_nacl_attrib_store_##name( \
+ struct se_node_acl *se_nacl, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
+ se_node_acl); \
+ char *endptr; \
+ u32 val; \
+ int ret; \
+ \
+ val = simple_strtoul(page, &endptr, 0); \
+ ret = iscsit_na_##name(nacl, val); \
+ if (ret < 0) \
+ return ret; \
+ \
+ return count; \
+}
+
+#define NACL_ATTR(_name, _mode) TF_NACL_ATTRIB_ATTR(iscsi, _name, _mode);
+/*
+ * Define iscsi_node_attrib_s_dataout_timeout
+ */
+DEF_NACL_ATTRIB(dataout_timeout);
+NACL_ATTR(dataout_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_dataout_timeout_retries
+ */
+DEF_NACL_ATTRIB(dataout_timeout_retries);
+NACL_ATTR(dataout_timeout_retries, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_default_erl
+ */
+DEF_NACL_ATTRIB(default_erl);
+NACL_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_nopin_timeout
+ */
+DEF_NACL_ATTRIB(nopin_timeout);
+NACL_ATTR(nopin_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_nopin_response_timeout
+ */
+DEF_NACL_ATTRIB(nopin_response_timeout);
+NACL_ATTR(nopin_response_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_datain_pdu_offsets
+ */
+DEF_NACL_ATTRIB(random_datain_pdu_offsets);
+NACL_ATTR(random_datain_pdu_offsets, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_datain_seq_offsets
+ */
+DEF_NACL_ATTRIB(random_datain_seq_offsets);
+NACL_ATTR(random_datain_seq_offsets, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_r2t_offsets
+ */
+DEF_NACL_ATTRIB(random_r2t_offsets);
+NACL_ATTR(random_r2t_offsets, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
+ &iscsi_nacl_attrib_dataout_timeout.attr,
+ &iscsi_nacl_attrib_dataout_timeout_retries.attr,
+ &iscsi_nacl_attrib_default_erl.attr,
+ &iscsi_nacl_attrib_nopin_timeout.attr,
+ &iscsi_nacl_attrib_nopin_response_timeout.attr,
+ &iscsi_nacl_attrib_random_datain_pdu_offsets.attr,
+ &iscsi_nacl_attrib_random_datain_seq_offsets.attr,
+ &iscsi_nacl_attrib_random_r2t_offsets.attr,
+ NULL,
+};
+
+/* End items for lio_target_nacl_attrib_cit */
+
+/* Start items for lio_target_nacl_auth_cit */
+
+#define __DEF_NACL_AUTH_STR(prefix, name, flags) \
+static ssize_t __iscsi_##prefix##_show_##name( \
+ struct iscsi_node_acl *nacl, \
+ char *page) \
+{ \
+ struct iscsi_node_auth *auth = &nacl->node_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
+} \
+ \
+static ssize_t __iscsi_##prefix##_store_##name( \
+ struct iscsi_node_acl *nacl, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_node_auth *auth = &nacl->node_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ snprintf(auth->name, PAGE_SIZE, "%s", page); \
+ if (!strncmp("NULL", auth->name, 4)) \
+ auth->naf_flags &= ~flags; \
+ else \
+ auth->naf_flags |= flags; \
+ \
+ if ((auth->naf_flags & NAF_USERID_IN_SET) && \
+ (auth->naf_flags & NAF_PASSWORD_IN_SET)) \
+ auth->authenticate_target = 1; \
+ else \
+ auth->authenticate_target = 0; \
+ \
+ return count; \
+}
+
+#define __DEF_NACL_AUTH_INT(prefix, name) \
+static ssize_t __iscsi_##prefix##_show_##name( \
+ struct iscsi_node_acl *nacl, \
+ char *page) \
+{ \
+ struct iscsi_node_auth *auth = &nacl->node_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
+}
+
+#define DEF_NACL_AUTH_STR(name, flags) \
+ __DEF_NACL_AUTH_STR(nacl_auth, name, flags) \
+static ssize_t iscsi_nacl_auth_show_##name( \
+ struct se_node_acl *nacl, \
+ char *page) \
+{ \
+ return __iscsi_nacl_auth_show_##name(container_of(nacl, \
+ struct iscsi_node_acl, se_node_acl), page); \
+} \
+static ssize_t iscsi_nacl_auth_store_##name( \
+ struct se_node_acl *nacl, \
+ const char *page, \
+ size_t count) \
+{ \
+ return __iscsi_nacl_auth_store_##name(container_of(nacl, \
+ struct iscsi_node_acl, se_node_acl), page, count); \
+}
+
+#define DEF_NACL_AUTH_INT(name) \
+ __DEF_NACL_AUTH_INT(nacl_auth, name) \
+static ssize_t iscsi_nacl_auth_show_##name( \
+ struct se_node_acl *nacl, \
+ char *page) \
+{ \
+ return __iscsi_nacl_auth_show_##name(container_of(nacl, \
+ struct iscsi_node_acl, se_node_acl), page); \
+}
+
+#define AUTH_ATTR(_name, _mode) TF_NACL_AUTH_ATTR(iscsi, _name, _mode);
+#define AUTH_ATTR_RO(_name) TF_NACL_AUTH_ATTR_RO(iscsi, _name);
+
+/*
+ * One-way authentication userid
+ */
+DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
+AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
+/*
+ * One-way authentication password
+ */
+DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
+AUTH_ATTR(password, S_IRUGO | S_IWUSR);
+/*
+ * Enforce mutual authentication
+ */
+DEF_NACL_AUTH_INT(authenticate_target);
+AUTH_ATTR_RO(authenticate_target);
+/*
+ * Mutual authentication userid
+ */
+DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
+/*
+ * Mutual authentication password
+ */
+DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
+ &iscsi_nacl_auth_userid.attr,
+ &iscsi_nacl_auth_password.attr,
+ &iscsi_nacl_auth_authenticate_target.attr,
+ &iscsi_nacl_auth_userid_mutual.attr,
+ &iscsi_nacl_auth_password_mutual.attr,
+ NULL,
+};
+
+/* End items for lio_target_nacl_auth_cit */
+
+/* Start items for lio_target_nacl_param_cit */
+
+#define DEF_NACL_PARAM(name) \
+static ssize_t iscsi_nacl_param_show_##name( \
+ struct se_node_acl *se_nacl, \
+ char *page) \
+{ \
+ struct iscsi_session *sess; \
+ struct se_session *se_sess; \
+ ssize_t rb; \
+ \
+ spin_lock_bh(&se_nacl->nacl_sess_lock); \
+ se_sess = se_nacl->nacl_sess; \
+ if (!se_sess) { \
+ rb = snprintf(page, PAGE_SIZE, \
+ "No Active iSCSI Session\n"); \
+ } else { \
+ sess = se_sess->fabric_sess_ptr; \
+ rb = snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)sess->sess_ops->name); \
+ } \
+ spin_unlock_bh(&se_nacl->nacl_sess_lock); \
+ \
+ return rb; \
+}
+
+#define NACL_PARAM_ATTR(_name) TF_NACL_PARAM_ATTR_RO(iscsi, _name);
+
+DEF_NACL_PARAM(MaxConnections);
+NACL_PARAM_ATTR(MaxConnections);
+
+DEF_NACL_PARAM(InitialR2T);
+NACL_PARAM_ATTR(InitialR2T);
+
+DEF_NACL_PARAM(ImmediateData);
+NACL_PARAM_ATTR(ImmediateData);
+
+DEF_NACL_PARAM(MaxBurstLength);
+NACL_PARAM_ATTR(MaxBurstLength);
+
+DEF_NACL_PARAM(FirstBurstLength);
+NACL_PARAM_ATTR(FirstBurstLength);
+
+DEF_NACL_PARAM(DefaultTime2Wait);
+NACL_PARAM_ATTR(DefaultTime2Wait);
+
+DEF_NACL_PARAM(DefaultTime2Retain);
+NACL_PARAM_ATTR(DefaultTime2Retain);
+
+DEF_NACL_PARAM(MaxOutstandingR2T);
+NACL_PARAM_ATTR(MaxOutstandingR2T);
+
+DEF_NACL_PARAM(DataPDUInOrder);
+NACL_PARAM_ATTR(DataPDUInOrder);
+
+DEF_NACL_PARAM(DataSequenceInOrder);
+NACL_PARAM_ATTR(DataSequenceInOrder);
+
+DEF_NACL_PARAM(ErrorRecoveryLevel);
+NACL_PARAM_ATTR(ErrorRecoveryLevel);
+
+static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
+ &iscsi_nacl_param_MaxConnections.attr,
+ &iscsi_nacl_param_InitialR2T.attr,
+ &iscsi_nacl_param_ImmediateData.attr,
+ &iscsi_nacl_param_MaxBurstLength.attr,
+ &iscsi_nacl_param_FirstBurstLength.attr,
+ &iscsi_nacl_param_DefaultTime2Wait.attr,
+ &iscsi_nacl_param_DefaultTime2Retain.attr,
+ &iscsi_nacl_param_MaxOutstandingR2T.attr,
+ &iscsi_nacl_param_DataPDUInOrder.attr,
+ &iscsi_nacl_param_DataSequenceInOrder.attr,
+ &iscsi_nacl_param_ErrorRecoveryLevel.attr,
+ NULL,
+};
+
+/* End items for lio_target_nacl_param_cit */
+
+/* Start items for lio_target_acl_cit */
+
+static ssize_t lio_target_nacl_show_info(
+ struct se_node_acl *se_nacl,
+ char *page)
+{
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ struct se_session *se_sess;
+ ssize_t rb = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (!se_sess) {
+ rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
+ " Endpoint: %s\n", se_nacl->initiatorname);
+ } else {
+ sess = se_sess->fabric_sess_ptr;
+
+ if (sess->sess_ops->InitiatorName)
+ rb += sprintf(page+rb, "InitiatorName: %s\n",
+ sess->sess_ops->InitiatorName);
+ if (sess->sess_ops->InitiatorAlias)
+ rb += sprintf(page+rb, "InitiatorAlias: %s\n",
+ sess->sess_ops->InitiatorAlias);
+
+ rb += sprintf(page+rb, "LIO Session ID: %u "
+ "ISID: 0x%02x %02x %02x %02x %02x %02x "
+ "TSIH: %hu ", sess->sid,
+ sess->isid[0], sess->isid[1], sess->isid[2],
+ sess->isid[3], sess->isid[4], sess->isid[5],
+ sess->tsih);
+ rb += sprintf(page+rb, "SessionType: %s\n",
+ (sess->sess_ops->SessionType) ?
+ "Discovery" : "Normal");
+ rb += sprintf(page+rb, "Session State: ");
+ switch (sess->session_state) {
+ case TARG_SESS_STATE_FREE:
+ rb += sprintf(page+rb, "TARG_SESS_FREE\n");
+ break;
+ case TARG_SESS_STATE_ACTIVE:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
+ break;
+ case TARG_SESS_STATE_LOGGED_IN:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
+ break;
+ case TARG_SESS_STATE_FAILED:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
+ break;
+ case TARG_SESS_STATE_IN_CONTINUE:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+ break;
+ default:
+ rb += sprintf(page+rb, "ERROR: Unknown Session"
+ " State!\n");
+ break;
+ }
+
+ rb += sprintf(page+rb, "---------------------[iSCSI Session"
+ " Values]-----------------------\n");
+ rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
+ " : MaxCmdSN : ITT : TTT\n");
+ rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
+ " 0x%08x 0x%08x\n",
+ sess->cmdsn_window,
+ (sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
+ sess->exp_cmd_sn, sess->max_cmd_sn,
+ sess->init_task_tag, sess->targ_xfer_tag);
+ rb += sprintf(page+rb, "----------------------[iSCSI"
+ " Connections]-------------------------\n");
+
+ spin_lock(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ rb += sprintf(page+rb, "CID: %hu Connection"
+ " State: ", conn->cid);
+ switch (conn->conn_state) {
+ case TARG_CONN_STATE_FREE:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_FREE\n");
+ break;
+ case TARG_CONN_STATE_XPT_UP:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_XPT_UP\n");
+ break;
+ case TARG_CONN_STATE_IN_LOGIN:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_IN_LOGIN\n");
+ break;
+ case TARG_CONN_STATE_LOGGED_IN:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_LOGGED_IN\n");
+ break;
+ case TARG_CONN_STATE_IN_LOGOUT:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_IN_LOGOUT\n");
+ break;
+ case TARG_CONN_STATE_LOGOUT_REQUESTED:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_LOGOUT_REQUESTED\n");
+ break;
+ case TARG_CONN_STATE_CLEANUP_WAIT:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_CLEANUP_WAIT\n");
+ break;
+ default:
+ rb += sprintf(page+rb,
+ "ERROR: Unknown Connection State!\n");
+ break;
+ }
+
+ rb += sprintf(page+rb, " Address %s %s", conn->login_ip,
+ (conn->network_transport == ISCSI_TCP) ?
+ "TCP" : "SCTP");
+ rb += sprintf(page+rb, " StatSN: 0x%08x\n",
+ conn->stat_sn);
+ }
+ spin_unlock(&sess->conn_lock);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return rb;
+}
+
+TF_NACL_BASE_ATTR_RO(lio_target, info);
+
+static ssize_t lio_target_nacl_show_cmdsn_depth(
+ struct se_node_acl *se_nacl,
+ char *page)
+{
+ return sprintf(page, "%u\n", se_nacl->queue_depth);
+}
+
+static ssize_t lio_target_nacl_store_cmdsn_depth(
+ struct se_node_acl *se_nacl,
+ const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ struct config_item *acl_ci, *tpg_ci, *wwn_ci;
+ char *endptr;
+ u32 cmdsn_depth = 0;
+ int ret;
+
+ cmdsn_depth = simple_strtoul(page, &endptr, 0);
+ if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
+ pr_err("Passed cmdsn_depth: %u exceeds"
+ " TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
+ TA_DEFAULT_CMDSN_DEPTH_MAX);
+ return -EINVAL;
+ }
+ acl_ci = &se_nacl->acl_group.cg_item;
+ if (!acl_ci) {
+ pr_err("Unable to locate acl_ci\n");
+ return -EINVAL;
+ }
+ tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
+ if (!tpg_ci) {
+ pr_err("Unable to locate tpg_ci\n");
+ return -EINVAL;
+ }
+ wwn_ci = &tpg_ci->ci_group->cg_item;
+ if (!wwn_ci) {
+ pr_err("Unable to locate config_item wwn_ci\n");
+ return -EINVAL;
+ }
+
+ if (iscsit_get_tpg(tpg) < 0)
+ return -EINVAL;
+ /*
+ * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
+ */
+ ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
+ config_item_name(acl_ci), cmdsn_depth, 1);
+
+ pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for "
+ "InitiatorName: %s\n", config_item_name(wwn_ci),
+ config_item_name(tpg_ci), cmdsn_depth,
+ config_item_name(acl_ci));
+
+ iscsit_put_tpg(tpg);
+ return (!ret) ? count : (ssize_t)ret;
+}
+
+TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_initiator_attrs[] = {
+ &lio_target_nacl_info.attr,
+ &lio_target_nacl_cmdsn_depth.attr,
+ NULL,
+};
+
+static struct se_node_acl *lio_tpg_alloc_fabric_acl(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_node_acl *acl;
+
+ acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
+ if (!acl) {
+ pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
+ return NULL;
+ }
+
+ return &acl->se_node_acl;
+}
+
+static struct se_node_acl *lio_target_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct config_group *stats_cg;
+ struct iscsi_node_acl *acl;
+ struct se_node_acl *se_nacl_new, *se_nacl;
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ u32 cmdsn_depth;
+
+ se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+
+ acl = container_of(se_nacl_new, struct iscsi_node_acl,
+ se_node_acl);
+
+ cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+ * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, cmdsn_depth);
+ if (IS_ERR(se_nacl))
+ return se_nacl;
+
+ stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
+
+ stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!stats_cg->default_groups) {
+ pr_err("Unable to allocate memory for"
+ " stats_cg->default_groups\n");
+ core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
+ kfree(acl);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
+ stats_cg->default_groups[1] = NULL;
+ config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
+ "iscsi_sess_stats", &iscsi_stat_sess_cit);
+
+ return se_nacl;
+}
+
+static void lio_target_drop_nodeacl(
+ struct se_node_acl *se_nacl)
+{
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct iscsi_node_acl *acl = container_of(se_nacl,
+ struct iscsi_node_acl, se_node_acl);
+ struct config_item *df_item;
+ struct config_group *stats_cg;
+ int i;
+
+ stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
+ for (i = 0; stats_cg->default_groups[i]; i++) {
+ df_item = &stats_cg->default_groups[i]->cg_item;
+ stats_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(stats_cg->default_groups);
+
+ core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
+ kfree(acl);
+}
+
+/* End items for lio_target_acl_cit */
+
+/* Start items for lio_target_tpg_attrib_cit */
+
+#define DEF_TPG_ATTRIB(name) \
+ \
+static ssize_t iscsi_tpg_attrib_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ ssize_t rb; \
+ \
+ if (iscsit_get_tpg(tpg) < 0) \
+ return -EINVAL; \
+ \
+ rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
+ iscsit_put_tpg(tpg); \
+ return rb; \
+} \
+ \
+static ssize_t iscsi_tpg_attrib_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ char *endptr; \
+ u32 val; \
+ int ret; \
+ \
+ if (iscsit_get_tpg(tpg) < 0) \
+ return -EINVAL; \
+ \
+ val = simple_strtoul(page, &endptr, 0); \
+ ret = iscsit_ta_##name(tpg, val); \
+ if (ret < 0) \
+ goto out; \
+ \
+ iscsit_put_tpg(tpg); \
+ return count; \
+out: \
+ iscsit_put_tpg(tpg); \
+ return ret; \
+}
+
+#define TPG_ATTR(_name, _mode) TF_TPG_ATTRIB_ATTR(iscsi, _name, _mode);
+
+/*
+ * Define iscsi_tpg_attrib_s_authentication
+ */
+DEF_TPG_ATTRIB(authentication);
+TPG_ATTR(authentication, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_login_timeout
+ */
+DEF_TPG_ATTRIB(login_timeout);
+TPG_ATTR(login_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_netif_timeout
+ */
+DEF_TPG_ATTRIB(netif_timeout);
+TPG_ATTR(netif_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_generate_node_acls
+ */
+DEF_TPG_ATTRIB(generate_node_acls);
+TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_default_cmdsn_depth
+ */
+DEF_TPG_ATTRIB(default_cmdsn_depth);
+TPG_ATTR(default_cmdsn_depth, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_TPG_ATTRIB(cache_dynamic_acls);
+TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_TPG_ATTRIB(demo_mode_write_protect);
+TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_TPG_ATTRIB(prod_mode_write_protect);
+TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
+ &iscsi_tpg_attrib_authentication.attr,
+ &iscsi_tpg_attrib_login_timeout.attr,
+ &iscsi_tpg_attrib_netif_timeout.attr,
+ &iscsi_tpg_attrib_generate_node_acls.attr,
+ &iscsi_tpg_attrib_default_cmdsn_depth.attr,
+ &iscsi_tpg_attrib_cache_dynamic_acls.attr,
+ &iscsi_tpg_attrib_demo_mode_write_protect.attr,
+ &iscsi_tpg_attrib_prod_mode_write_protect.attr,
+ NULL,
+};
+
+/* End items for lio_target_tpg_attrib_cit */
+
+/* Start items for lio_target_tpg_param_cit */
+
+#define DEF_TPG_PARAM(name) \
+static ssize_t iscsi_tpg_param_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_param *param; \
+ ssize_t rb; \
+ \
+ if (iscsit_get_tpg(tpg) < 0) \
+ return -EINVAL; \
+ \
+ param = iscsi_find_param_from_key(__stringify(name), \
+ tpg->param_list); \
+ if (!param) { \
+ iscsit_put_tpg(tpg); \
+ return -EINVAL; \
+ } \
+ rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \
+ \
+ iscsit_put_tpg(tpg); \
+ return rb; \
+} \
+static ssize_t iscsi_tpg_param_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ char *buf; \
+ int ret; \
+ \
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \
+ if (!buf) \
+ return -ENOMEM; \
+ snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
+ buf[strlen(buf)-1] = '\0'; /* Kill newline */ \
+ \
+ if (iscsit_get_tpg(tpg) < 0) { \
+ kfree(buf); \
+ return -EINVAL; \
+ } \
+ \
+ ret = iscsi_change_param_value(buf, tpg->param_list, 1); \
+ if (ret < 0) \
+ goto out; \
+ \
+ kfree(buf); \
+ iscsit_put_tpg(tpg); \
+ return count; \
+out: \
+ kfree(buf); \
+ iscsit_put_tpg(tpg); \
+ return -EINVAL; \
+}
+
+#define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode);
+
+DEF_TPG_PARAM(AuthMethod);
+TPG_PARAM_ATTR(AuthMethod, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(HeaderDigest);
+TPG_PARAM_ATTR(HeaderDigest, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataDigest);
+TPG_PARAM_ATTR(DataDigest, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxConnections);
+TPG_PARAM_ATTR(MaxConnections, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(TargetAlias);
+TPG_PARAM_ATTR(TargetAlias, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(InitialR2T);
+TPG_PARAM_ATTR(InitialR2T, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(ImmediateData);
+TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxRecvDataSegmentLength);
+TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxBurstLength);
+TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(FirstBurstLength);
+TPG_PARAM_ATTR(FirstBurstLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DefaultTime2Wait);
+TPG_PARAM_ATTR(DefaultTime2Wait, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DefaultTime2Retain);
+TPG_PARAM_ATTR(DefaultTime2Retain, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxOutstandingR2T);
+TPG_PARAM_ATTR(MaxOutstandingR2T, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataPDUInOrder);
+TPG_PARAM_ATTR(DataPDUInOrder, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataSequenceInOrder);
+TPG_PARAM_ATTR(DataSequenceInOrder, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(ErrorRecoveryLevel);
+TPG_PARAM_ATTR(ErrorRecoveryLevel, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(IFMarker);
+TPG_PARAM_ATTR(IFMarker, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(OFMarker);
+TPG_PARAM_ATTR(OFMarker, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(IFMarkInt);
+TPG_PARAM_ATTR(IFMarkInt, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(OFMarkInt);
+TPG_PARAM_ATTR(OFMarkInt, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
+ &iscsi_tpg_param_AuthMethod.attr,
+ &iscsi_tpg_param_HeaderDigest.attr,
+ &iscsi_tpg_param_DataDigest.attr,
+ &iscsi_tpg_param_MaxConnections.attr,
+ &iscsi_tpg_param_TargetAlias.attr,
+ &iscsi_tpg_param_InitialR2T.attr,
+ &iscsi_tpg_param_ImmediateData.attr,
+ &iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
+ &iscsi_tpg_param_MaxBurstLength.attr,
+ &iscsi_tpg_param_FirstBurstLength.attr,
+ &iscsi_tpg_param_DefaultTime2Wait.attr,
+ &iscsi_tpg_param_DefaultTime2Retain.attr,
+ &iscsi_tpg_param_MaxOutstandingR2T.attr,
+ &iscsi_tpg_param_DataPDUInOrder.attr,
+ &iscsi_tpg_param_DataSequenceInOrder.attr,
+ &iscsi_tpg_param_ErrorRecoveryLevel.attr,
+ &iscsi_tpg_param_IFMarker.attr,
+ &iscsi_tpg_param_OFMarker.attr,
+ &iscsi_tpg_param_IFMarkInt.attr,
+ &iscsi_tpg_param_OFMarkInt.attr,
+ NULL,
+};
+
+/* End items for lio_target_tpg_param_cit */
+
+/* Start items for lio_target_tpg_cit */
+
+static ssize_t lio_target_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ ssize_t len;
+
+ spin_lock(&tpg->tpg_state_lock);
+ len = sprintf(page, "%d\n",
+ (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
+ spin_unlock(&tpg->tpg_state_lock);
+
+ return len;
+}
+
+static ssize_t lio_target_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ char *endptr;
+ u32 op;
+ int ret = 0;
+
+ op = simple_strtoul(page, &endptr, 0);
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %u\n", op);
+ return -EINVAL;
+ }
+
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (op) {
+ ret = iscsit_tpg_enable_portal_group(tpg);
+ if (ret < 0)
+ goto out;
+ } else {
+ /*
+ * iscsit_tpg_disable_portal_group() assumes force=1
+ */
+ ret = iscsit_tpg_disable_portal_group(tpg, 1);
+ if (ret < 0)
+ goto out;
+ }
+
+ iscsit_put_tpg(tpg);
+ return count;
+out:
+ iscsit_put_tpg(tpg);
+ return -EINVAL;
+}
+
+TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_attrs[] = {
+ &lio_target_tpg_enable.attr,
+ NULL,
+};
+
+/* End items for lio_target_tpg_cit */
+
+/* Start items for lio_target_tiqn_cit */
+
+struct se_portal_group *lio_target_tiqn_addtpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tiqn *tiqn;
+ char *tpgt_str, *end_ptr;
+ int ret = 0;
+ unsigned short int tpgt;
+
+ tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+ /*
+ * Only tpgt_# directory groups can be created below
+ * target/iscsi/iqn.superturodiskarry/
+ */
+ tpgt_str = strstr(name, "tpgt_");
+ if (!tpgt_str) {
+ pr_err("Unable to locate \"tpgt_#\" directory"
+ " group\n");
+ return NULL;
+ }
+ tpgt_str += 5; /* Skip ahead of "tpgt_" */
+ tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+
+ tpg = iscsit_alloc_portal_group(tiqn, tpgt);
+ if (!tpg)
+ return NULL;
+
+ ret = core_tpg_register(
+ &lio_target_fabric_configfs->tf_ops,
+ wwn, &tpg->tpg_se_tpg, (void *)tpg,
+ TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0)
+ return NULL;
+
+ ret = iscsit_tpg_add_portal_group(tiqn, tpg);
+ if (ret != 0)
+ goto out;
+
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n",
+ name);
+ return &tpg->tpg_se_tpg;
+out:
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+ kfree(tpg);
+ return NULL;
+}
+
+void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tiqn *tiqn;
+
+ tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ tiqn = tpg->tpg_tiqn;
+ /*
+ * iscsit_tpg_del_portal_group() assumes force=1
+ */
+ pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n");
+ iscsit_tpg_del_portal_group(tiqn, tpg, 1);
+}
+
+/* End items for lio_target_tiqn_cit */
+
+/* Start LIO-Target TIQN struct config_item lio_target_cit */
+
+static ssize_t lio_target_wwn_show_attr_lio_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
+}
+
+TF_WWN_ATTR_RO(lio_target, lio_version);
+
+static struct configfs_attribute *lio_target_wwn_attrs[] = {
+ &lio_target_wwn_lio_version.attr,
+ NULL,
+};
+
+struct se_wwn *lio_target_call_coreaddtiqn(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct config_group *stats_cg;
+ struct iscsi_tiqn *tiqn;
+
+ tiqn = iscsit_add_tiqn((unsigned char *)name);
+ if (IS_ERR(tiqn))
+ return ERR_CAST(tiqn);
+ /*
+ * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
+ */
+ stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
+
+ stats_cg->default_groups = kzalloc(sizeof(struct config_group *) * 6,
+ GFP_KERNEL);
+ if (!stats_cg->default_groups) {
+ pr_err("Unable to allocate memory for"
+ " stats_cg->default_groups\n");
+ iscsit_del_tiqn(tiqn);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
+ stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
+ stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
+ stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
+ stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
+ stats_cg->default_groups[5] = NULL;
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
+ "iscsi_instance", &iscsi_stat_instance_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
+ "iscsi_sess_err", &iscsi_stat_sess_err_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
+ "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
+ "iscsi_login_stats", &iscsi_stat_login_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
+ "iscsi_logout_stats", &iscsi_stat_logout_cit);
+
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+ " %s\n", name);
+ return &tiqn->tiqn_wwn;
+}
+
+void lio_target_call_coredeltiqn(
+ struct se_wwn *wwn)
+{
+ struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+ struct config_item *df_item;
+ struct config_group *stats_cg;
+ int i;
+
+ stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
+ for (i = 0; stats_cg->default_groups[i]; i++) {
+ df_item = &stats_cg->default_groups[i]->cg_item;
+ stats_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(stats_cg->default_groups);
+
+ pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
+ tiqn->tiqn);
+ iscsit_del_tiqn(tiqn);
+}
+
+/* End LIO-Target TIQN struct config_item lio_target_cit */
+
+/* Start lio_target_discovery_auth_cit */
+
+#define DEF_DISC_AUTH_STR(name, flags) \
+ __DEF_NACL_AUTH_STR(disc, name, flags) \
+static ssize_t iscsi_disc_show_##name( \
+ struct target_fabric_configfs *tf, \
+ char *page) \
+{ \
+ return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
+ page); \
+} \
+static ssize_t iscsi_disc_store_##name( \
+ struct target_fabric_configfs *tf, \
+ const char *page, \
+ size_t count) \
+{ \
+ return __iscsi_disc_store_##name(&iscsit_global->discovery_acl, \
+ page, count); \
+}
+
+#define DEF_DISC_AUTH_INT(name) \
+ __DEF_NACL_AUTH_INT(disc, name) \
+static ssize_t iscsi_disc_show_##name( \
+ struct target_fabric_configfs *tf, \
+ char *page) \
+{ \
+ return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
+ page); \
+}
+
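+/*
+ * The wrappers generated above forward discovery auth show/store requests
+ * to the per-NodeACL auth helpers, always operating on the global
+ * iscsit_global->discovery_acl.
+ */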
+#define DISC_AUTH_ATTR(_name, _mode) TF_DISC_ATTR(iscsi, _name, _mode)
+#define DISC_AUTH_ATTR_RO(_name) TF_DISC_ATTR_RO(iscsi, _name)
+
+/*
+ * One-way authentication userid
+ */
+DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
+DISC_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
+/*
+ * One-way authentication password
+ */
+DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
+DISC_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
+/*
+ * Enforce mutual authentication
+ */
+DEF_DISC_AUTH_INT(authenticate_target);
+DISC_AUTH_ATTR_RO(authenticate_target);
+/*
+ * Mutual authentication userid
+ */
+DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+DISC_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
+/*
+ * Mutual authentication password
+ */
+DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+DISC_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
+
+/*
+ * enforce_discovery_auth
+ */
+static ssize_t iscsi_disc_show_enforce_discovery_auth(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
+
+ return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
+}
+
+static ssize_t iscsi_disc_store_enforce_discovery_auth(
+ struct target_fabric_configfs *tf,
+ const char *page,
+ size_t count)
+{
+ struct iscsi_param *param;
+ struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
+ char *endptr;
+ u32 op;
+
+ op = simple_strtoul(page, &endptr, 0);
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for enforce_discovery_auth:"
+ " %u\n", op);
+ return -EINVAL;
+ }
+
+ if (!discovery_tpg) {
+ pr_err("iscsit_global->discovery_tpg is NULL\n");
+ return -EINVAL;
+ }
+
+ param = iscsi_find_param_from_key(AUTHMETHOD,
+ discovery_tpg->param_list);
+ if (!param)
+ return -EINVAL;
+
+ if (op) {
+ /*
+ * Reset the AuthMethod key to CHAP.
+ */
+ if (iscsi_update_param_value(param, CHAP) < 0)
+ return -EINVAL;
+
+ discovery_tpg->tpg_attrib.authentication = 1;
+ iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1;
+ pr_debug("LIO-CORE[0] Successfully enabled"
+ " authentication enforcement for iSCSI"
+ " Discovery TPG\n");
+ } else {
+ /*
+ * Reset the AuthMethod key to CHAP,None
+ */
+ if (iscsi_update_param_value(param, "CHAP,None") < 0)
+ return -EINVAL;
+
+ discovery_tpg->tpg_attrib.authentication = 0;
+ iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0;
+ pr_debug("LIO-CORE[0] Successfully disabled"
+ " authentication enforcement for iSCSI"
+ " Discovery TPG\n");
+ }
+
+ return count;
+}
+
+DISC_AUTH_ATTR(enforce_discovery_auth, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
+ &iscsi_disc_userid.attr,
+ &iscsi_disc_password.attr,
+ &iscsi_disc_authenticate_target.attr,
+ &iscsi_disc_userid_mutual.attr,
+ &iscsi_disc_password_mutual.attr,
+ &iscsi_disc_enforce_discovery_auth.attr,
+ NULL,
+};
+
+/* End lio_target_discovery_auth_cit */
+
+/* Start functions for target_core_fabric_ops */
+
+static char *iscsi_get_fabric_name(void)
+{
+ return "iSCSI";
+}
+
+static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ return cmd->init_task_tag;
+}
+
+static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ return cmd->i_state;
+}
+
+static int iscsi_is_state_remove(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ return (cmd->i_state == ISTATE_REMOVE);
+}
+
+static int lio_sess_logged_in(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ int ret;
+ /*
+ * Called with spin_lock_bh(&tpg_lock); and
+ * spin_lock(&se_tpg->session_lock); held.
+ */
+ spin_lock(&sess->conn_lock);
+ ret = (sess->session_state != TARG_SESS_STATE_LOGGED_IN);
+ spin_unlock(&sess->conn_lock);
+
+ return ret;
+}
+
+static u32 lio_sess_get_index(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ return sess->session_index;
+}
+
+static u32 lio_sess_get_initiator_sid(
+ struct se_session *se_sess,
+ unsigned char *buf,
+ u32 size)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ /*
+ * iSCSI Initiator Session Identifier from RFC-3720.
+ */
+ return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
+ sess->isid[0], sess->isid[1], sess->isid[2],
+ sess->isid[3], sess->isid[4], sess->isid[5]);
+}
+
+static int lio_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->i_state = ISTATE_SEND_DATAIN;
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+}
+
+static int lio_write_pending(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
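+ /*
+ * With neither ImmediateData nor unsolicited Data-Out available, the
+ * WRITE payload must be explicitly requested from the initiator via R2T.
+ */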
+ if (!cmd->immediate_data && !cmd->unsolicited_data)
+ return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
+
+ return 0;
+}
+
+static int lio_write_pending_status(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ int ret;
+
+ spin_lock_bh(&cmd->istate_lock);
+ ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
+ spin_unlock_bh(&cmd->istate_lock);
+
+ return ret;
+}
+
+static int lio_queue_status(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+}
+
+static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+ unsigned char *buffer = se_cmd->sense_buffer;
+ /*
+ * From RFC-3720 10.4.7. Data Segment - Sense and Response Data Segment
+ * 16-bit SenseLength.
+ */
+ buffer[0] = ((sense_length >> 8) & 0xff);
+ buffer[1] = (sense_length & 0xff);
+ /*
+ * Return two byte offset into allocated sense_buffer.
+ */
+ return 2;
+}
+
+static u16 lio_get_fabric_sense_len(void)
+{
+ /*
+ * Return two byte offset into allocated sense_buffer.
+ */
+ return 2;
+}
+
+static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->i_state = ISTATE_SEND_TASKMGTRSP;
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+}
+
+static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return &tpg->tpg_tiqn->tiqn[0];
+}
+
+static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return tpg->tpgt;
+}
+
+static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+}
+
+static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int lio_tpg_check_demo_mode_write_protect(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int lio_tpg_check_prod_mode_write_protect(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static void lio_tpg_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_acl)
+{
+ struct iscsi_node_acl *acl = container_of(se_acl,
+ struct iscsi_node_acl, se_node_acl);
+ kfree(acl);
+}
+
+/*
+ * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
+ *
+ * Also, this function calls iscsit_inc_session_usage_count() on the
+ * struct iscsi_session in question.
+ */
+static int lio_tpg_shutdown_session(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
+ return 0;
+ }
+ atomic_set(&sess->session_reinstatement, 1);
+ spin_unlock(&sess->conn_lock);
+
+ iscsit_inc_session_usage_count(sess);
+ iscsit_stop_time2retain_timer(sess);
+
+ return 1;
+}
+
+/*
+ * Calls iscsit_dec_session_usage_count() as inverse of
+ * lio_tpg_shutdown_session()
+ */
+static void lio_tpg_close_session(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ /*
+ * If the iSCSI Session for the iSCSI Initiator Node exists,
+ * forcefully shutdown the iSCSI NEXUS.
+ */
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+ iscsit_close_session(sess);
+}
+
+static void lio_tpg_stop_session(
+ struct se_session *se_sess,
+ int sess_sleep,
+ int conn_sleep)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ iscsit_stop_session(sess, sess_sleep, conn_sleep);
+}
+
+static void lio_tpg_fall_back_to_erl0(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ iscsit_fall_back_to_erl0(sess);
+}
+
+static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return tpg->tpg_tiqn->tiqn_index;
+}
+
+static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
+{
+ struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
+ se_node_acl);
+
+ ISCSI_NODE_ATTRIB(acl)->nacl = acl;
+ iscsit_set_default_node_attribues(acl);
+}
+
+static void lio_release_cmd(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ iscsit_release_cmd(cmd);
+}
+
+/* End functions for target_core_fabric_ops */
+
+int iscsi_target_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric;
+ int ret;
+
+ lio_target_fabric_configfs = NULL;
+ fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
+ if (IS_ERR(fabric)) {
+ pr_err("target_fabric_configfs_init() for"
+ " LIO-Target failed!\n");
+ return PTR_ERR(fabric);
+ }
+ /*
+ * Setup the fabric API of function pointers used by target_core_mod.
+ */
+ fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
+ fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
+ fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
+ fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
+ fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
+ fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
+ fabric->tf_ops.tpg_get_pr_transport_id_len =
+ &iscsi_get_pr_transport_id_len;
+ fabric->tf_ops.tpg_parse_pr_out_transport_id =
+ &iscsi_parse_pr_out_transport_id;
+ fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
+ fabric->tf_ops.tpg_check_demo_mode_cache =
+ &lio_tpg_check_demo_mode_cache;
+ fabric->tf_ops.tpg_check_demo_mode_write_protect =
+ &lio_tpg_check_demo_mode_write_protect;
+ fabric->tf_ops.tpg_check_prod_mode_write_protect =
+ &lio_tpg_check_prod_mode_write_protect;
+ fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
+ fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
+ fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
+ fabric->tf_ops.release_cmd = &lio_release_cmd;
+ fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
+ fabric->tf_ops.close_session = &lio_tpg_close_session;
+ fabric->tf_ops.stop_session = &lio_tpg_stop_session;
+ fabric->tf_ops.fall_back_to_erl0 = &lio_tpg_fall_back_to_erl0;
+ fabric->tf_ops.sess_logged_in = &lio_sess_logged_in;
+ fabric->tf_ops.sess_get_index = &lio_sess_get_index;
+ fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
+ fabric->tf_ops.write_pending = &lio_write_pending;
+ fabric->tf_ops.write_pending_status = &lio_write_pending_status;
+ fabric->tf_ops.set_default_node_attributes =
+ &lio_set_default_node_attributes;
+ fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
+ fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
+ fabric->tf_ops.queue_data_in = &lio_queue_data_in;
+ fabric->tf_ops.queue_status = &lio_queue_status;
+ fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+ fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
+ fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
+ fabric->tf_ops.is_state_remove = &iscsi_is_state_remove;
+ /*
+ * Setup function pointers for generic logic in target_core_fabric_configfs.c
+ */
+ fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
+ fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
+ fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
+ fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
+ fabric->tf_ops.fabric_post_link = NULL;
+ fabric->tf_ops.fabric_pre_unlink = NULL;
+ fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
+ fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
+ fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
+ fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ * struct config_item_type's
+ */
+ TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
+
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() for"
+ " LIO-Target failed!\n");
+ target_fabric_configfs_free(fabric);
+ return ret;
+ }
+
+ lio_target_fabric_configfs = fabric;
+ pr_debug("LIO_TARGET[0] - Set fabric ->"
+ " lio_target_fabric_configfs\n");
+ return 0;
+}
+
+
+void iscsi_target_deregister_configfs(void)
+{
+ if (!lio_target_fabric_configfs)
+ return;
+ /*
+ * Shutdown discovery sessions and disable discovery TPG
+ */
+ if (iscsit_global->discovery_tpg)
+ iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+
+ target_fabric_configfs_deregister(lio_target_fabric_configfs);
+ lio_target_fabric_configfs = NULL;
+ pr_debug("LIO_TARGET[0] - Cleared"
+ " lio_target_fabric_configfs\n");
+}
diff --git a/drivers/target/iscsi/iscsi_target_configfs.h b/drivers/target/iscsi/iscsi_target_configfs.h
new file mode 100644
index 0000000..8cd5a63
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.h
@@ -0,0 +1,7 @@
+#ifndef ISCSI_TARGET_CONFIGFS_H
+#define ISCSI_TARGET_CONFIGFS_H
+
+extern int iscsi_target_register_configfs(void);
+extern void iscsi_target_deregister_configfs(void);
+
+#endif /* ISCSI_TARGET_CONFIGFS_H */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
new file mode 100644
index 0000000..470ed55
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -0,0 +1,859 @@
+#ifndef ISCSI_TARGET_CORE_H
+#define ISCSI_TARGET_CORE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+
+#define ISCSIT_VERSION "v4.1.0-rc1"
+#define ISCSI_MAX_DATASN_MISSING_COUNT 16
+#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
+#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
+#define SECONDS_FOR_ASYNC_LOGOUT 10
+#define SECONDS_FOR_ASYNC_TEXT 10
+#define SECONDS_FOR_LOGOUT_COMP 15
+#define WHITE_SPACE " \t\v\f\n\r"
+
+/* struct iscsi_node_attrib sanity values */
+#define NA_DATAOUT_TIMEOUT 3
+#define NA_DATAOUT_TIMEOUT_MAX 60
+#define NA_DATAOUT_TIMEOUT_MIX 2
+#define NA_DATAOUT_TIMEOUT_RETRIES 5
+#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+#define NA_NOPIN_TIMEOUT 5
+#define NA_NOPIN_TIMEOUT_MAX 60
+#define NA_NOPIN_TIMEOUT_MIN 3
+#define NA_NOPIN_RESPONSE_TIMEOUT 5
+#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
+#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
+#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
+#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
+#define NA_RANDOM_R2T_OFFSETS 0
+#define NA_DEFAULT_ERL 0
+#define NA_DEFAULT_ERL_MAX 2
+#define NA_DEFAULT_ERL_MIN 0
+
+/* struct iscsi_tpg_attrib sanity values */
+#define TA_AUTHENTICATION 1
+#define TA_LOGIN_TIMEOUT 15
+#define TA_LOGIN_TIMEOUT_MAX 30
+#define TA_LOGIN_TIMEOUT_MIN 5
+#define TA_NETIF_TIMEOUT 2
+#define TA_NETIF_TIMEOUT_MAX 15
+#define TA_NETIF_TIMEOUT_MIN 2
+#define TA_GENERATE_NODE_ACLS 0
+#define TA_DEFAULT_CMDSN_DEPTH 16
+#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
+#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
+#define TA_CACHE_DYNAMIC_ACLS 0
+/* Enabled by default in demo mode (generic_node_acls=1) */
+#define TA_DEMO_MODE_WRITE_PROTECT 1
+/* Disabled by default in production mode w/ explicit ACLs */
+#define TA_PROD_MODE_WRITE_PROTECT 0
+#define TA_CACHE_CORE_NPS 0
+
+enum tpg_np_network_transport_table {
+ ISCSI_TCP = 0,
+ ISCSI_SCTP_TCP = 1,
+ ISCSI_SCTP_UDP = 2,
+ ISCSI_IWARP_TCP = 3,
+ ISCSI_IWARP_SCTP = 4,
+ ISCSI_INFINIBAND = 5,
+};
+
+/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
+enum target_conn_state_table {
+ TARG_CONN_STATE_FREE = 0x1,
+ TARG_CONN_STATE_XPT_UP = 0x3,
+ TARG_CONN_STATE_IN_LOGIN = 0x4,
+ TARG_CONN_STATE_LOGGED_IN = 0x5,
+ TARG_CONN_STATE_IN_LOGOUT = 0x6,
+ TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7,
+ TARG_CONN_STATE_CLEANUP_WAIT = 0x8,
+};
+
+/* RFC-3720 7.3.2 Session State Diagram for a Target */
+enum target_sess_state_table {
+ TARG_SESS_STATE_FREE = 0x1,
+ TARG_SESS_STATE_ACTIVE = 0x2,
+ TARG_SESS_STATE_LOGGED_IN = 0x3,
+ TARG_SESS_STATE_FAILED = 0x4,
+ TARG_SESS_STATE_IN_CONTINUE = 0x5,
+};
+
+/* struct iscsi_data_count->type */
+enum data_count_type {
+ ISCSI_RX_DATA = 1,
+ ISCSI_TX_DATA = 2,
+};
+
+/* struct iscsi_datain_req->dr_complete */
+enum datain_req_comp_table {
+ DATAIN_COMPLETE_NORMAL = 1,
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
+ DATAIN_COMPLETE_CONNECTION_RECOVERY = 3,
+};
+
+/* struct iscsi_datain_req->recovery */
+enum datain_req_rec_table {
+ DATAIN_WITHIN_COMMAND_RECOVERY = 1,
+ DATAIN_CONNECTION_RECOVERY = 2,
+};
+
+/* struct iscsi_portal_group->state */
+enum tpg_state_table {
+ TPG_STATE_FREE = 0,
+ TPG_STATE_ACTIVE = 1,
+ TPG_STATE_INACTIVE = 2,
+ TPG_STATE_COLD_RESET = 3,
+};
+
+/* struct iscsi_tiqn->tiqn_state */
+enum tiqn_state_table {
+ TIQN_STATE_ACTIVE = 1,
+ TIQN_STATE_SHUTDOWN = 2,
+};
+
+/* struct iscsi_cmd->cmd_flags */
+enum cmd_flags_table {
+ ICF_GOT_LAST_DATAOUT = 0x00000001,
+ ICF_GOT_DATACK_SNACK = 0x00000002,
+ ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004,
+ ICF_SENT_LAST_R2T = 0x00000008,
+ ICF_WITHIN_COMMAND_RECOVERY = 0x00000010,
+ ICF_CONTIG_MEMORY = 0x00000020,
+ ICF_ATTACHED_TO_RQUEUE = 0x00000040,
+ ICF_OOO_CMDSN = 0x00000080,
+ ICF_REJECT_FAIL_CONN = 0x00000100,
+};
+
+/* struct iscsi_cmd->i_state */
+enum cmd_i_state_table {
+ ISTATE_NO_STATE = 0,
+ ISTATE_NEW_CMD = 1,
+ ISTATE_DEFERRED_CMD = 2,
+ ISTATE_UNSOLICITED_DATA = 3,
+ ISTATE_RECEIVE_DATAOUT = 4,
+ ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
+ ISTATE_RECEIVED_LAST_DATAOUT = 6,
+ ISTATE_WITHIN_DATAOUT_RECOVERY = 7,
+ ISTATE_IN_CONNECTION_RECOVERY = 8,
+ ISTATE_RECEIVED_TASKMGT = 9,
+ ISTATE_SEND_ASYNCMSG = 10,
+ ISTATE_SENT_ASYNCMSG = 11,
+ ISTATE_SEND_DATAIN = 12,
+ ISTATE_SEND_LAST_DATAIN = 13,
+ ISTATE_SENT_LAST_DATAIN = 14,
+ ISTATE_SEND_LOGOUTRSP = 15,
+ ISTATE_SENT_LOGOUTRSP = 16,
+ ISTATE_SEND_NOPIN = 17,
+ ISTATE_SENT_NOPIN = 18,
+ ISTATE_SEND_REJECT = 19,
+ ISTATE_SENT_REJECT = 20,
+ ISTATE_SEND_R2T = 21,
+ ISTATE_SENT_R2T = 22,
+ ISTATE_SEND_R2T_RECOVERY = 23,
+ ISTATE_SENT_R2T_RECOVERY = 24,
+ ISTATE_SEND_LAST_R2T = 25,
+ ISTATE_SENT_LAST_R2T = 26,
+ ISTATE_SEND_LAST_R2T_RECOVERY = 27,
+ ISTATE_SENT_LAST_R2T_RECOVERY = 28,
+ ISTATE_SEND_STATUS = 29,
+ ISTATE_SEND_STATUS_BROKEN_PC = 30,
+ ISTATE_SENT_STATUS = 31,
+ ISTATE_SEND_STATUS_RECOVERY = 32,
+ ISTATE_SENT_STATUS_RECOVERY = 33,
+ ISTATE_SEND_TASKMGTRSP = 34,
+ ISTATE_SENT_TASKMGTRSP = 35,
+ ISTATE_SEND_TEXTRSP = 36,
+ ISTATE_SENT_TEXTRSP = 37,
+ ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
+ ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
+ ISTATE_SEND_NOPIN_NO_RESPONSE = 40,
+ ISTATE_REMOVE = 41,
+ ISTATE_FREE = 42,
+};
+
+/* Used for iscsi_recover_cmdsn() return values */
+enum recover_cmdsn_ret_table {
+ CMDSN_ERROR_CANNOT_RECOVER = -1,
+ CMDSN_NORMAL_OPERATION = 0,
+ CMDSN_LOWER_THAN_EXP = 1,
+ CMDSN_HIGHER_THAN_EXP = 2,
+};
+
+/* Used for iscsi_handle_immediate_data() return values */
+enum immedate_data_ret_table {
+ IMMEDIATE_DATA_CANNOT_RECOVER = -1,
+ IMMEDIATE_DATA_NORMAL_OPERATION = 0,
+ IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
+};
+
+/* Used for iscsi_decide_dataout_action() return values */
+enum dataout_action_ret_table {
+ DATAOUT_CANNOT_RECOVER = -1,
+ DATAOUT_NORMAL = 0,
+ DATAOUT_SEND_R2T = 1,
+ DATAOUT_SEND_TO_TRANSPORT = 2,
+ DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
+};
+
+/* Used for struct iscsi_node_auth->naf_flags */
+enum naf_flags_table {
+ NAF_USERID_SET = 0x01,
+ NAF_PASSWORD_SET = 0x02,
+ NAF_USERID_IN_SET = 0x04,
+ NAF_PASSWORD_IN_SET = 0x08,
+};
+
+/* Used by various struct timer_list to manage iSCSI specific state */
+enum iscsi_timer_flags_table {
+ ISCSI_TF_RUNNING = 0x01,
+ ISCSI_TF_STOP = 0x02,
+ ISCSI_TF_EXPIRED = 0x04,
+};
+
+/* Used for struct iscsi_np->np_flags */
+enum np_flags_table {
+ NPF_IP_NETWORK = 0x00,
+ NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
+};
+
+/* Used for struct iscsi_np->np_thread_state */
+enum np_thread_state_table {
+ ISCSI_NP_THREAD_ACTIVE = 1,
+ ISCSI_NP_THREAD_INACTIVE = 2,
+ ISCSI_NP_THREAD_RESET = 3,
+ ISCSI_NP_THREAD_SHUTDOWN = 4,
+ ISCSI_NP_THREAD_EXIT = 5,
+};
+
+struct iscsi_conn_ops {
+ u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
+ u8 DataDigest; /* [0,1] == [None,CRC32C] */
+ u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
+ u8 OFMarker; /* [0,1] == [No,Yes] */
+ u8 IFMarker; /* [0,1] == [No,Yes] */
+ u32 OFMarkInt; /* [1..65535] */
+ u32 IFMarkInt; /* [1..65535] */
+};
+
+struct iscsi_sess_ops {
+ char InitiatorName[224];
+ char InitiatorAlias[256];
+ char TargetName[224];
+ char TargetAlias[256];
+ char TargetAddress[256];
+ u16 TargetPortalGroupTag; /* [0..65535] */
+ u16 MaxConnections; /* [1..65535] */
+ u8 InitialR2T; /* [0,1] == [No,Yes] */
+ u8 ImmediateData; /* [0,1] == [No,Yes] */
+ u32 MaxBurstLength; /* [512..2**24-1] */
+ u32 FirstBurstLength; /* [512..2**24-1] */
+ u16 DefaultTime2Wait; /* [0..3600] */
+ u16 DefaultTime2Retain; /* [0..3600] */
+ u16 MaxOutstandingR2T; /* [1..65535] */
+ u8 DataPDUInOrder; /* [0,1] == [No,Yes] */
+ u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
+ u8 ErrorRecoveryLevel; /* [0..2] */
+ u8 SessionType; /* [0,1] == [Normal,Discovery]*/
+};
+
+struct iscsi_queue_req {
+ int state;
+ struct iscsi_cmd *cmd;
+ struct list_head qr_list;
+};
+
+struct iscsi_data_count {
+ int data_length;
+ int sync_and_steering;
+ enum data_count_type type;
+ u32 iov_count;
+ u32 ss_iov_count;
+ u32 ss_marker_count;
+ struct kvec *iov;
+};
+
+struct iscsi_param_list {
+ struct list_head param_list;
+ struct list_head extra_response_list;
+};
+
+struct iscsi_datain_req {
+ enum datain_req_comp_table dr_complete;
+ int generate_recovery_values;
+ enum datain_req_rec_table recovery;
+ u32 begrun;
+ u32 runlength;
+ u32 data_length;
+ u32 data_offset;
+ u32 data_offset_end;
+ u32 data_sn;
+ u32 next_burst_len;
+ u32 read_data_done;
+ u32 seq_send_order;
+ struct list_head dr_list;
+} ____cacheline_aligned;
+
+struct iscsi_ooo_cmdsn {
+ u16 cid;
+ u32 batch_count;
+ u32 cmdsn;
+ u32 exp_cmdsn;
+ struct iscsi_cmd *cmd;
+ struct list_head ooo_list;
+} ____cacheline_aligned;
+
+struct iscsi_datain {
+ u8 flags;
+ u32 data_sn;
+ u32 length;
+ u32 offset;
+} ____cacheline_aligned;
+
+struct iscsi_r2t {
+ int seq_complete;
+ int recovery_r2t;
+ int sent_r2t;
+ u32 r2t_sn;
+ u32 offset;
+ u32 targ_xfer_tag;
+ u32 xfer_len;
+ struct list_head r2t_list;
+} ____cacheline_aligned;
+
+struct iscsi_cmd {
+ enum iscsi_timer_flags_table dataout_timer_flags;
+ /* DataOUT timeout retries */
+ u8 dataout_timeout_retries;
+ /* Within command recovery count */
+ u8 error_recovery_count;
+ /* iSCSI dependent state for out of order CmdSNs */
+ enum cmd_i_state_table deferred_i_state;
+ /* iSCSI dependent state */
+ enum cmd_i_state_table i_state;
+ /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
+ u8 immediate_cmd;
+ /* Immediate data present */
+ u8 immediate_data;
+ /* iSCSI Opcode */
+ u8 iscsi_opcode;
+ /* iSCSI Response Code */
+ u8 iscsi_response;
+ /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+ u8 logout_reason;
+ /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+ u8 logout_response;
+ /* MaxCmdSN has been incremented */
+ u8 maxcmdsn_inc;
+ /* Immediate Unsolicited Dataout */
+ u8 unsolicited_data;
+ /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
+ u16 logout_cid;
+ /* Command flags */
+ enum cmd_flags_table cmd_flags;
+ /* Initiator Task Tag assigned from Initiator */
+ u32 init_task_tag;
+ /* Target Transfer Tag assigned from Target */
+ u32 targ_xfer_tag;
+ /* CmdSN assigned from Initiator */
+ u32 cmd_sn;
+ /* ExpStatSN assigned from Initiator */
+ u32 exp_stat_sn;
+ /* StatSN assigned to this ITT */
+ u32 stat_sn;
+ /* DataSN Counter */
+ u32 data_sn;
+ /* R2TSN Counter */
+ u32 r2t_sn;
+ /* Last DataSN acknowledged via DataAck SNACK */
+ u32 acked_data_sn;
+ /* Used for echoing NOPOUT ping data */
+ u32 buf_ptr_size;
+ /* Used to store DataDigest */
+ u32 data_crc;
+ /* Total size in bytes associated with command */
+ u32 data_length;
+ /* Counter for MaxOutstandingR2T */
+ u32 outstanding_r2ts;
+ /* Next R2T Offset when DataSequenceInOrder=Yes */
+ u32 r2t_offset;
+ /* Iovec current and orig count for iscsi_cmd->iov_data */
+ u32 iov_data_count;
+ u32 orig_iov_data_count;
+ /* Number of miscellaneous iovecs used for IP stack calls */
+ u32 iov_misc_count;
+ /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ u32 pdu_count;
+ /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+ u32 pdu_send_order;
+ /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ u32 pdu_start;
+ u32 residual_count;
+ /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+ u32 seq_send_order;
+ /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+ u32 seq_count;
+ /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+ u32 seq_no;
+ /* Lowest offset in current DataOUT sequence */
+ u32 seq_start_offset;
+ /* Highest offset in current DataOUT sequence */
+ u32 seq_end_offset;
+ /* Total size in bytes received so far of READ data */
+ u32 read_data_done;
+ /* Total size in bytes received so far of WRITE data */
+ u32 write_data_done;
+ /* Counter for FirstBurstLength key */
+ u32 first_burst_len;
+ /* Counter for MaxBurstLength key */
+ u32 next_burst_len;
+ /* Transfer size used for IP stack calls */
+ u32 tx_size;
+ /* Buffer used for various purposes */
+ void *buf_ptr;
+ /* See include/linux/dma-mapping.h */
+ enum dma_data_direction data_direction;
+ /* iSCSI PDU Header + CRC */
+ unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
+ /* Number of times struct iscsi_cmd is present in immediate queue */
+ atomic_t immed_queue_count;
+ atomic_t response_queue_count;
+ atomic_t transport_sent;
+ spinlock_t datain_lock;
+ spinlock_t dataout_timeout_lock;
+ /* spinlock for protecting struct iscsi_cmd->i_state */
+ spinlock_t istate_lock;
+ /* spinlock for adding within command recovery entries */
+ spinlock_t error_lock;
+ /* spinlock for adding R2Ts */
+ spinlock_t r2t_lock;
+ /* DataIN List */
+ struct list_head datain_list;
+ /* R2T List */
+ struct list_head cmd_r2t_list;
+ struct completion reject_comp;
+ /* Timer for DataOUT */
+ struct timer_list dataout_timer;
+ /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
+ struct kvec *iov_data;
+ /* Iovecs for miscellaneous purposes */
+#define ISCSI_MISC_IOVECS 5
+ struct kvec iov_misc[ISCSI_MISC_IOVECS];
+ /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
+ struct iscsi_pdu *pdu_list;
+ /* Current struct iscsi_pdu used for DataPDUInOrder=No */
+ struct iscsi_pdu *pdu_ptr;
+ /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
+ struct iscsi_seq *seq_list;
+ /* Current struct iscsi_seq used for DataSequenceInOrder=No */
+ struct iscsi_seq *seq_ptr;
+ /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
+ struct iscsi_tmr_req *tmr_req;
+ /* Connection this command is allegiant to */
+ struct iscsi_conn *conn;
+ /* Pointer to connection recovery entry */
+ struct iscsi_conn_recovery *cr;
+ /* Session the command is part of, used for connection recovery */
+ struct iscsi_session *sess;
+ /* list_head for connection list */
+ struct list_head i_list;
+ /* The TCM I/O descriptor that is accessed via container_of() */
+ struct se_cmd se_cmd;
+ /* Sense buffer that will be mapped into outgoing status */
+#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
+ unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
+
+ struct scatterlist *t_mem_sg;
+ u32 t_mem_sg_nents;
+
+ u32 padding;
+ u8 pad_bytes[4];
+
+ struct scatterlist *first_data_sg;
+ u32 first_data_sg_off;
+ u32 kmapped_nents;
+
+} ____cacheline_aligned;
+
+struct iscsi_tmr_req {
+ bool task_reassign:1;
+ u32 ref_cmd_sn;
+ u32 exp_data_sn;
+ struct iscsi_conn_recovery *conn_recovery;
+ struct se_tmr_req *se_tmr_req;
+};
+
+struct iscsi_conn {
+ /* Authentication Successful for this connection */
+ u8 auth_complete;
+ /* State connection is currently in */
+ u8 conn_state;
+ u8 conn_logout_reason;
+ u8 network_transport;
+ enum iscsi_timer_flags_table nopin_timer_flags;
+ enum iscsi_timer_flags_table nopin_response_timer_flags;
+ u8 tx_immediate_queue;
+ u8 tx_response_queue;
+ /* Used to know what thread encountered a transport failure */
+ u8 which_thread;
+ /* connection id assigned by the Initiator */
+ u16 cid;
+ /* Remote TCP Port */
+ u16 login_port;
+ int net_size;
+ u32 auth_id;
+#define CONNFLAG_SCTP_STRUCT_FILE 0x01
+ u32 conn_flags;
+ /* Used for iscsi_tx_login_rsp() */
+ u32 login_itt;
+ u32 exp_statsn;
+ /* Per connection status sequence number */
+ u32 stat_sn;
+ /* IFMarkInt's Current Value */
+ u32 if_marker;
+ /* OFMarkInt's Current Value */
+ u32 of_marker;
+ /* Used for calculating OFMarker offset to next PDU */
+ u32 of_marker_offset;
+ /* Complete Bad PDU for sending reject */
+ unsigned char bad_hdr[ISCSI_HDR_LEN];
+#define IPV6_ADDRESS_SPACE 48
+ unsigned char login_ip[IPV6_ADDRESS_SPACE];
+ int conn_usage_count;
+ int conn_waiting_on_uc;
+ atomic_t check_immediate_queue;
+ atomic_t conn_logout_remove;
+ atomic_t connection_exit;
+ atomic_t connection_recovery;
+ atomic_t connection_reinstatement;
+ atomic_t connection_wait;
+ atomic_t connection_wait_rcfr;
+ atomic_t sleep_on_conn_wait_comp;
+ atomic_t transport_failed;
+ struct completion conn_post_wait_comp;
+ struct completion conn_wait_comp;
+ struct completion conn_wait_rcfr_comp;
+ struct completion conn_waiting_on_uc_comp;
+ struct completion conn_logout_comp;
+ struct completion tx_half_close_comp;
+ struct completion rx_half_close_comp;
+ /* socket used by this connection */
+ struct socket *sock;
+ struct timer_list nopin_timer;
+ struct timer_list nopin_response_timer;
+ struct timer_list transport_timer;
+ /* Spinlock used for add/deleting cmd's from conn_cmd_list */
+ spinlock_t cmd_lock;
+ spinlock_t conn_usage_lock;
+ spinlock_t immed_queue_lock;
+ spinlock_t nopin_timer_lock;
+ spinlock_t response_queue_lock;
+ spinlock_t state_lock;
+ /* libcrypto RX and TX contexts for crc32c */
+ struct hash_desc conn_rx_hash;
+ struct hash_desc conn_tx_hash;
+ /* Used for scheduling TX and RX connection kthreads */
+ cpumask_var_t conn_cpumask;
+ int conn_rx_reset_cpumask:1;
+ int conn_tx_reset_cpumask:1;
+ /* list_head of struct iscsi_cmd for this connection */
+ struct list_head conn_cmd_list;
+ struct list_head immed_queue_list;
+ struct list_head response_queue_list;
+ struct iscsi_conn_ops *conn_ops;
+ struct iscsi_param_list *param_list;
+ /* Used for per connection auth state machine */
+ void *auth_protocol;
+ struct iscsi_login_thread_s *login_thread;
+ struct iscsi_portal_group *tpg;
+ /* Pointer to parent session */
+ struct iscsi_session *sess;
+ /* Pointer to thread_set in use for this conn's threads */
+ struct iscsi_thread_set *thread_set;
+ /* list_head for session connection list */
+ struct list_head conn_list;
+} ____cacheline_aligned;
+
+struct iscsi_conn_recovery {
+ u16 cid;
+ u32 cmd_count;
+ u32 maxrecvdatasegmentlength;
+ int ready_for_reallegiance;
+ struct list_head conn_recovery_cmd_list;
+ spinlock_t conn_recovery_cmd_lock;
+ struct timer_list time2retain_timer;
+ struct iscsi_session *sess;
+ struct list_head cr_list;
+} ____cacheline_aligned;
+
+struct iscsi_session {
+ u8 initiator_vendor;
+ u8 isid[6];
+ enum iscsi_timer_flags_table time2retain_timer_flags;
+ u8 version_active;
+ u16 cid_called;
+ u16 conn_recovery_count;
+ u16 tsih;
+ /* state session is currently in */
+ u32 session_state;
+ /* session wide counter: initiator assigned task tag */
+ u32 init_task_tag;
+ /* session wide counter: target assigned task tag */
+ u32 targ_xfer_tag;
+ u32 cmdsn_window;
+
+ /* protects cmdsn values */
+ struct mutex cmdsn_mutex;
+ /* session wide counter: expected command sequence number */
+ u32 exp_cmd_sn;
+ /* session wide counter: maximum allowed command sequence number */
+ u32 max_cmd_sn;
+ struct list_head sess_ooo_cmdsn_list;
+
+ /* LIO specific session ID */
+ u32 sid;
+ char auth_type[8];
+ /* unique within the target */
+ int session_index;
+ /* Used for session reference counting */
+ int session_usage_count;
+ int session_waiting_on_uc;
+ u32 cmd_pdus;
+ u32 rsp_pdus;
+ u64 tx_data_octets;
+ u64 rx_data_octets;
+ u32 conn_digest_errors;
+ u32 conn_timeout_errors;
+ u64 creation_time;
+ spinlock_t session_stats_lock;
+ /* Number of active connections */
+ atomic_t nconn;
+ atomic_t session_continuation;
+ atomic_t session_fall_back_to_erl0;
+ atomic_t session_logout;
+ atomic_t session_reinstatement;
+ atomic_t session_stop_active;
+ atomic_t sleep_on_sess_wait_comp;
+ atomic_t transport_wait_cmds;
+ /* connection list */
+ struct list_head sess_conn_list;
+ struct list_head cr_active_list;
+ struct list_head cr_inactive_list;
+ spinlock_t conn_lock;
+ spinlock_t cr_a_lock;
+ spinlock_t cr_i_lock;
+ spinlock_t session_usage_lock;
+ spinlock_t ttt_lock;
+ struct completion async_msg_comp;
+ struct completion reinstatement_comp;
+ struct completion session_wait_comp;
+ struct completion session_waiting_on_uc_comp;
+ struct timer_list time2retain_timer;
+ struct iscsi_sess_ops *sess_ops;
+ struct se_session *se_sess;
+ struct iscsi_portal_group *tpg;
+} ____cacheline_aligned;
+
+struct iscsi_login {
+ u8 auth_complete;
+ u8 checked_for_existing;
+ u8 current_stage;
+ u8 leading_connection;
+ u8 first_request;
+ u8 version_min;
+ u8 version_max;
+ char isid[6];
+ u32 cmd_sn;
+ u32 init_task_tag;
+ u32 initial_exp_statsn;
+ u32 rsp_length;
+ u16 cid;
+ u16 tsih;
+ char *req;
+ char *rsp;
+ char *req_buf;
+ char *rsp_buf;
+} ____cacheline_aligned;
+
+struct iscsi_node_attrib {
+ u32 dataout_timeout;
+ u32 dataout_timeout_retries;
+ u32 default_erl;
+ u32 nopin_timeout;
+ u32 nopin_response_timeout;
+ u32 random_datain_pdu_offsets;
+ u32 random_datain_seq_offsets;
+ u32 random_r2t_offsets;
+ u32 tmr_cold_reset;
+ u32 tmr_warm_reset;
+ struct iscsi_node_acl *nacl;
+};
+
+struct se_dev_entry_s;
+
+struct iscsi_node_auth {
+ enum naf_flags_table naf_flags;
+ int authenticate_target;
+ /* Used for iscsit_global->discovery_auth,
+ * set to zero (auth disabled) by default */
+ int enforce_discovery_auth;
+#define MAX_USER_LEN 256
+#define MAX_PASS_LEN 256
+ char userid[MAX_USER_LEN];
+ char password[MAX_PASS_LEN];
+ char userid_mutual[MAX_USER_LEN];
+ char password_mutual[MAX_PASS_LEN];
+};
+
+#include "iscsi_target_stat.h"
+
+struct iscsi_node_stat_grps {
+ struct config_group iscsi_sess_stats_group;
+ struct config_group iscsi_conn_stats_group;
+};
+
+struct iscsi_node_acl {
+ struct iscsi_node_attrib node_attrib;
+ struct iscsi_node_auth node_auth;
+ struct iscsi_node_stat_grps node_stat_grps;
+ struct se_node_acl se_node_acl;
+};
+
+#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
+
+#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
+#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
+
+struct iscsi_tpg_attrib {
+ u32 authentication;
+ u32 login_timeout;
+ u32 netif_timeout;
+ u32 generate_node_acls;
+ u32 cache_dynamic_acls;
+ u32 default_cmdsn_depth;
+ u32 demo_mode_write_protect;
+ u32 prod_mode_write_protect;
+ struct iscsi_portal_group *tpg;
+};
+
+struct iscsi_np {
+ int np_network_transport;
+ int np_ip_proto;
+ int np_sock_type;
+ enum np_thread_state_table np_thread_state;
+ enum iscsi_timer_flags_table np_login_timer_flags;
+ u32 np_exports;
+ enum np_flags_table np_flags;
+ unsigned char np_ip[IPV6_ADDRESS_SPACE];
+ u16 np_port;
+ spinlock_t np_thread_lock;
+ struct completion np_restart_comp;
+ struct socket *np_socket;
+ struct __kernel_sockaddr_storage np_sockaddr;
+ struct task_struct *np_thread;
+ struct timer_list np_login_timer;
+ struct iscsi_portal_group *np_login_tpg;
+ struct list_head np_list;
+} ____cacheline_aligned;
+
+struct iscsi_tpg_np {
+ struct iscsi_np *tpg_np;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np_parent;
+ struct list_head tpg_np_list;
+ struct list_head tpg_np_child_list;
+ struct list_head tpg_np_parent_list;
+ struct se_tpg_np se_tpg_np;
+ spinlock_t tpg_np_parent_lock;
+};
+
+struct iscsi_portal_group {
+ unsigned char tpg_chap_id;
+ /* TPG State */
+ enum tpg_state_table tpg_state;
+ /* Target Portal Group Tag */
+ u16 tpgt;
+ /* Id assigned to target sessions */
+ u16 ntsih;
+ /* Number of active sessions */
+ u32 nsessions;
+ /* Number of Network Portals available for this TPG */
+ u32 num_tpg_nps;
+ /* Per TPG LIO specific session ID. */
+ u32 sid;
+ /* Spinlock for adding/removing Network Portals */
+ spinlock_t tpg_np_lock;
+ spinlock_t tpg_state_lock;
+ struct se_portal_group tpg_se_tpg;
+ struct mutex tpg_access_lock;
+ struct mutex np_login_lock;
+ struct iscsi_tpg_attrib tpg_attrib;
+ /* Pointer to default list of iSCSI parameters for TPG */
+ struct iscsi_param_list *param_list;
+ struct iscsi_tiqn *tpg_tiqn;
+ struct list_head tpg_gnp_list;
+ struct list_head tpg_list;
+} ____cacheline_aligned;
+
+#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
+#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
+#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
+#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
+#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
+
+struct iscsi_wwn_stat_grps {
+ struct config_group iscsi_stat_group;
+ struct config_group iscsi_instance_group;
+ struct config_group iscsi_sess_err_group;
+ struct config_group iscsi_tgt_attr_group;
+ struct config_group iscsi_login_stats_group;
+ struct config_group iscsi_logout_stats_group;
+};
+
+struct iscsi_tiqn {
+#define ISCSI_IQN_LEN 224
+ unsigned char tiqn[ISCSI_IQN_LEN];
+ enum tiqn_state_table tiqn_state;
+ int tiqn_access_count;
+ u32 tiqn_active_tpgs;
+ u32 tiqn_ntpgs;
+ u32 tiqn_num_tpg_nps;
+ u32 tiqn_nsessions;
+ struct list_head tiqn_list;
+ struct list_head tiqn_tpg_list;
+ spinlock_t tiqn_state_lock;
+ spinlock_t tiqn_tpg_lock;
+ struct se_wwn tiqn_wwn;
+ struct iscsi_wwn_stat_grps tiqn_stat_grps;
+ int tiqn_index;
+ struct iscsi_sess_err_stats sess_err_stats;
+ struct iscsi_login_stats login_stats;
+ struct iscsi_logout_stats logout_stats;
+} ____cacheline_aligned;
+
+#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
+
+struct iscsit_global {
+ /* In core shutdown */
+ u32 in_shutdown;
+ u32 active_ts;
+ /* Unique identifier used for the authentication daemon */
+ u32 auth_id;
+ u32 inactive_ts;
+ /* Thread Set bitmap count */
+ int ts_bitmap_count;
+ /* Thread Set bitmap pointer */
+ unsigned long *ts_bitmap;
+ /* Used for iSCSI discovery session authentication */
+ struct iscsi_node_acl discovery_acl;
+ struct iscsi_portal_group *discovery_tpg;
+};
+
+#endif /* ISCSI_TARGET_CORE_H */
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
new file mode 100644
index 0000000..8c04951
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -0,0 +1,531 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target DataIN value generation functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_datain_values.h"
+
+struct iscsi_datain_req *iscsit_allocate_datain_req(void)
+{
+ struct iscsi_datain_req *dr;
+
+ dr = kmem_cache_zalloc(lio_dr_cache, GFP_ATOMIC);
+ if (!dr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_datain_req\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&dr->dr_list);
+
+ return dr;
+}
+
+void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+{
+ spin_lock(&cmd->datain_lock);
+ list_add_tail(&dr->dr_list, &cmd->datain_list);
+ spin_unlock(&cmd->datain_lock);
+}
+
+void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+{
+ spin_lock(&cmd->datain_lock);
+ list_del(&dr->dr_list);
+ spin_unlock(&cmd->datain_lock);
+
+ kmem_cache_free(lio_dr_cache, dr);
+}
+
+void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
+{
+ struct iscsi_datain_req *dr, *dr_tmp;
+
+ spin_lock(&cmd->datain_lock);
+ list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) {
+ list_del(&dr->dr_list);
+ kmem_cache_free(lio_dr_cache, dr);
+ }
+ spin_unlock(&cmd->datain_lock);
+}
+
+struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
+{
+ struct iscsi_datain_req *dr;
+
+ if (list_empty(&cmd->datain_list)) {
+ pr_err("cmd->datain_list is empty for ITT:"
+ " 0x%08x\n", cmd->init_task_tag);
+ return NULL;
+ }
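+ /*
+ * New requests are added to the tail in iscsit_attach_datain_req(), so
+ * the head of the list is always the oldest outstanding DataIN request.
+ */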
+ list_for_each_entry(dr, &cmd->datain_list, dr_list)
+ break;
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 next_burst_len, read_data_done, read_data_left;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ next_burst_len = (!dr->recovery) ?
+ cmd->next_burst_len : dr->next_burst_len;
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return NULL;
+ }
+
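+ /*
+ * If the remaining READ payload fits in a single PDU and within what is
+ * left of the current MaxBurstLength budget, send it as the final DataIN
+ * and piggyback SCSI status on the same PDU.
+ */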
+ if ((read_data_left <= conn->conn_ops->MaxRecvDataSegmentLength) &&
+ (read_data_left <= (conn->sess->sess_ops->MaxBurstLength -
+ next_burst_len))) {
+ datain->length = read_data_left;
+
+ datain->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+ } else {
+ if ((next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ datain->length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ next_burst_len += datain->length;
+ } else {
+ datain->length = (conn->sess->sess_ops->MaxBurstLength -
+ next_burst_len);
+ next_burst_len = 0;
+
+ datain->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+ }
+ }
+
+ datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ datain->offset = read_data_done;
+
+ if (!dr->recovery) {
+ cmd->next_burst_len = next_burst_len;
+ cmd->read_data_done += datain->length;
+ } else {
+ dr->next_burst_len = next_burst_len;
+ dr->read_data_done += datain->length;
+ }
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 offset, read_data_done, read_data_left, seq_send_order;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct iscsi_seq *seq;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+ seq_send_order = (!dr->recovery) ?
+ cmd->seq_send_order : dr->seq_send_order;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return NULL;
+ }
+
+ seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
+ if (!seq)
+ return NULL;
+
+ seq->sent = 1;
+
+ if (!dr->recovery && !seq->next_burst_len)
+ seq->first_datasn = cmd->data_sn;
+
+ offset = (seq->offset + seq->next_burst_len);
+
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ datain->length = (cmd->data_length - offset);
+ datain->offset = offset;
+
+ datain->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+
+ seq->next_burst_len = 0;
+ seq_send_order++;
+ } else {
+ if ((seq->next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ datain->length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ datain->offset = (seq->offset + seq->next_burst_len);
+
+ seq->next_burst_len += datain->length;
+ } else {
+ datain->length = (conn->sess->sess_ops->MaxBurstLength -
+ seq->next_burst_len);
+ datain->offset = (seq->offset + seq->next_burst_len);
+
+ datain->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+
+ seq->next_burst_len = 0;
+ seq_send_order++;
+ }
+ }
+
+ if ((read_data_done + datain->length) == cmd->data_length)
+ datain->flags |= ISCSI_FLAG_DATA_STATUS;
+
+ datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ if (!dr->recovery) {
+ cmd->seq_send_order = seq_send_order;
+ cmd->read_data_done += datain->length;
+ } else {
+ dr->seq_send_order = seq_send_order;
+ dr->read_data_done += datain->length;
+ }
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_CMD_FINAL)
+ seq->last_datasn = datain->data_sn;
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 next_burst_len, read_data_done, read_data_left;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct iscsi_pdu *pdu;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ next_burst_len = (!dr->recovery) ?
+ cmd->next_burst_len : dr->next_burst_len;
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return dr;
+ }
+
+ pdu = iscsit_get_pdu_holder_for_seq(cmd, NULL);
+ if (!pdu)
+ return dr;
+
+ if ((read_data_done + pdu->length) == cmd->data_length) {
+ pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+ next_burst_len = 0;
+ } else {
+ if ((next_burst_len + conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength)
+ next_burst_len += pdu->length;
+ else {
+ pdu->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+ next_burst_len = 0;
+ }
+ }
+
+ pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ if (!dr->recovery) {
+ cmd->next_burst_len = next_burst_len;
+ cmd->read_data_done += pdu->length;
+ } else {
+ dr->next_burst_len = next_burst_len;
+ dr->read_data_done += pdu->length;
+ }
+
+ datain->flags = pdu->flags;
+ datain->length = pdu->length;
+ datain->offset = pdu->offset;
+ datain->data_sn = pdu->data_sn;
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 read_data_done, read_data_left, seq_send_order;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct iscsi_pdu *pdu;
+ struct iscsi_seq *seq = NULL;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+ seq_send_order = (!dr->recovery) ?
+ cmd->seq_send_order : dr->seq_send_order;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return NULL;
+ }
+
+ seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
+ if (!seq)
+ return NULL;
+
+ seq->sent = 1;
+
+ if (!dr->recovery && !seq->next_burst_len)
+ seq->first_datasn = cmd->data_sn;
+
+ pdu = iscsit_get_pdu_holder_for_seq(cmd, seq);
+ if (!pdu)
+ return NULL;
+
+ if (seq->pdu_send_order == seq->pdu_count) {
+ pdu->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+ seq->next_burst_len = 0;
+ seq_send_order++;
+ } else
+ seq->next_burst_len += pdu->length;
+
+ if ((read_data_done + pdu->length) == cmd->data_length)
+ pdu->flags |= ISCSI_FLAG_DATA_STATUS;
+
+ pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ if (!dr->recovery) {
+ cmd->seq_send_order = seq_send_order;
+ cmd->read_data_done += pdu->length;
+ } else {
+ dr->seq_send_order = seq_send_order;
+ dr->read_data_done += pdu->length;
+ }
+
+ datain->flags = pdu->flags;
+ datain->length = pdu->length;
+ datain->offset = pdu->offset;
+ datain->data_sn = pdu->data_sn;
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_CMD_FINAL)
+ seq->last_datasn = datain->data_sn;
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+struct iscsi_datain_req *iscsit_get_datain_values(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder &&
+ conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_yes_and_yes(cmd, datain);
+ else if (!conn->sess->sess_ops->DataSequenceInOrder &&
+ conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_no_and_yes(cmd, datain);
+ else if (conn->sess->sess_ops->DataSequenceInOrder &&
+ !conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_yes_and_no(cmd, datain);
+ else if (!conn->sess->sess_ops->DataSequenceInOrder &&
+ !conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_no_and_no(cmd, datain);
+
+ return NULL;
+}
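
The function above is a pure 2x2 dispatch on the negotiated DataSequenceInOrder
and DataPDUInOrder session keys. As a rough stand-alone illustration of that
selection (plain user-space C; the handler names and flag values below are
illustrative only, not driver symbols):

#include <stdio.h>

/* Mirrors the DataSequenceInOrder/DataPDUInOrder dispatch above. */
static const char *pick_datain_handler(int seq_in_order, int pdu_in_order)
{
        if (seq_in_order && pdu_in_order)
                return "set_datain_values_yes_and_yes";
        if (!seq_in_order && pdu_in_order)
                return "set_datain_values_no_and_yes";
        if (seq_in_order && !pdu_in_order)
                return "set_datain_values_yes_and_no";
        return "set_datain_values_no_and_no";
}

int main(void)
{
        /* DataSequenceInOrder=Yes, DataPDUInOrder=No */
        printf("%s\n", pick_datain_handler(1, 0));
        return 0;
}
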
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
new file mode 100644
index 0000000..646429a
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -0,0 +1,12 @@
+#ifndef ISCSI_TARGET_DATAIN_VALUES_H
+#define ISCSI_TARGET_DATAIN_VALUES_H
+
+extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
+extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_all_datain_reqs(struct iscsi_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsi_cmd *,
+ struct iscsi_datain *);
+
+#endif /*** ISCSI_TARGET_DATAIN_VALUES_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
new file mode 100644
index 0000000..a19fa5e
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -0,0 +1,87 @@
+/*******************************************************************************
+ * This file contains the iSCSI Virtual Device and Disk Transport
+ * agnostic related functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/scsi_device.h>
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+
+int iscsit_get_lun_for_tmr(
+ struct iscsi_cmd *cmd,
+ u64 lun)
+{
+ u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ return transport_lookup_tmr_lun(&cmd->se_cmd, unpacked_lun);
+}
+
+int iscsit_get_lun_for_cmd(
+ struct iscsi_cmd *cmd,
+ unsigned char *cdb,
+ u64 lun)
+{
+ u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ return transport_lookup_cmd_lun(&cmd->se_cmd, unpacked_lun);
+}
+
+void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
+{
+ struct se_node_acl *se_nacl;
+
+ /*
+ * This is a discovery session, the single queue slot was already
+ * assigned in iscsi_login_zero_tsih(). Since only Logout and
+ * Text Opcodes are allowed during discovery we do not have to worry
+ * about the HBA's queue depth here.
+ */
+ if (sess->sess_ops->SessionType)
+ return;
+
+ se_nacl = sess->se_sess->se_node_acl;
+
+ /*
+ * This is a normal session, set the Session's CmdSN window to the
+ * struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth
+ * has already been validated as a legal value in
+ * core_set_queue_depth_for_node().
+ */
+ sess->cmdsn_window = se_nacl->queue_depth;
+ sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
+}
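
With serial-number arithmetic, the assignment above widens the CmdSN window so
that exactly queue_depth commands fit between ExpCmdSN and MaxCmdSN inclusive.
A minimal user-space sketch of that arithmetic; the numbers are made up and do
not come from real session state:

#include <stdio.h>

int main(void)
{
        unsigned int exp_cmd_sn = 10;   /* example ExpCmdSN after login */
        unsigned int max_cmd_sn = 10;   /* single queue slot assigned at login */
        unsigned int queue_depth = 32;  /* example per-ACL queue depth */

        /* Same arithmetic as iscsit_determine_maxcmdsn() above. */
        max_cmd_sn = (max_cmd_sn + queue_depth) - 1;

        /* The initiator may now have CmdSNs in [ExpCmdSN, MaxCmdSN] in flight. */
        printf("window = %u commands (CmdSN %u..%u)\n",
               max_cmd_sn - exp_cmd_sn + 1, exp_cmd_sn, max_cmd_sn);
        return 0;
}
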
+
+void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
+{
+ if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
+ return;
+
+ cmd->maxcmdsn_inc = 1;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ sess->max_cmd_sn += 1;
+ pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
+ mutex_unlock(&sess->cmdsn_mutex);
+}
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
new file mode 100644
index 0000000..bef1cad
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -0,0 +1,9 @@
+#ifndef ISCSI_TARGET_DEVICE_H
+#define ISCSI_TARGET_DEVICE_H
+
+extern int iscsit_get_lun_for_tmr(struct iscsi_cmd *, u64);
+extern int iscsit_get_lun_for_cmd(struct iscsi_cmd *, unsigned char *, u64);
+extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
+extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+
+#endif /* ISCSI_TARGET_DEVICE_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
new file mode 100644
index 0000000..b7ffc3c
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -0,0 +1,1004 @@
+/******************************************************************************
+ * This file contains error recovery level zero functions used by
+ * the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+/*
+ * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
+ * checks against to determine whether a PDU's Offset+Length is within the current
+ * DataOUT Sequence. Used for DataSequenceInOrder=Yes only.
+ */
+void iscsit_set_dataout_sequence_values(
+ struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ /*
+ * Still set seq_start_offset and seq_end_offset for Unsolicited
+ * DataOUT, even if DataSequenceInOrder=No.
+ */
+ if (cmd->unsolicited_data) {
+ cmd->seq_start_offset = cmd->write_data_done;
+ cmd->seq_end_offset = (cmd->write_data_done +
+ ((cmd->data_length >
+ conn->sess->sess_ops->FirstBurstLength) ?
+ conn->sess->sess_ops->FirstBurstLength : cmd->data_length));
+ return;
+ }
+
+ if (!conn->sess->sess_ops->DataSequenceInOrder)
+ return;
+
+ if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
+ cmd->seq_start_offset = cmd->write_data_done;
+ cmd->seq_end_offset = (cmd->data_length >
+ conn->sess->sess_ops->MaxBurstLength) ?
+ (cmd->write_data_done +
+ conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
+ } else {
+ cmd->seq_start_offset = cmd->seq_end_offset;
+ cmd->seq_end_offset = ((cmd->seq_end_offset +
+ conn->sess->sess_ops->MaxBurstLength) >=
+ cmd->data_length) ? cmd->data_length :
+ (cmd->seq_end_offset +
+ conn->sess->sess_ops->MaxBurstLength);
+ }
+}
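
For DataSequenceInOrder=Yes the code above slides a
[seq_start_offset, seq_end_offset) window forward by MaxBurstLength per DataOUT
sequence until the whole transfer is covered. A stand-alone sketch of that
progression with made-up lengths (not driver code):

#include <stdio.h>

int main(void)
{
        unsigned int data_length = 600 * 1024;          /* example transfer size */
        unsigned int max_burst_length = 256 * 1024;     /* example MaxBurstLength */
        unsigned int start = 0, end = 0;

        while (end < data_length) {
                start = end;
                end = (start + max_burst_length >= data_length) ?
                       data_length : start + max_burst_length;
                printf("DataOUT sequence window: %u..%u\n", start, end);
        }
        return 0;
}
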
+
+static int iscsit_dataout_within_command_recovery_check(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * We do the within-command recovery checks here as it is
+ * the first function called in iscsit_check_pre_dataout().
+ * Basically, if we are in within-command recovery and
+ * the PDU does not contain the offset the sequence needs,
+ * dump the payload.
+ *
+ * This only applies to DataPDUInOrder=Yes, for
+ * DataPDUInOrder=No we only re-request the failed PDU
+ * and check that all PDUs in a sequence are received
+ * upon end of sequence.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
+ (cmd->write_data_done != hdr->offset))
+ goto dump;
+
+ cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
+ } else {
+ struct iscsi_seq *seq;
+
+ seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
+ if (!seq)
+ return DATAOUT_CANNOT_RECOVER;
+ /*
+ * Set the struct iscsi_seq pointer to reuse later.
+ */
+ cmd->seq_ptr = seq;
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ if ((seq->status ==
+ DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
+ ((seq->offset != hdr->offset) ||
+ (seq->data_sn != hdr->datasn)))
+ goto dump;
+ } else {
+ if ((seq->status ==
+ DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
+ (seq->data_sn != hdr->datasn))
+ goto dump;
+ }
+
+ if (seq->status == DATAOUT_SEQUENCE_COMPLETE)
+ goto dump;
+
+ if (seq->status != DATAOUT_SEQUENCE_COMPLETE)
+ seq->status = 0;
+ }
+
+ return DATAOUT_NORMAL;
+
+dump:
+ pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:"
+ " 0x%08x\n", hdr->offset, payload_length, hdr->datasn);
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+}
+
+static int iscsit_dataout_check_unsolicited_sequence(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ u32 first_burst_len;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+
+ if ((hdr->offset < cmd->seq_start_offset) ||
+ ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
+ pr_err("Command ITT: 0x%08x with Offset: %u,"
+ " Length: %u outside of Unsolicited Sequence %u:%u while"
+ " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+ hdr->offset, payload_length, cmd->seq_start_offset,
+ cmd->seq_end_offset);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ first_burst_len = (cmd->first_burst_len + payload_length);
+
+ if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("Total %u bytes exceeds FirstBurstLength: %u"
+ " for this Unsolicited DataOut Burst.\n",
+ first_burst_len, conn->sess->sess_ops->FirstBurstLength);
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ /*
+ * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+ * checks for the current Unsolicited DataOUT Sequence.
+ */
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+ /*
+ * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No; end of
+ * sequence checks are handled in
+ * iscsit_dataout_datapduinorder_no_fbit().
+ */
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ goto out;
+
+ if ((first_burst_len != cmd->data_length) &&
+ (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
+ pr_err("Unsolicited non-immediate data"
+ " received %u does not equal FirstBurstLength: %u, and"
+ " does not equal ExpXferLen %u.\n", first_burst_len,
+ conn->sess->sess_ops->FirstBurstLength,
+ cmd->data_length);
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("Command ITT: 0x%08x reached"
+ " FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
+ " error.\n", cmd->init_task_tag,
+ conn->sess->sess_ops->FirstBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ if (first_burst_len == cmd->data_length) {
+ pr_err("Command ITT: 0x%08x reached"
+ " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
+ " error.\n", cmd->init_task_tag, cmd->data_length);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+
+out:
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_check_sequence(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ u32 next_burst_len;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_seq *seq = NULL;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * For DataSequenceInOrder=Yes: Check that the offset and offset+length
+ * are within the range defined by iscsit_set_dataout_sequence_values().
+ *
+ * For DataSequenceInOrder=No: Check that a struct iscsi_seq exists for the
+ * offset+length tuple.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ /*
+ * Due to the possibility of recovery DataOUT sent by the initiator
+ * fulfilling a Recovery R2T, it's best to just dump the
+ * payload here, instead of erroring out.
+ */
+ if ((hdr->offset < cmd->seq_start_offset) ||
+ ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
+ pr_err("Command ITT: 0x%08x with Offset: %u,"
+ " Length: %u outside of Sequence %u:%u while"
+ " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+ hdr->offset, payload_length, cmd->seq_start_offset,
+ cmd->seq_end_offset);
+
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+ }
+
+ next_burst_len = (cmd->next_burst_len + payload_length);
+ } else {
+ seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
+ if (!seq)
+ return DATAOUT_CANNOT_RECOVER;
+ /*
+ * Set the struct iscsi_seq pointer to reuse later.
+ */
+ cmd->seq_ptr = seq;
+
+ if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+ }
+
+ next_burst_len = (seq->next_burst_len + payload_length);
+ }
+
+ if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
+ pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
+ " Length: %u exceeds MaxBurstLength: %u. protocol"
+ " error.\n", cmd->init_task_tag,
+ (next_burst_len - payload_length),
+ payload_length, conn->sess->sess_ops->MaxBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ /*
+ * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+ * checks for the current DataOUT Sequence.
+ */
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+ /*
+ * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No; end of
+ * sequence checks are handled in
+ * iscsit_dataout_datapduinorder_no_fbit().
+ */
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ goto out;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if ((next_burst_len <
+ conn->sess->sess_ops->MaxBurstLength) &&
+ ((cmd->write_data_done + payload_length) <
+ cmd->data_length)) {
+ pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+ " before end of DataOUT sequence, protocol"
+ " error.\n", cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (next_burst_len < seq->xfer_len) {
+ pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+ " before end of DataOUT sequence, protocol"
+ " error.\n", cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+ } else {
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (next_burst_len ==
+ conn->sess->sess_ops->MaxBurstLength) {
+ pr_err("Command ITT: 0x%08x reached"
+ " MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
+ " not set, protocol error.", cmd->init_task_tag,
+ conn->sess->sess_ops->MaxBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ if ((cmd->write_data_done + payload_length) ==
+ cmd->data_length) {
+ pr_err("Command ITT: 0x%08x reached"
+ " last DataOUT PDU in sequence but ISCSI_FLAG_"
+ "CMD_FINAL is not set, protocol error.\n",
+ cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (next_burst_len == seq->xfer_len) {
+ pr_err("Command ITT: 0x%08x reached"
+ " last DataOUT PDU in sequence but ISCSI_FLAG_"
+ "CMD_FINAL is not set, protocol error.\n",
+ cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+ }
+
+out:
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_check_datasn(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int dump = 0, recovery = 0;
+ u32 data_sn = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * Considering the target has no method of re-requesting DataOUT
+ * by DataSN, if we receive a greater DataSN than expected we
+ * assume the functions for DataPDUInOrder=[Yes,No] below will
+ * handle it.
+ *
+ * If the DataSN is less than expected, dump the payload.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder)
+ data_sn = cmd->data_sn;
+ else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+ data_sn = seq->data_sn;
+ }
+
+ if (hdr->datasn > data_sn) {
+ pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+ " higher than expected 0x%08x.\n", cmd->init_task_tag,
+ hdr->datasn, data_sn);
+ recovery = 1;
+ goto recover;
+ } else if (hdr->datasn < data_sn) {
+ pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+ " lower than expected 0x%08x, discarding payload.\n",
+ cmd->init_task_tag, hdr->datasn, data_sn);
+ dump = 1;
+ goto dump;
+ }
+
+ return DATAOUT_NORMAL;
+
+recover:
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to perform within-command recovery"
+ " while ERL=0.\n");
+ return DATAOUT_CANNOT_RECOVER;
+ }
+dump:
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY :
+ DATAOUT_NORMAL;
+}
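
The check above has three outcomes: a DataSN equal to the expected one is the
normal path, a higher-than-expected DataSN means within-command recovery (only
possible with ERL greater than zero), and a lower-than-expected DataSN simply
discards the payload. A compact stand-alone sketch of the decision only, with
illustrative names and without the actual payload dumping:

#include <stdio.h>

enum { NORMAL, RECOVER, DUMP, FAIL };

static int check_datasn(unsigned int received, unsigned int expected, int erl)
{
        if (received == expected)
                return NORMAL;
        if (received > expected)        /* ahead of us: recover if ERL allows */
                return erl ? RECOVER : FAIL;
        return DUMP;                    /* behind us: discard the payload */
}

int main(void)
{
        printf("%d %d %d\n",
               check_datasn(5, 5, 0),   /* NORMAL */
               check_datasn(7, 5, 1),   /* RECOVER */
               check_datasn(3, 5, 0));  /* DUMP */
        return 0;
}
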
+
+static int iscsit_dataout_pre_datapduinorder_yes(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int dump = 0, recovery = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * For DataSequenceInOrder=Yes: If the offset is greater than the global
+ * DataPDUInOrder=Yes offset counter in struct iscsi_cmd, a protocol error
+ * has occurred, so fail the connection.
+ *
+ * For DataSequenceInOrder=No: If the offset is greater than the per
+ * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq, a protocol
+ * error has occurred, so fail the connection.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (hdr->offset != cmd->write_data_done) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u different than expected %u.\n", cmd->init_task_tag,
+ hdr->offset, cmd->write_data_done);
+ recovery = 1;
+ goto recover;
+ }
+ } else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+
+ if (hdr->offset > seq->offset) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u greater than expected %u.\n", cmd->init_task_tag,
+ hdr->offset, seq->offset);
+ recovery = 1;
+ goto recover;
+ } else if (hdr->offset < seq->offset) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u less than expected %u, discarding payload.\n",
+ cmd->init_task_tag, hdr->offset, seq->offset);
+ dump = 1;
+ goto dump;
+ }
+ }
+
+ return DATAOUT_NORMAL;
+
+recover:
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to perform within-command recovery"
+ " while ERL=0.\n");
+ return DATAOUT_CANNOT_RECOVER;
+ }
+dump:
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return (recovery) ? iscsit_recover_dataout_sequence(cmd,
+ hdr->offset, payload_length) :
+ (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_pre_datapduinorder_no(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_pdu *pdu;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length);
+ if (!pdu)
+ return DATAOUT_CANNOT_RECOVER;
+
+ cmd->pdu_ptr = pdu;
+
+ switch (pdu->status) {
+ case ISCSI_PDU_NOT_RECEIVED:
+ case ISCSI_PDU_CRC_FAILED:
+ case ISCSI_PDU_TIMED_OUT:
+ break;
+ case ISCSI_PDU_RECEIVED_OK:
+ pr_err("Command ITT: 0x%08x received already gotten"
+ " Offset: %u, Length: %u\n", cmd->init_task_tag,
+ hdr->offset, payload_length);
+ return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
+ default:
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
+{
+ struct iscsi_r2t *r2t;
+
+ if (cmd->unsolicited_data)
+ return 0;
+
+ r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
+ if (!r2t)
+ return -1;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ r2t->seq_complete = 1;
+ cmd->outstanding_r2ts--;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+static int iscsit_dataout_update_datapduinorder_no(
+ struct iscsi_cmd *cmd,
+ u32 data_sn,
+ int f_bit)
+{
+ int ret = 0;
+ struct iscsi_pdu *pdu = cmd->pdu_ptr;
+
+ pdu->data_sn = data_sn;
+
+ switch (pdu->status) {
+ case ISCSI_PDU_NOT_RECEIVED:
+ pdu->status = ISCSI_PDU_RECEIVED_OK;
+ break;
+ case ISCSI_PDU_CRC_FAILED:
+ pdu->status = ISCSI_PDU_RECEIVED_OK;
+ break;
+ case ISCSI_PDU_TIMED_OUT:
+ pdu->status = ISCSI_PDU_RECEIVED_OK;
+ break;
+ default:
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ if (f_bit) {
+ ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_post_crc_passed(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int ret, send_r2t = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_seq *seq = NULL;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if (cmd->unsolicited_data) {
+ if ((cmd->first_burst_len + payload_length) ==
+ conn->sess->sess_ops->FirstBurstLength) {
+ if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ payload_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ send_r2t = 1;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ ret = iscsit_dataout_update_datapduinorder_no(cmd,
+ hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ cmd->first_burst_len += payload_length;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder)
+ cmd->data_sn++;
+ else {
+ seq = cmd->seq_ptr;
+ seq->data_sn++;
+ seq->offset += payload_length;
+ }
+
+ if (send_r2t) {
+ if (seq)
+ seq->status = DATAOUT_SEQUENCE_COMPLETE;
+ cmd->first_burst_len = 0;
+ cmd->unsolicited_data = 0;
+ }
+ } else {
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if ((cmd->next_burst_len + payload_length) ==
+ conn->sess->sess_ops->MaxBurstLength) {
+ if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ payload_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ send_r2t = 1;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ ret = iscsit_dataout_update_datapduinorder_no(
+ cmd, hdr->datasn,
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ cmd->next_burst_len += payload_length;
+ cmd->data_sn++;
+
+ if (send_r2t)
+ cmd->next_burst_len = 0;
+ } else {
+ seq = cmd->seq_ptr;
+
+ if ((seq->next_burst_len + payload_length) ==
+ seq->xfer_len) {
+ if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ payload_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ send_r2t = 1;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ ret = iscsit_dataout_update_datapduinorder_no(
+ cmd, hdr->datasn,
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ seq->data_sn++;
+ seq->offset += payload_length;
+ seq->next_burst_len += payload_length;
+
+ if (send_r2t) {
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_COMPLETE;
+ }
+ }
+ }
+
+ if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
+ cmd->data_sn = 0;
+
+ cmd->write_data_done += payload_length;
+
+ return (cmd->write_data_done == cmd->data_length) ?
+ DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
+ DATAOUT_SEND_R2T : DATAOUT_NORMAL;
+}
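
For the solicited DataSequenceInOrder=Yes path above, a recovery-free transfer
works out to: queue an R2T each time next_burst_len reaches MaxBurstLength, and
hand the command to the transport once write_data_done reaches the full
data_length. A stand-alone sketch of that bookkeeping with made-up sizes:

#include <stdio.h>

int main(void)
{
        unsigned int data_length = 192 * 1024;  /* example total to write */
        unsigned int max_burst = 64 * 1024;     /* example MaxBurstLength */
        unsigned int pdu_len = 16 * 1024;       /* example DataOUT PDU payload */
        unsigned int write_data_done = 0, next_burst_len = 0;

        while (write_data_done < data_length) {
                next_burst_len += pdu_len;
                write_data_done += pdu_len;

                if (next_burst_len == max_burst) {
                        next_burst_len = 0;
                        if (write_data_done < data_length)
                                printf("R2T after %u bytes\n", write_data_done);
                }
        }
        printf("hand off to the transport at %u bytes\n", write_data_done);
        return 0;
}
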
+
+static int iscsit_dataout_post_crc_failed(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ goto recover;
+ /*
+ * The rest of this function is only called when DataPDUInOrder=No.
+ */
+ pdu = cmd->pdu_ptr;
+
+ switch (pdu->status) {
+ case ISCSI_PDU_NOT_RECEIVED:
+ pdu->status = ISCSI_PDU_CRC_FAILED;
+ break;
+ case ISCSI_PDU_CRC_FAILED:
+ break;
+ case ISCSI_PDU_TIMED_OUT:
+ pdu->status = ISCSI_PDU_CRC_FAILED;
+ break;
+ default:
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+recover:
+ return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length);
+}
+
+/*
+ * Called from iscsit_handle_data_out() before DataOUT Payload is received
+ * and CRC computed.
+ */
+int iscsit_check_pre_dataout(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int ret;
+ struct iscsi_conn *conn = cmd->conn;
+
+ ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+
+ ret = iscsit_dataout_check_datasn(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+
+ if (cmd->unsolicited_data) {
+ ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+ } else {
+ ret = iscsit_dataout_check_sequence(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+ }
+
+ return (conn->sess->sess_ops->DataPDUInOrder) ?
+ iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
+ iscsit_dataout_pre_datapduinorder_no(cmd, buf);
+}
+
+/*
+ * Called from iscsit_handle_data_out() after DataOUT Payload is received
+ * and CRC computed.
+ */
+int iscsit_check_post_dataout(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u8 data_crc_failed)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->dataout_timeout_retries = 0;
+
+ if (!data_crc_failed)
+ return iscsit_dataout_post_crc_passed(cmd, buf);
+ else {
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from DataOUT CRC"
+ " failure while ERL=0, closing session.\n");
+ iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
+ 1, 0, buf, cmd);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
+ 0, 0, buf, cmd);
+ return iscsit_dataout_post_crc_failed(cmd, buf);
+ }
+}
+
+static void iscsit_handle_time2retain_timeout(unsigned long data)
+{
+ struct iscsi_session *sess = (struct iscsi_session *) data;
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&se_tpg->session_lock);
+ return;
+ }
+ if (atomic_read(&sess->session_reinstatement)) {
+ pr_err("Exiting Time2Retain handler because"
+ " session_reinstatement=1\n");
+ spin_unlock_bh(&se_tpg->session_lock);
+ return;
+ }
+ sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;
+
+ pr_err("Time2Retain timer expired for SID: %u, cleaning up"
+ " iSCSI session.\n", sess->sid);
+ {
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ if (tiqn) {
+ spin_lock(&tiqn->sess_err_stats.lock);
+ strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ (void *)sess->sess_ops->InitiatorName);
+ tiqn->sess_err_stats.last_sess_failure_type =
+ ISCSI_SESS_ERR_CXN_TIMEOUT;
+ tiqn->sess_err_stats.cxn_timeout_errors++;
+ sess->conn_timeout_errors++;
+ spin_unlock(&tiqn->sess_err_stats.lock);
+ }
+ }
+
+ spin_unlock_bh(&se_tpg->session_lock);
+ iscsit_close_session(sess);
+}
+
+void iscsit_start_time2retain_handler(struct iscsi_session *sess)
+{
+ int tpg_active;
+ /*
+ * Only start the Time2Retain timer when the associated TPG is still in
+ * an ACTIVE (eg: not disabled or shutdown) state.
+ */
+ spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+ tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
+ spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+
+ if (!tpg_active)
+ return;
+
+ if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
+ return;
+
+ pr_debug("Starting Time2Retain timer for %u seconds on"
+ " SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
+
+ init_timer(&sess->time2retain_timer);
+ sess->time2retain_timer.expires =
+ (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
+ sess->time2retain_timer.data = (unsigned long)sess;
+ sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
+ sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
+ sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&sess->time2retain_timer);
+}
+
+/*
+ * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
+ */
+int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
+{
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+ if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
+ return -1;
+
+ if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
+ return 0;
+
+ sess->time2retain_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ del_timer_sync(&sess->time2retain_timer);
+
+ spin_lock_bh(&se_tpg->session_lock);
+ sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
+ pr_debug("Stopped Time2Retain Timer for SID: %u\n",
+ sess->sid);
+ return 0;
+}
+
+void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+ goto sleep;
+ }
+
+ if (atomic_read(&conn->transport_failed)) {
+ spin_unlock_bh(&conn->state_lock);
+ goto sleep;
+ }
+ spin_unlock_bh(&conn->state_lock);
+
+ iscsi_thread_set_force_reinstatement(conn);
+
+sleep:
+ wait_for_completion(&conn->conn_wait_rcfr_comp);
+ complete(&conn->conn_post_wait_comp);
+}
+
+void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
+{
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ if (atomic_read(&conn->transport_failed)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ if (atomic_read(&conn->connection_reinstatement)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ if (iscsi_thread_set_force_reinstatement(conn) < 0) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ atomic_set(&conn->connection_reinstatement, 1);
+ if (!sleep) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ atomic_set(&conn->sleep_on_conn_wait_comp, 1);
+ spin_unlock_bh(&conn->state_lock);
+
+ wait_for_completion(&conn->conn_wait_comp);
+ complete(&conn->conn_post_wait_comp);
+}
+
+void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
+{
+ pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
+ " %u\n", sess->sid);
+
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+}
+
+static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
+ !atomic_read(&sess->session_reinstatement) &&
+ !atomic_read(&sess->session_fall_back_to_erl0))
+ iscsit_connection_recovery_transport_reset(conn);
+ else {
+ pr_debug("Performing cleanup for failed iSCSI"
+ " Connection ID: %hu from %s\n", conn->cid,
+ sess->sess_ops->InitiatorName);
+ iscsit_close_connection(conn);
+ }
+}
+
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+ atomic_set(&conn->connection_exit, 1);
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ spin_unlock_bh(&conn->state_lock);
+ iscsit_close_connection(conn);
+ return;
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
+ conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
+ spin_unlock_bh(&conn->state_lock);
+
+ iscsit_handle_connection_cleanup(conn);
+}
+
+/*
+ * This is the simple function that makes the magic of
+ * sync and steering happen in the following paradoxical order:
+ *
+ * 0) Receive conn->of_marker (bytes left until next OFMarker)
+ * bytes into an offload buffer. When we pass the exact number
+ * of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
+ * rx_data() will automatically receive the identical u32 marker
+ * values and store it in conn->of_marker_offset;
+ * 1) Now conn->of_marker_offset will contain the offset to the start
+ * of the next iSCSI PDU. Dump these remaining bytes into another
+ * offload buffer.
+ * 2) We are done!
+ * Next byte in the TCP stream will contain the next iSCSI PDU!
+ * Cool Huh?!
+ */
+int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
+{
+ /*
+ * Make sure the remaining byte count to the next marker is a sane value.
+ */
+ if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
+ pr_err("Remaining bytes to OFMarker: %u exceeds"
+ " OFMarkInt bytes: %u.\n", conn->of_marker,
+ conn->conn_ops->OFMarkInt * 4);
+ return -1;
+ }
+
+ pr_debug("Advancing %u bytes in TCP stream to get to the"
+ " next OFMarker.\n", conn->of_marker);
+
+ if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
+ return -1;
+
+ /*
+ * Make sure the offset marker we retrieved is a valid value.
+ */
+ if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
+ conn->conn_ops->MaxRecvDataSegmentLength)) {
+ pr_err("OfMarker offset value: %u exceeds limit.\n",
+ conn->of_marker_offset);
+ return -1;
+ }
+
+ pr_debug("Discarding %u bytes of TCP stream to get to the"
+ " next iSCSI Opcode.\n", conn->of_marker_offset);
+
+ if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
+ return -1;
+
+ return 0;
+}
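
OFMarkInt is negotiated in 4-byte words, which is why the first sanity check
above compares conn->of_marker against OFMarkInt * 4; the second bound allows at
most one basic header segment, two digests and MaxRecvDataSegmentLength of data.
A stand-alone sketch of the two bounds with made-up values, assuming the usual
48-byte header and 4-byte CRC32C digest sizes:

#include <stdio.h>

#define HDR_LEN 48      /* iSCSI basic header segment */
#define CRC_LEN 4       /* CRC32C digest */

int main(void)
{
        unsigned int of_mark_int = 2048;        /* example OFMarkInt, in 4-byte words */
        unsigned int mrdsl = 8192;              /* example MaxRecvDataSegmentLength */
        unsigned int of_marker = 5000;          /* example bytes left to the next marker */
        unsigned int of_marker_offset = 300;    /* example offset read from the marker */

        if (of_marker > of_mark_int * 4)
                printf("bogus of_marker\n");
        if (of_marker_offset > HDR_LEN + CRC_LEN * 2 + mrdsl)
                printf("bogus of_marker_offset\n");

        printf("skip %u bytes, then %u more to reach the next PDU\n",
               of_marker, of_marker_offset);
        return 0;
}
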
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
new file mode 100644
index 0000000..21acc9a
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -0,0 +1,15 @@
+#ifndef ISCSI_TARGET_ERL0_H
+#define ISCSI_TARGET_ERL0_H
+
+extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
+extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
+extern void iscsit_start_time2retain_handler(struct iscsi_session *);
+extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
+extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
+extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
new file mode 100644
index 0000000..c4c68da
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -0,0 +1,1299 @@
+/*******************************************************************************
+ * This file contains error recovery level one used by the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target.h"
+
+#define OFFLOAD_BUF_SIZE 32768
+
+/*
+ * Used to dump excess datain payload for certain error recovery
+ * situations. Receive in OFFLOAD_BUF_SIZE max of datain per rx_data().
+ *
+ * dump_padding_digest denotes if padding and data digests need
+ * to be dumped.
+ */
+int iscsit_dump_data_payload(
+ struct iscsi_conn *conn,
+ u32 buf_len,
+ int dump_padding_digest)
+{
+ char *buf, pad_bytes[4];
+ int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
+ u32 length, padding, offset = 0, size;
+ struct kvec iov;
+
+ length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+
+ buf = kzalloc(length, GFP_ATOMIC);
+ if (!buf) {
+ pr_err("Unable to allocate %u bytes for offload"
+ " buffer.\n", length);
+ return -1;
+ }
+ memset(&iov, 0, sizeof(struct kvec));
+
+ while (offset < buf_len) {
+ size = ((offset + length) > buf_len) ?
+ (buf_len - offset) : length;
+
+ iov.iov_len = size;
+ iov.iov_base = buf;
+
+ rx_got = rx_data(conn, &iov, 1, size);
+ if (rx_got != size) {
+ ret = DATAOUT_CANNOT_RECOVER;
+ goto out;
+ }
+
+ offset += size;
+ }
+
+ if (!dump_padding_digest)
+ goto out;
+
+ padding = ((-buf_len) & 3);
+ if (padding != 0) {
+ iov.iov_len = padding;
+ iov.iov_base = pad_bytes;
+
+ rx_got = rx_data(conn, &iov, 1, padding);
+ if (rx_got != padding) {
+ ret = DATAOUT_CANNOT_RECOVER;
+ goto out;
+ }
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ u32 data_crc;
+
+ iov.iov_len = ISCSI_CRC_LEN;
+ iov.iov_base = &data_crc;
+
+ rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+ if (rx_got != ISCSI_CRC_LEN) {
+ ret = DATAOUT_CANNOT_RECOVER;
+ goto out;
+ }
+ }
+
+out:
+ kfree(buf);
+ return ret;
+}
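
The ((-buf_len) & 3) expression above is the usual trick for computing how many
pad bytes follow a data segment that must end on a 4-byte boundary. A
stand-alone check of that identity in plain user-space C:

#include <stdio.h>

int main(void)
{
        unsigned int buf_len;

        for (buf_len = 1; buf_len <= 8; buf_len++) {
                unsigned int padding = (-buf_len) & 3;

                /* data plus padding always lands on a 4-byte boundary */
                printf("len %u -> %u pad byte(s), total %u\n",
                       buf_len, padding, buf_len + padding);
        }
        return 0;
}
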
+
+/*
+ * Used for retransmitting R2Ts from an R2T SNACK request.
+ */
+static int iscsit_send_recovery_r2t_for_snack(
+ struct iscsi_cmd *cmd,
+ struct iscsi_r2t *r2t)
+{
+ /*
+ * If the struct iscsi_r2t has not been sent yet, we can safely
+ * ignore retransmission of the R2TSN in question.
+ */
+ spin_lock_bh(&cmd->r2t_lock);
+ if (!r2t->sent_r2t) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return 0;
+ }
+ r2t->sent_r2t = 0;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
+
+ return 0;
+}
+
+static int iscsit_handle_r2t_snack(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u32 begrun,
+ u32 runlength)
+{
+ u32 last_r2tsn;
+ struct iscsi_r2t *r2t;
+
+ /*
+ * Make sure the initiator is not requesting retransmission
+ * of R2TSNs already acknowledged by a TMR TASK_REASSIGN.
+ */
+ if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+ (begrun <= cmd->acked_data_sn)) {
+ pr_err("ITT: 0x%08x, R2T SNACK requesting"
+ " retransmission of R2TSN: 0x%08x to 0x%08x but already"
+ " acked to R2TSN: 0x%08x by TMR TASK_REASSIGN,"
+ " protocol error.\n", cmd->init_task_tag, begrun,
+ (begrun + runlength), cmd->acked_data_sn);
+
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+
+ if (runlength) {
+ if ((begrun + runlength) > cmd->r2t_sn) {
+ pr_err("Command ITT: 0x%08x received R2T SNACK"
+ " with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
+ " current R2TSN: 0x%08x, protocol error.\n",
+ cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
+ }
+ last_r2tsn = (begrun + runlength);
+ } else
+ last_r2tsn = cmd->r2t_sn;
+
+ while (begrun < last_r2tsn) {
+ r2t = iscsit_get_holder_for_r2tsn(cmd, begrun);
+ if (!r2t)
+ return -1;
+ if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0)
+ return -1;
+
+ begrun++;
+ }
+
+ return 0;
+}
+
+/*
+ * Generates Offsets and NextBurstLength based on Begrun and Runlength
+ * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ * For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only.
+ *
+ * FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ u32 data_sn = 0, data_sn_count = 0;
+ u32 pdu_start = 0, seq_no = 0;
+ u32 begrun = dr->begrun;
+ struct iscsi_conn *conn = cmd->conn;
+
+ while (begrun > data_sn++) {
+ data_sn_count++;
+ if ((dr->next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ dr->read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ dr->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ } else {
+ dr->read_data_done +=
+ (conn->sess->sess_ops->MaxBurstLength -
+ dr->next_burst_len);
+ dr->next_burst_len = 0;
+ pdu_start += data_sn_count;
+ data_sn_count = 0;
+ seq_no++;
+ }
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ cmd->seq_no = seq_no;
+ cmd->pdu_start = pdu_start;
+ cmd->pdu_send_order = data_sn_count;
+ }
+
+ return 0;
+}
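
The BegRun loop above replays the accounting that the normal DataIN path would
have produced for DataSN 0 through BegRun - 1: one MaxRecvDataSegmentLength per
DataSN, rolling the burst over once MaxBurstLength is reached. A stand-alone
sketch of the same walk with made-up sizes, tracking only read_data_done and
next_burst_len:

#include <stdio.h>

int main(void)
{
        unsigned int begrun = 10;       /* example BegRun from a Data SNACK */
        unsigned int mrdsl = 8192;      /* example MaxRecvDataSegmentLength */
        unsigned int max_burst = 32768; /* example MaxBurstLength */
        unsigned int read_data_done = 0, next_burst_len = 0;
        unsigned int data_sn;

        for (data_sn = 0; data_sn < begrun; data_sn++) {
                if (next_burst_len + mrdsl < max_burst) {
                        read_data_done += mrdsl;
                        next_burst_len += mrdsl;
                } else {
                        read_data_done += max_burst - next_burst_len;
                        next_burst_len = 0;
                }
        }
        printf("restart at offset %u, %u bytes into the current burst\n",
               read_data_done, next_burst_len);
        return 0;
}
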
+
+/*
+ * Generates Offsets and NextBurstLength based on Begrun and Runlength
+ * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ * For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only.
+ *
+ * FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ int found_seq = 0, i;
+ u32 data_sn, read_data_done = 0, seq_send_order = 0;
+ u32 begrun = dr->begrun;
+ u32 runlength = dr->runlength;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_seq *first_seq = NULL, *seq = NULL;
+
+ if (!cmd->seq_list) {
+ pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ return -1;
+ }
+
+ /*
+ * Calculate read_data_done for all sequences containing a
+ * first_datasn and last_datasn less than the BegRun.
+ *
+ * Locate the struct iscsi_seq the BegRun lies within and calculate
+ * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength.
+ *
+ * Also use struct iscsi_seq->seq_send_order to determine where to start.
+ */
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+
+ if (!seq->seq_send_order)
+ first_seq = seq;
+
+ /*
+ * No data has been transferred for this DataIN sequence, so the
+ * seq->first_datasn and seq->last_datasn have not been set.
+ */
+ if (!seq->sent) {
+#if 0
+ pr_err("Ignoring non-sent sequence 0x%08x ->"
+ " 0x%08x\n\n", seq->first_datasn,
+ seq->last_datasn);
+#endif
+ continue;
+ }
+
+ /*
+ * This DataIN sequence precedes the received BegRun; add the
+ * total xfer_len of the sequence to read_data_done and reset
+ * seq->pdu_send_order.
+ */
+ if ((seq->first_datasn < begrun) &&
+ (seq->last_datasn < begrun)) {
+#if 0
+ pr_err("Pre BegRun sequence 0x%08x ->"
+ " 0x%08x\n", seq->first_datasn,
+ seq->last_datasn);
+#endif
+ read_data_done += cmd->seq_list[i].xfer_len;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ continue;
+ }
+
+ /*
+ * The BegRun lies within this DataIN sequence.
+ */
+ if ((seq->first_datasn <= begrun) &&
+ (seq->last_datasn >= begrun)) {
+#if 0
+ pr_err("Found sequence begrun: 0x%08x in"
+ " 0x%08x -> 0x%08x\n", begrun,
+ seq->first_datasn, seq->last_datasn);
+#endif
+ seq_send_order = seq->seq_send_order;
+ data_sn = seq->first_datasn;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ found_seq = 1;
+
+ /*
+ * For DataPDUInOrder=Yes, while the first DataSN of
+ * the sequence is less than the received BegRun, add
+ * the MaxRecvDataSegmentLength to read_data_done and
+ * to the sequence's next_burst_len;
+ *
+ * For DataPDUInOrder=No, while the first DataSN of the
+ * sequence is less than the received BegRun, find the
+ * struct iscsi_pdu of the DataSN in question and add the
+ * MaxRecvDataSegmentLength to read_data_done and to the
+ * sequence's next_burst_len;
+ */
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ while (data_sn < begrun) {
+ seq->pdu_send_order++;
+ read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ seq->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ data_sn++;
+ }
+ } else {
+ int j;
+ struct iscsi_pdu *pdu;
+
+ while (data_sn < begrun) {
+ seq->pdu_send_order++;
+
+ for (j = 0; j < seq->pdu_count; j++) {
+ pdu = &cmd->pdu_list[
+ seq->pdu_start + j];
+ if (pdu->data_sn == data_sn) {
+ read_data_done +=
+ pdu->length;
+ seq->next_burst_len +=
+ pdu->length;
+ }
+ }
+ data_sn++;
+ }
+ }
+ continue;
+ }
+
+ /*
+ * This DataIN sequence starts after the received BegRun;
+ * reset seq->pdu_send_order and continue.
+ */
+ if ((seq->first_datasn > begrun) ||
+ (seq->last_datasn > begrun)) {
+#if 0
+ pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
+ seq->first_datasn, seq->last_datasn);
+#endif
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ continue;
+ }
+ }
+
+ if (!found_seq) {
+ if (!begrun) {
+ if (!first_seq) {
+ pr_err("ITT: 0x%08x, Begrun: 0x%08x"
+ " but first_seq is NULL\n",
+ cmd->init_task_tag, begrun);
+ return -1;
+ }
+ seq_send_order = first_seq->seq_send_order;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ goto done;
+ }
+
+ pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x,"
+ " BegRun: 0x%08x, RunLength: 0x%08x while"
+ " DataSequenceInOrder=No and DataPDUInOrder=%s.\n",
+ cmd->init_task_tag, begrun, runlength,
+ (conn->sess->sess_ops->DataPDUInOrder) ? "Yes" : "No");
+ return -1;
+ }
+
+done:
+ dr->read_data_done = read_data_done;
+ dr->seq_send_order = seq_send_order;
+
+ return 0;
+}
+
+static int iscsit_handle_recovery_datain(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (!atomic_read(&se_cmd->t_transport_complete)) {
+ pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
+ cmd->init_task_tag);
+ return 0;
+ }
+
+ /*
+ * Make sure the initiator is not requesting retransmission
+ * of DataSNs already acknowledged by a Data ACK SNACK.
+ */
+ if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+ (begrun <= cmd->acked_data_sn)) {
+ pr_err("ITT: 0x%08x, Data SNACK requesting"
+ " retransmission of DataSN: 0x%08x to 0x%08x but"
+ " already acked to DataSN: 0x%08x by Data ACK SNACK,"
+ " protocol error.\n", cmd->init_task_tag, begrun,
+ (begrun + runlength), cmd->acked_data_sn);
+
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+
+ /*
+ * Make sure BegRun and RunLength in the Data SNACK are sane.
+ * Note: (cmd->data_sn - 1) will carry the maximum DataSN sent.
+ */
+ if ((begrun + runlength) > (cmd->data_sn - 1)) {
+ pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
+ ": 0x%08x greater than maximum DataSN: 0x%08x.\n",
+ begrun, runlength, (cmd->data_sn - 1));
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+ 1, 0, buf, cmd);
+ }
+
+ dr = iscsit_allocate_datain_req();
+ if (!dr)
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 0, buf, cmd);
+
+ dr->data_sn = dr->begrun = begrun;
+ dr->runlength = runlength;
+ dr->generate_recovery_values = 1;
+ dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY;
+
+ iscsit_attach_datain_req(cmd, dr);
+
+ cmd->i_state = ISTATE_SEND_DATAIN;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+int iscsit_handle_recovery_datain_or_r2t(
+ struct iscsi_conn *conn,
+ unsigned char *buf,
+ u32 init_task_tag,
+ u32 targ_xfer_tag,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_cmd *cmd;
+
+ cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
+ if (!cmd)
+ return 0;
+
+ /*
+ * FIXME: This will not work for bidi commands.
+ */
+ switch (cmd->data_direction) {
+ case DMA_TO_DEVICE:
+ return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength);
+ case DMA_FROM_DEVICE:
+ return iscsit_handle_recovery_datain(cmd, buf, begrun,
+ runlength);
+ default:
+ pr_err("Unknown cmd->data_direction: 0x%02x\n",
+ cmd->data_direction);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
+int iscsit_handle_status_snack(
+ struct iscsi_conn *conn,
+ u32 init_task_tag,
+ u32 targ_xfer_tag,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_cmd *cmd = NULL;
+ u32 last_statsn;
+ int found_cmd;
+
+ if (conn->exp_statsn > begrun) {
+ pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
+ " 0x%08x but already got ExpStatSN: 0x%08x on CID:"
+ " %hu.\n", begrun, runlength, conn->exp_statsn,
+ conn->cid);
+ return 0;
+ }
+
+ last_statsn = (!runlength) ? conn->stat_sn : (begrun + runlength);
+
+ while (begrun < last_statsn) {
+ found_cmd = 0;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->stat_sn == begrun) {
+ found_cmd = 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!found_cmd) {
+ pr_err("Unable to find StatSN: 0x%08x for"
+ " a Status SNACK, assuming this was a"
+ " protactic SNACK for an untransmitted"
+ " StatSN, ignoring.\n", begrun);
+ begrun++;
+ continue;
+ }
+
+ spin_lock_bh(&cmd->istate_lock);
+ if (cmd->i_state == ISTATE_SEND_DATAIN) {
+ spin_unlock_bh(&cmd->istate_lock);
+ pr_err("Ignoring Status SNACK for BegRun:"
+ " 0x%08x, RunLength: 0x%08x, assuming this was"
+ " a protactic SNACK for an untransmitted"
+ " StatSN\n", begrun, runlength);
+ begrun++;
+ continue;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ cmd->i_state = ISTATE_SEND_STATUS_RECOVERY;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ begrun++;
+ }
+
+ return 0;
+}
+
+int iscsit_handle_data_ack(
+ struct iscsi_conn *conn,
+ u32 targ_xfer_tag,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_cmd *cmd = NULL;
+
+ cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
+ if (!cmd) {
+ pr_err("Data ACK SNACK for TTT: 0x%08x is"
+ " invalid.\n", targ_xfer_tag);
+ return -1;
+ }
+
+ if (begrun <= cmd->acked_data_sn) {
+ pr_err("ITT: 0x%08x Data ACK SNACK BegRUN: 0x%08x is"
+ " less than the already acked DataSN: 0x%08x.\n",
+ cmd->init_task_tag, begrun, cmd->acked_data_sn);
+ return -1;
+ }
+
+ /*
+ * For Data ACK SNACK, BegRun is the next expected DataSN.
+ * (see iSCSI v19: 10.16.6)
+ */
+ cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = (begrun - 1);
+
+ pr_debug("Received Data ACK SNACK for ITT: 0x%08x,"
+ " updated acked DataSN to 0x%08x.\n",
+ cmd->init_task_tag, cmd->acked_data_sn);
+
+ return 0;
+}
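
As the comment above notes, the BegRun of a Data ACK SNACK carries the next
expected DataSN, so everything up to BegRun - 1 is considered acknowledged and
later SNACKs inside that range are rejected as protocol errors. A trivial
stand-alone sketch of the bookkeeping, with an illustrative BegRun value:

#include <stdio.h>

int main(void)
{
        unsigned int begrun = 42;       /* next DataSN the initiator expects */
        unsigned int acked_data_sn = begrun - 1;

        printf("DataSNs 0..%u are acked and may no longer be SNACKed\n",
               acked_data_sn);
        return 0;
}
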
+
+static int iscsit_send_recovery_r2t(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 xfer_len)
+{
+ int ret;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0);
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return ret;
+}
+
+int iscsit_dataout_datapduinorder_no_fbit(
+ struct iscsi_cmd *cmd,
+ struct iscsi_pdu *pdu)
+{
+ int i, send_recovery_r2t = 0, recovery = 0;
+ u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *first_pdu = NULL;
+
+ /*
+ * Get a struct iscsi_pdu pointer to the first PDU, and the total PDU count
+ * of the DataOUT sequence.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ for (i = 0; i < cmd->pdu_count; i++) {
+ if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
+ if (!first_pdu)
+ first_pdu = &cmd->pdu_list[i];
+ xfer_len += cmd->pdu_list[i].length;
+ pdu_count++;
+ } else if (pdu_count)
+ break;
+ }
+ } else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+
+ first_pdu = &cmd->pdu_list[seq->pdu_start];
+ pdu_count = seq->pdu_count;
+ }
+
+ if (!first_pdu || !pdu_count)
+ return DATAOUT_CANNOT_RECOVER;
+
+ /*
+ * Loop through the ending DataOUT Sequence checking each struct iscsi_pdu.
+ * The following ugly logic does batching of not received PDUs.
+ */
+ for (i = 0; i < pdu_count; i++) {
+ if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) {
+ if (!send_recovery_r2t)
+ continue;
+
+ if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ send_recovery_r2t = length = offset = 0;
+ continue;
+ }
+ /*
+ * Set recovery = 1 for any missing, CRC failed, or timed
+ * out PDUs to let the DataOUT logic know that this sequence
+ * has not been completed yet.
+ *
+ * Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED.
+ * We assume if the PDU either failed CRC or timed out
+ * that a Recovery R2T has already been sent.
+ */
+ recovery = 1;
+
+ if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED)
+ continue;
+
+ if (!offset)
+ offset = first_pdu[i].offset;
+ length += first_pdu[i].length;
+
+ send_recovery_r2t = 1;
+ }
+
+ if (send_recovery_r2t)
+ if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
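
The loop above batches adjacent not-received PDUs into a single recovery R2T:
the offset latches on the first missing PDU of a run, the lengths accumulate,
and the run is flushed when a received-OK PDU or the end of the sequence is
reached. A stand-alone sketch of the same batching over a made-up status array:

#include <stdio.h>

#define RECEIVED_OK     1
#define NOT_RECEIVED    0

int main(void)
{
        /* One entry per PDU in the sequence: offset, length, status. */
        struct { unsigned int offset, length; int status; } pdu[] = {
                {     0, 4096, RECEIVED_OK },
                {  4096, 4096, NOT_RECEIVED },
                {  8192, 4096, NOT_RECEIVED },
                { 12288, 4096, RECEIVED_OK },
                { 16384, 4096, NOT_RECEIVED },
        };
        unsigned int i, offset = 0, length = 0;
        int pending = 0;

        for (i = 0; i < sizeof(pdu) / sizeof(pdu[0]); i++) {
                if (pdu[i].status == RECEIVED_OK) {
                        if (pending)
                                printf("recovery R2T: offset %u, length %u\n",
                                       offset, length);
                        pending = length = offset = 0;
                        continue;
                }
                if (!offset)
                        offset = pdu[i].offset;
                length += pdu[i].length;
                pending = 1;
        }
        if (pending)
                printf("recovery R2T: offset %u, length %u\n", offset, length);
        return 0;
}
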
+
+static int iscsit_recalculate_dataout_values(
+ struct iscsi_cmd *cmd,
+ u32 pdu_offset,
+ u32 pdu_length,
+ u32 *r2t_offset,
+ u32 *r2t_length)
+{
+ int i;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = NULL;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ cmd->data_sn = 0;
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ *r2t_offset = cmd->write_data_done;
+ *r2t_length = (cmd->seq_end_offset -
+ cmd->write_data_done);
+ return 0;
+ }
+
+ *r2t_offset = cmd->seq_start_offset;
+ *r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset);
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ if ((pdu->offset >= cmd->seq_start_offset) &&
+ ((pdu->offset + pdu->length) <=
+ cmd->seq_end_offset)) {
+ if (!cmd->unsolicited_data)
+ cmd->next_burst_len -= pdu->length;
+ else
+ cmd->first_burst_len -= pdu->length;
+
+ cmd->write_data_done -= pdu->length;
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+ } else {
+ struct iscsi_seq *seq = NULL;
+
+ seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length);
+ if (!seq)
+ return -1;
+
+ *r2t_offset = seq->orig_offset;
+ *r2t_length = seq->xfer_len;
+
+ cmd->write_data_done -= (seq->offset - seq->orig_offset);
+ if (cmd->immediate_data)
+ cmd->first_burst_len = cmd->write_data_done;
+
+ seq->data_sn = 0;
+ seq->offset = seq->orig_offset;
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ return 0;
+
+ for (i = 0; i < seq->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+
+ return 0;
+}
+
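+/*
+ * Flag the command as within command recovery, recalculate the missing
+ * DataOUT range, and request it again with a recovery R2T.
+ */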
+int iscsit_recover_dataout_sequence(
+ struct iscsi_cmd *cmd,
+ u32 pdu_offset,
+ u32 pdu_length)
+{
+ u32 r2t_length = 0, r2t_offset = 0;
+
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
+ &r2t_offset, &r2t_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length);
+
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
+
+static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
+{
+ struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL;
+
+ ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC);
+ if (!ooo_cmdsn) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_ooo_cmdsn.\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&ooo_cmdsn->ooo_list);
+
+ return ooo_cmdsn;
+}
+
+/*
+ * Called with sess->cmdsn_mutex held.
+ */
+static int iscsit_attach_ooo_cmdsn(
+ struct iscsi_session *sess,
+ struct iscsi_ooo_cmdsn *ooo_cmdsn)
+{
+ struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
+ /*
+ * We attach the struct iscsi_ooo_cmdsn entry to the out of order
+ * list in increasing CmdSN order.
+ * This allows iscsi_execute_ooo_cmdsns() to detect any
+ * additional CmdSN holes while performing delayed execution.
+ */
+ if (list_empty(&sess->sess_ooo_cmdsn_list))
+ list_add_tail(&ooo_cmdsn->ooo_list,
+ &sess->sess_ooo_cmdsn_list);
+ else {
+ ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
+ typeof(*ooo_tail), ooo_list);
+ /*
+ * CmdSN is greater than the tail of the list.
+ */
+ if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
+ list_add_tail(&ooo_cmdsn->ooo_list,
+ &sess->sess_ooo_cmdsn_list);
+ else {
+ /*
+ * CmdSN is either lower than the head, or somewhere
+ * in the middle.
+ */
+ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
+ ooo_list) {
+ if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+ continue;
+
+ list_add(&ooo_cmdsn->ooo_list,
+ &ooo_tmp->ooo_list);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Removes a struct iscsi_ooo_cmdsn from a session's list,
+ * called with struct iscsi_session->cmdsn_mutex held.
+ */
+void iscsit_remove_ooo_cmdsn(
+ struct iscsi_session *sess,
+ struct iscsi_ooo_cmdsn *ooo_cmdsn)
+{
+ list_del(&ooo_cmdsn->ooo_list);
+ kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+}
+
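+/*
+ * Clear the command pointers of any out of order CmdSN entries that
+ * still reference this connection.
+ */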
+void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_ooo_cmdsn *ooo_cmdsn;
+ struct iscsi_session *sess = conn->sess;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
+ if (ooo_cmdsn->cid != conn->cid)
+ continue;
+
+ ooo_cmdsn->cmd = NULL;
+ }
+ mutex_unlock(&sess->cmdsn_mutex);
+}
+
+/*
+ * Called with sess->cmdsn_mutex held.
+ */
+int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
+{
+ int ooo_count = 0;
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+
+ list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+ &sess->sess_ooo_cmdsn_list, ooo_list) {
+ if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
+ continue;
+
+ if (!ooo_cmdsn->cmd) {
+ sess->exp_cmd_sn++;
+ iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+ continue;
+ }
+
+ cmd = ooo_cmdsn->cmd;
+ cmd->i_state = cmd->deferred_i_state;
+ ooo_count++;
+ sess->exp_cmd_sn++;
+ pr_debug("Executing out of order CmdSN: 0x%08x,"
+ " incremented ExpCmdSN to 0x%08x.\n",
+ cmd->cmd_sn, sess->exp_cmd_sn);
+
+ iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+
+ if (iscsit_execute_cmd(cmd, 1) < 0)
+ return -1;
+
+ continue;
+ }
+
+ return ooo_count;
+}
+
+/*
+ * Called either:
+ *
+ * 1. With sess->cmdsn_mutex held from iscsi_execute_ooo_cmdsns()
+ * or iscsi_check_received_cmdsn().
+ * 2. With no locks held directly from iscsi_handle_XXX_pdu() functions
+ * for immediate commands.
+ */
+int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ int lr = 0;
+
+ spin_lock_bh(&cmd->istate_lock);
+ if (ooo)
+ cmd->cmd_flags &= ~ICF_OOO_CMDSN;
+
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ /*
+ * Go ahead and send the CHECK_CONDITION status for
+		 * any SCSI CDB exceptions that may have occurred; also
+		 * handle the SCF_SCSI_RESERVATION_CONFLICT case here.
+ */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+ if (se_cmd->se_cmd_flags &
+ SCF_SCSI_RESERVATION_CONFLICT) {
+ cmd->i_state = ISTATE_SEND_STATUS;
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
+ cmd->i_state);
+ return 0;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+ /*
+ * Determine if delayed TASK_ABORTED status for WRITEs
+ * should be sent now if no unsolicited data out
+ * payloads are expected, or if the delayed status
+ * should be sent after unsolicited data out with
+ * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
+ */
+ if (transport_check_aborted_status(se_cmd,
+ (cmd->unsolicited_data == 0)) != 0)
+ return 0;
+ /*
+ * Otherwise send CHECK_CONDITION and sense for
+ * exception
+ */
+ return transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->scsi_sense_reason, 0);
+ }
+ /*
+ * Special case for delayed CmdSN with Immediate
+ * Data and/or Unsolicited Data Out attached.
+ */
+ if (cmd->immediate_data) {
+ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+ spin_unlock_bh(&cmd->istate_lock);
+ return transport_generic_handle_data(
+ &cmd->se_cmd);
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (!(cmd->cmd_flags &
+ ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
+ /*
+ * Send the delayed TASK_ABORTED status for
+				 * WRITEs if no more unsolicited data is
+ * expected.
+ */
+ if (transport_check_aborted_status(se_cmd, 1)
+ != 0)
+ return 0;
+
+ iscsit_set_dataout_sequence_values(cmd);
+ iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0);
+ }
+ return 0;
+ }
+ /*
+ * The default handler.
+ */
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if ((cmd->data_direction == DMA_TO_DEVICE) &&
+ !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
+ /*
+ * Send the delayed TASK_ABORTED status for WRITEs if
+			 * no more unsolicited data is expected.
+ */
+ if (transport_check_aborted_status(se_cmd, 1) != 0)
+ return 0;
+
+ iscsit_set_dataout_sequence_values(cmd);
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, cmd->conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ }
+ return transport_handle_cdb_direct(&cmd->se_cmd);
+
+ case ISCSI_OP_NOOP_OUT:
+ case ISCSI_OP_TEXT:
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
+ cmd->i_state);
+ return 0;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ return transport_generic_handle_tmr(&cmd->se_cmd);
+ case ISCSI_OP_LOGOUT:
+ spin_unlock_bh(&cmd->istate_lock);
+ switch (cmd->logout_reason) {
+ case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+ lr = iscsit_logout_closesession(cmd, cmd->conn);
+ break;
+ case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+ lr = iscsit_logout_closeconnection(cmd, cmd->conn);
+ break;
+ case ISCSI_LOGOUT_REASON_RECOVERY:
+ lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn);
+ break;
+ default:
+ pr_err("Unknown iSCSI Logout Request Code:"
+ " 0x%02x\n", cmd->logout_reason);
+ return -1;
+ }
+
+ return lr;
+ default:
+ spin_unlock_bh(&cmd->istate_lock);
+ pr_err("Cannot perform out of order execution for"
+ " unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode);
+ return -1;
+ }
+
+ return 0;
+}
+
+void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
+{
+ struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+ &sess->sess_ooo_cmdsn_list, ooo_list) {
+
+ list_del(&ooo_cmdsn->ooo_list);
+ kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+ }
+ mutex_unlock(&sess->cmdsn_mutex);
+}
+
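+/*
+ * Defer a command that arrived with a CmdSN higher than ExpCmdSN by
+ * attaching it to the session's out of order CmdSN list.
+ */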
+int iscsit_handle_ooo_cmdsn(
+ struct iscsi_session *sess,
+ struct iscsi_cmd *cmd,
+ u32 cmdsn)
+{
+ int batch = 0;
+ struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL;
+
+ cmd->deferred_i_state = cmd->i_state;
+ cmd->i_state = ISTATE_DEFERRED_CMD;
+ cmd->cmd_flags |= ICF_OOO_CMDSN;
+
+ if (list_empty(&sess->sess_ooo_cmdsn_list))
+ batch = 1;
+ else {
+ ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
+ typeof(*ooo_tail), ooo_list);
+ if (ooo_tail->cmdsn != (cmdsn - 1))
+ batch = 1;
+ }
+
+ ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
+ if (!ooo_cmdsn)
+ return CMDSN_ERROR_CANNOT_RECOVER;
+
+ ooo_cmdsn->cmd = cmd;
+ ooo_cmdsn->batch_count = (batch) ?
+ (cmdsn - sess->exp_cmd_sn) : 1;
+ ooo_cmdsn->cid = cmd->conn->cid;
+ ooo_cmdsn->exp_cmdsn = sess->exp_cmd_sn;
+ ooo_cmdsn->cmdsn = cmdsn;
+
+ if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
+ kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+ return CMDSN_ERROR_CANNOT_RECOVER;
+ }
+
+ return CMDSN_HIGHER_THAN_EXP;
+}
+
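+/*
+ * Determine the offset/length of the DataOUT sequence that timed out,
+ * either from unsolicited data or from an outstanding R2T.
+ */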
+static int iscsit_set_dataout_timeout_values(
+ struct iscsi_cmd *cmd,
+ u32 *offset,
+ u32 *length)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_r2t *r2t;
+
+ if (cmd->unsolicited_data) {
+ *offset = 0;
+ *length = (conn->sess->sess_ops->FirstBurstLength >
+ cmd->data_length) ?
+ cmd->data_length :
+ conn->sess->sess_ops->FirstBurstLength;
+ return 0;
+ }
+
+ spin_lock_bh(&cmd->r2t_lock);
+ if (list_empty(&cmd->cmd_r2t_list)) {
+ pr_err("cmd->cmd_r2t_list is empty!\n");
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) {
+ *offset = r2t->offset;
+ *length = r2t->xfer_len;
+ spin_unlock_bh(&cmd->r2t_lock);
+ return 0;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ pr_err("Unable to locate any incomplete DataOUT"
+ " sequences for ITT: 0x%08x.\n", cmd->init_task_tag);
+
+ return -1;
+}
+
+/*
+ * NOTE: Called from interrupt (timer) context.
+ */
+static void iscsit_handle_dataout_timeout(unsigned long data)
+{
+ u32 pdu_length = 0, pdu_offset = 0;
+ u32 r2t_length = 0, r2t_offset = 0;
+ struct iscsi_cmd *cmd = (struct iscsi_cmd *) data;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_session *sess = NULL;
+ struct iscsi_node_attrib *na;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ if (cmd->dataout_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_dec_conn_usage_count(conn);
+ return;
+ }
+ cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+ sess = conn->sess;
+ na = iscsit_tpg_get_node_attrib(sess);
+
+ if (!sess->sess_ops->ErrorRecoveryLevel) {
+ pr_debug("Unable to recover from DataOut timeout while"
+ " in ERL=0.\n");
+ goto failure;
+ }
+
+ if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
+ pr_debug("Command ITT: 0x%08x exceeded max retries"
+ " for DataOUT timeout %u, closing iSCSI connection.\n",
+ cmd->init_task_tag, na->dataout_timeout_retries);
+ goto failure;
+ }
+
+ cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ pdu_offset = cmd->write_data_done;
+ if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len)) > cmd->data_length)
+ pdu_length = (cmd->data_length -
+ cmd->write_data_done);
+ else
+ pdu_length = (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len);
+ } else {
+ pdu_offset = cmd->seq_start_offset;
+ pdu_length = (cmd->seq_end_offset -
+ cmd->seq_start_offset);
+ }
+ } else {
+ if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset,
+ &pdu_length) < 0)
+ goto failure;
+ }
+
+ if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
+ &r2t_offset, &r2t_length) < 0)
+ goto failure;
+
+ pr_debug("Command ITT: 0x%08x timed out waiting for"
+ " completion of %sDataOUT Sequence Offset: %u, Length: %u\n",
+ cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " :
+ "", r2t_offset, r2t_length);
+
+ if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0)
+ goto failure;
+
+ iscsit_start_dataout_timer(cmd, conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_dec_conn_usage_count(conn);
+
+ return;
+
+failure:
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_cause_connection_reinstatement(conn, 0);
+ iscsit_dec_conn_usage_count(conn);
+}
+
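+/*
+ * Push back the DataOUT timeout for a command whose timer is running.
+ */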
+void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ return;
+ }
+
+ mod_timer(&cmd->dataout_timer,
+ (get_jiffies_64() + na->dataout_timeout * HZ));
+ pr_debug("Updated DataOUT timer for ITT: 0x%08x",
+ cmd->init_task_tag);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
+
+/*
+ * Called with cmd->dataout_timeout_lock held.
+ */
+void iscsit_start_dataout_timer(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
+ return;
+
+ pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
+ " CID: %hu.\n", cmd->init_task_tag, conn->cid);
+
+ init_timer(&cmd->dataout_timer);
+ cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ);
+ cmd->dataout_timer.data = (unsigned long)cmd;
+ cmd->dataout_timer.function = iscsit_handle_dataout_timeout;
+ cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
+ cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&cmd->dataout_timer);
+}
+
+void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
+{
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ return;
+ }
+ cmd->dataout_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+
+ del_timer_sync(&cmd->dataout_timer);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+ pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n",
+ cmd->init_task_tag);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
new file mode 100644
index 0000000..85e67e2
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -0,0 +1,26 @@
+#ifndef ISCSI_TARGET_ERL1_H
+#define ISCSI_TARGET_ERL1_H
+
+extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
+extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ struct iscsi_cmd *, struct iscsi_datain_req *);
+extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ struct iscsi_cmd *, struct iscsi_datain_req *);
+extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
+ u32, u32, u32, u32);
+extern int iscsit_handle_status_snack(struct iscsi_conn *, u32, u32,
+ u32, u32);
+extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
+extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
+extern int iscsit_recover_dataout_sequence(struct iscsi_cmd *, u32, u32);
+extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *);
+extern void iscsit_free_all_ooo_cmdsns(struct iscsi_session *);
+extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
+extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
+extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
+extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
+extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
+extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
+extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+
+#endif /* ISCSI_TARGET_ERL1_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
new file mode 100644
index 0000000..91a4d17
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -0,0 +1,474 @@
+/*******************************************************************************
+ * This file contains error recovery level two functions used by
+ * the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target.h"
+
+/*
+ * FIXME: Does RData SNACK apply here as well?
+ */
+void iscsit_create_conn_recovery_datain_values(
+ struct iscsi_cmd *cmd,
+ u32 exp_data_sn)
+{
+ u32 data_sn = 0;
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->next_burst_len = 0;
+ cmd->read_data_done = 0;
+
+ while (exp_data_sn > data_sn) {
+ if ((cmd->next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ cmd->read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ cmd->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ } else {
+ cmd->read_data_done +=
+ (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len);
+ cmd->next_burst_len = 0;
+ }
+ data_sn++;
+ }
+}
+
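+/*
+ * Round cmd->write_data_done down to a MaxBurstLength boundary and reset
+ * the DataOUT sequence state for connection recovery.
+ */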
+void iscsit_create_conn_recovery_dataout_values(
+ struct iscsi_cmd *cmd)
+{
+ u32 write_data_done = 0;
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->data_sn = 0;
+ cmd->next_burst_len = 0;
+
+ while (cmd->write_data_done > write_data_done) {
+ if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
+ cmd->write_data_done)
+ write_data_done += conn->sess->sess_ops->MaxBurstLength;
+ else
+ break;
+ }
+
+ cmd->write_data_done = write_data_done;
+}
+
+static int iscsit_attach_active_connection_recovery_entry(
+ struct iscsi_session *sess,
+ struct iscsi_conn_recovery *cr)
+{
+ spin_lock(&sess->cr_a_lock);
+ list_add_tail(&cr->cr_list, &sess->cr_active_list);
+ spin_unlock(&sess->cr_a_lock);
+
+ return 0;
+}
+
+static int iscsit_attach_inactive_connection_recovery_entry(
+ struct iscsi_session *sess,
+ struct iscsi_conn_recovery *cr)
+{
+ spin_lock(&sess->cr_i_lock);
+ list_add_tail(&cr->cr_list, &sess->cr_inactive_list);
+
+ sess->conn_recovery_count++;
+ pr_debug("Incremented connection recovery count to %u for"
+ " SID: %u\n", sess->conn_recovery_count, sess->sid);
+ spin_unlock(&sess->cr_i_lock);
+
+ return 0;
+}
+
+struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
+ struct iscsi_session *sess,
+ u16 cid)
+{
+ struct iscsi_conn_recovery *cr;
+
+ spin_lock(&sess->cr_i_lock);
+ list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+ if (cr->cid == cid) {
+ spin_unlock(&sess->cr_i_lock);
+ return cr;
+ }
+ }
+ spin_unlock(&sess->cr_i_lock);
+
+ return NULL;
+}
+
+void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+{
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_conn_recovery *cr, *cr_tmp;
+
+ spin_lock(&sess->cr_a_lock);
+ list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) {
+ list_del(&cr->cr_list);
+ spin_unlock(&sess->cr_a_lock);
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_list) {
+
+ list_del(&cmd->i_list);
+ cmd->conn = NULL;
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_lock(&sess->cr_a_lock);
+
+ kfree(cr);
+ }
+ spin_unlock(&sess->cr_a_lock);
+
+ spin_lock(&sess->cr_i_lock);
+ list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) {
+ list_del(&cr->cr_list);
+ spin_unlock(&sess->cr_i_lock);
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_list) {
+
+ list_del(&cmd->i_list);
+ cmd->conn = NULL;
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_lock(&sess->cr_i_lock);
+
+ kfree(cr);
+ }
+ spin_unlock(&sess->cr_i_lock);
+}
+
+int iscsit_remove_active_connection_recovery_entry(
+ struct iscsi_conn_recovery *cr,
+ struct iscsi_session *sess)
+{
+ spin_lock(&sess->cr_a_lock);
+ list_del(&cr->cr_list);
+
+ sess->conn_recovery_count--;
+ pr_debug("Decremented connection recovery count to %u for"
+ " SID: %u\n", sess->conn_recovery_count, sess->sid);
+ spin_unlock(&sess->cr_a_lock);
+
+ kfree(cr);
+
+ return 0;
+}
+
+int iscsit_remove_inactive_connection_recovery_entry(
+ struct iscsi_conn_recovery *cr,
+ struct iscsi_session *sess)
+{
+ spin_lock(&sess->cr_i_lock);
+ list_del(&cr->cr_list);
+ spin_unlock(&sess->cr_i_lock);
+
+ return 0;
+}
+
+/*
+ * Called with cr->conn_recovery_cmd_lock held.
+ */
+int iscsit_remove_cmd_from_connection_recovery(
+ struct iscsi_cmd *cmd,
+ struct iscsi_session *sess)
+{
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ BUG();
+ }
+ cr = cmd->cr;
+
+ list_del(&cmd->i_list);
+ return --cr->cmd_count;
+}
+
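+/*
+ * Drop commands from the failed connection that the initiator has already
+ * acknowledged via ExpStatSN, then move the recovery entry to the active
+ * list (freeing it immediately if no commands remain to be reassigned).
+ */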
+void iscsit_discard_cr_cmds_by_expstatsn(
+ struct iscsi_conn_recovery *cr,
+ u32 exp_statsn)
+{
+ u32 dropped_count = 0;
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_session *sess = cr->sess;
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_list) {
+
+ if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
+ (cmd->deferred_i_state != ISTATE_REMOVE)) ||
+ (cmd->stat_sn >= exp_statsn)) {
+ continue;
+ }
+
+ dropped_count++;
+ pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
+ " 0x%08x, CID: %hu.\n", cmd->init_task_tag,
+ cmd->stat_sn, cr->cid);
+
+ iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 0);
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+
+ pr_debug("Dropped %u total acknowledged commands on"
+ " CID: %hu less than old ExpStatSN: 0x%08x\n",
+ dropped_count, cr->cid, exp_statsn);
+
+ if (!cr->cmd_count) {
+ pr_debug("No commands to be reassigned for failed"
+ " connection CID: %hu on SID: %u\n",
+ cr->cid, sess->sid);
+ iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+ iscsit_attach_active_connection_recovery_entry(sess, cr);
+ pr_debug("iSCSI connection recovery successful for CID:"
+ " %hu on SID: %u\n", cr->cid, sess->sid);
+ iscsit_remove_active_connection_recovery_entry(cr, sess);
+ } else {
+ iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+ iscsit_attach_active_connection_recovery_entry(sess, cr);
+ }
+}
+
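+/*
+ * Drop this connection's out of order CmdSN entries and release any
+ * commands still marked ICF_OOO_CMDSN.
+ */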
+int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+{
+ u32 dropped_count = 0;
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+ struct iscsi_session *sess = conn->sess;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+ &sess->sess_ooo_cmdsn_list, ooo_list) {
+
+ if (ooo_cmdsn->cid != conn->cid)
+ continue;
+
+ dropped_count++;
+ pr_debug("Dropping unacknowledged CmdSN:"
+ " 0x%08x during connection recovery on CID: %hu\n",
+ ooo_cmdsn->cmdsn, conn->cid);
+ iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+ }
+ mutex_unlock(&sess->cmdsn_mutex);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+ if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
+ continue;
+
+ list_del(&cmd->i_list);
+
+ spin_unlock_bh(&conn->cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock_bh(&conn->cmd_lock);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_debug("Dropped %u total unacknowledged commands on CID:"
+ " %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid,
+ sess->exp_cmd_sn);
+ return 0;
+}
+
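+/*
+ * Move this connection's SCSI and NOP-OUT commands onto a newly allocated
+ * struct iscsi_conn_recovery entry so that they can be reassigned to
+ * another connection within the same session.
+ */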
+int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+{
+ u32 cmd_count = 0;
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_conn_recovery *cr;
+
+ /*
+	 * Allocate a struct iscsi_conn_recovery for this connection.
+	 * Each struct iscsi_cmd contains a struct iscsi_conn_recovery pointer
+	 * (struct iscsi_cmd->cr), so we need to allocate this before preparing the
+ * connection's command list for connection recovery.
+ */
+ cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
+ if (!cr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_conn_recovery.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&cr->cr_list);
+ INIT_LIST_HEAD(&cr->conn_recovery_cmd_list);
+ spin_lock_init(&cr->conn_recovery_cmd_lock);
+ /*
+ * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
+ * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
+ * list_del(&cmd->i_list); to release the command to the
+ * session pool and remove it from the connection's list.
+ *
+ * Also stop the DataOUT timer, which will be restarted after
+ * sending the TMR response.
+ */
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+
+ if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
+ (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
+ pr_debug("Not performing realligence on"
+ " Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
+ " CID: %hu\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, cmd->cmd_sn, conn->cid);
+
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 0);
+ spin_lock_bh(&conn->cmd_lock);
+ continue;
+ }
+
+ /*
+ * Special case where commands greater than or equal to
+ * the session's ExpCmdSN are attached to the connection
+ * list but not to the out of order CmdSN list. The one
+ * obvious case is when a command with immediate data
+ * attached must only check the CmdSN against ExpCmdSN
+ * after the data is received. The special case below
+ * is when the connection fails before data is received,
+ * but also may apply to other PDUs, so it has been
+ * made generic here.
+ */
+ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
+ (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock_bh(&conn->cmd_lock);
+ continue;
+ }
+
+ cmd_count++;
+ pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
+ " realligence.\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
+ conn->cid);
+
+ cmd->deferred_i_state = cmd->i_state;
+ cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY;
+
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+
+ cmd->sess = conn->sess;
+
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_free_all_datain_reqs(cmd);
+
+ if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
+ cmd->se_cmd.transport_wait_for_tasks)
+ cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd,
+ 0, 0);
+ /*
+ * Add the struct iscsi_cmd to the connection recovery cmd list
+ */
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list);
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+
+ spin_lock_bh(&conn->cmd_lock);
+ cmd->cr = cr;
+ cmd->conn = NULL;
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+ /*
+ * Fill in the various values in the preallocated struct iscsi_conn_recovery.
+ */
+ cr->cid = conn->cid;
+ cr->cmd_count = cmd_count;
+ cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
+ cr->sess = conn->sess;
+
+ iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
+
+ return 0;
+}
+
+int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
+{
+ atomic_set(&conn->connection_recovery, 1);
+
+ if (iscsit_close_connection(conn) < 0)
+ return -1;
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
new file mode 100644
index 0000000..22f8d24
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -0,0 +1,18 @@
+#ifndef ISCSI_TARGET_ERL2_H
+#define ISCSI_TARGET_ERL2_H
+
+extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, u32);
+extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
+extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
+ struct iscsi_session *, u16);
+extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern int iscsit_remove_active_connection_recovery_entry(
+ struct iscsi_conn_recovery *, struct iscsi_session *);
+extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
+ struct iscsi_session *);
+extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
+extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
+extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *);
+extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_ERL2_H */
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
new file mode 100644
index 0000000..daad362
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -0,0 +1,1222 @@
+/*******************************************************************************
+ * This file contains the login functions used by the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/crypto.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_stat.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+extern struct idr sess_idr;
+extern struct mutex auth_id_lock;
+extern spinlock_t sess_idr_lock;
+
+static int iscsi_login_init_conn(struct iscsi_conn *conn)
+{
+ INIT_LIST_HEAD(&conn->conn_list);
+ INIT_LIST_HEAD(&conn->conn_cmd_list);
+ INIT_LIST_HEAD(&conn->immed_queue_list);
+ INIT_LIST_HEAD(&conn->response_queue_list);
+ init_completion(&conn->conn_post_wait_comp);
+ init_completion(&conn->conn_wait_comp);
+ init_completion(&conn->conn_wait_rcfr_comp);
+ init_completion(&conn->conn_waiting_on_uc_comp);
+ init_completion(&conn->conn_logout_comp);
+ init_completion(&conn->rx_half_close_comp);
+ init_completion(&conn->tx_half_close_comp);
+ spin_lock_init(&conn->cmd_lock);
+ spin_lock_init(&conn->conn_usage_lock);
+ spin_lock_init(&conn->immed_queue_lock);
+ spin_lock_init(&conn->nopin_timer_lock);
+ spin_lock_init(&conn->response_queue_lock);
+ spin_lock_init(&conn->state_lock);
+
+ if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate conn->conn_cpumask\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to set up
+ * per struct iscsi_conn libcrypto contexts for crc32c and crc32c-intel.
+ */
+int iscsi_login_setup_crypto(struct iscsi_conn *conn)
+{
+ /*
+ * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
+	 * which will default to crc32c-intel.ko for cpu_has_xmm4_2, or fall back
+	 * to software 1x8 byte slicing from crc32c.ko.
+ */
+ conn->conn_rx_hash.flags = 0;
+ conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(conn->conn_rx_hash.tfm)) {
+ pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
+ return -ENOMEM;
+ }
+
+ conn->conn_tx_hash.flags = 0;
+ conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(conn->conn_tx_hash.tfm)) {
+ pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int iscsi_login_check_initiator_version(
+ struct iscsi_conn *conn,
+ u8 version_max,
+ u8 version_min)
+{
+ if ((version_max != 0x00) || (version_min != 0x00)) {
+ pr_err("Unsupported iSCSI IETF Pre-RFC Revision,"
+ " version Min/Max 0x%02x/0x%02x, rejecting login.\n",
+ version_min, version_max);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_NO_VERSION);
+ return -1;
+ }
+
+ return 0;
+}
+
+int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+{
+ int sessiontype;
+ struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_session *sess = NULL, *sess_p = NULL;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct se_session *se_sess, *se_sess_tmp;
+
+ initiatorname_param = iscsi_find_param_from_key(
+ INITIATORNAME, conn->param_list);
+ if (!initiatorname_param)
+ return -1;
+
+ sessiontype_param = iscsi_find_param_from_key(
+ SESSIONTYPE, conn->param_list);
+ if (!sessiontype_param)
+ return -1;
+
+ sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+ sess_list) {
+
+ sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ spin_lock(&sess_p->conn_lock);
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess_p->conn_lock);
+ continue;
+ }
+ if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
+ (!strcmp((void *)sess_p->sess_ops->InitiatorName,
+ (void *)initiatorname_param->value) &&
+ (sess_p->sess_ops->SessionType == sessiontype))) {
+ atomic_set(&sess_p->session_reinstatement, 1);
+ spin_unlock(&sess_p->conn_lock);
+ iscsit_inc_session_usage_count(sess_p);
+ iscsit_stop_time2retain_timer(sess_p);
+ sess = sess_p;
+ break;
+ }
+ spin_unlock(&sess_p->conn_lock);
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+ /*
+ * If the Time2Retain handler has expired, the session is already gone.
+ */
+ if (!sess)
+ return 0;
+
+ pr_debug("%s iSCSI Session SID %u is still active for %s,"
+		" performing session reinstatement.\n", (sessiontype) ?
+ "Discovery" : "Normal", sess->sid,
+ sess->sess_ops->InitiatorName);
+
+ spin_lock_bh(&sess->conn_lock);
+ if (sess->session_state == TARG_SESS_STATE_FAILED) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_dec_session_usage_count(sess);
+ return iscsit_close_session(sess);
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+
+ return iscsit_close_session(sess);
+}
+
+static void iscsi_login_set_conn_values(
+ struct iscsi_session *sess,
+ struct iscsi_conn *conn,
+ u16 cid)
+{
+ conn->sess = sess;
+ conn->cid = cid;
+ /*
+ * Generate a random Status sequence number (statsn) for the new
+ * iSCSI connection.
+ */
+ get_random_bytes(&conn->stat_sn, sizeof(u32));
+
+ mutex_lock(&auth_id_lock);
+ conn->auth_id = iscsit_global->auth_id++;
+ mutex_unlock(&auth_id_lock);
+}
+
+/*
+ * This is the leading connection of a new session,
+ * or session reinstatement.
+ */
+static int iscsi_login_zero_tsih_s1(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_session *sess = NULL;
+ struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+ sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
+ if (!sess) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ pr_err("Could not allocate memory for session\n");
+ return -1;
+ }
+
+ iscsi_login_set_conn_values(sess, conn, pdu->cid);
+ sess->init_task_tag = pdu->itt;
+ memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
+ sess->exp_cmd_sn = pdu->cmdsn;
+ INIT_LIST_HEAD(&sess->sess_conn_list);
+ INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
+ INIT_LIST_HEAD(&sess->cr_active_list);
+ INIT_LIST_HEAD(&sess->cr_inactive_list);
+ init_completion(&sess->async_msg_comp);
+ init_completion(&sess->reinstatement_comp);
+ init_completion(&sess->session_wait_comp);
+ init_completion(&sess->session_waiting_on_uc_comp);
+ mutex_init(&sess->cmdsn_mutex);
+ spin_lock_init(&sess->conn_lock);
+ spin_lock_init(&sess->cr_a_lock);
+ spin_lock_init(&sess->cr_i_lock);
+ spin_lock_init(&sess->session_usage_lock);
+ spin_lock_init(&sess->ttt_lock);
+
+ if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
+ pr_err("idr_pre_get() for sess_idr failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ spin_lock(&sess_idr_lock);
+ idr_get_new(&sess_idr, NULL, &sess->session_index);
+ spin_unlock(&sess_idr_lock);
+
+ sess->creation_time = get_jiffies_64();
+ spin_lock_init(&sess->session_stats_lock);
+ /*
+ * The FFP CmdSN window values will be allocated from the TPG's
+ * Initiator Node's ACL once the login has been successfully completed.
+ */
+ sess->max_cmd_sn = pdu->cmdsn;
+
+ sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
+ if (!sess->sess_ops) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_sess_ops.\n");
+ return -1;
+ }
+
+ sess->se_sess = transport_init_session();
+ if (!sess->se_sess) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ return 0;
+}
+
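+/*
+ * Second stage of leading connection login: assign a new TSIH, build the
+ * parameter list to negotiate, and seed the TargetPortalGroupTag and
+ * ErrorRecoveryLevel values.
+ */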
+static int iscsi_login_zero_tsih_s2(
+ struct iscsi_conn *conn)
+{
+ struct iscsi_node_attrib *na;
+ struct iscsi_session *sess = conn->sess;
+ unsigned char buf[32];
+
+ sess->tpg = conn->tpg;
+
+ /*
+ * Assign a new TPG Session Handle. Note this is protected with
+ * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
+ */
+ sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ if (!sess->tsih)
+ sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+
+ /*
+	 * Create the default params from user-defined values.
+ */
+ if (iscsi_copy_param_list(&conn->param_list,
+ ISCSI_TPG_C(conn)->param_list, 1) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ iscsi_set_keys_to_negotiate(0, conn->param_list);
+
+ if (sess->sess_ops->SessionType)
+ return iscsi_set_keys_irrelevant_for_discovery(
+ conn->param_list);
+
+ na = iscsit_tpg_get_node_attrib(sess);
+
+ /*
+ * Need to send TargetPortalGroupTag back in first login response
+ * on any iSCSI connection where the Initiator provides TargetName.
+ * See 5.3.1. Login Phase Start
+ *
+ * In our case, we have already located the struct iscsi_tiqn at this point.
+ */
+ memset(buf, 0, 32);
+ sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ /*
+ * Workaround for Initiators that have broken connection recovery logic.
+ *
+ * "We would really like to get rid of this." Linux-iSCSI.org team
+ */
+ memset(buf, 0, 32);
+ sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Remove PSTATE_NEGOTIATE for the four FIM related keys.
+ * The Initiator node will be able to enable FIM by proposing them itself.
+ */
+int iscsi_login_disable_FIM_keys(
+ struct iscsi_param_list *param_list,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_param *param;
+
+ param = iscsi_find_param_from_key("OFMarker", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+ " OFMarker failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ param = iscsi_find_param_from_key("OFMarkInt", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+			" OFMarkInt failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ param = iscsi_find_param_from_key("IFMarker", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+ " IFMarker failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ param = iscsi_find_param_from_key("IFMarkInt", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+			" IFMarkInt failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ return 0;
+}
+
+static int iscsi_login_non_zero_tsih_s1(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+ iscsi_login_set_conn_values(NULL, conn, pdu->cid);
+ return 0;
+}
+
+/*
+ * Add a new connection to an existing session.
+ */
+static int iscsi_login_non_zero_tsih_s2(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_session *sess = NULL, *sess_p = NULL;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct se_session *se_sess, *se_sess_tmp;
+ struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+ sess_list) {
+
+ sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
+ continue;
+ if (!memcmp((const void *)sess_p->isid,
+ (const void *)pdu->isid, 6) &&
+ (sess_p->tsih == pdu->tsih)) {
+ iscsit_inc_session_usage_count(sess_p);
+ iscsit_stop_time2retain_timer(sess_p);
+ sess = sess_p;
+ break;
+ }
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ /*
+ * If the Time2Retain handler has expired, the session is already gone.
+ */
+ if (!sess) {
+ pr_err("Initiator attempting to add a connection to"
+ " a non-existent session, rejecting iSCSI Login.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_NO_SESSION);
+ return -1;
+ }
+
+ /*
+	 * Stop the Time2Retain timer if this is a failed session; we restart
+ * the timer if the login is not successful.
+ */
+ spin_lock_bh(&sess->conn_lock);
+ if (sess->session_state == TARG_SESS_STATE_FAILED)
+ atomic_set(&sess->session_continuation, 1);
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsi_login_set_conn_values(sess, conn, pdu->cid);
+
+ if (iscsi_copy_param_list(&conn->param_list,
+ ISCSI_TPG_C(conn)->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ iscsi_set_keys_to_negotiate(0, conn->param_list);
+ /*
+ * Need to send TargetPortalGroupTag back in first login response
+ * on any iSCSI connection where the Initiator provides TargetName.
+ * See 5.3.1. Login Phase Start
+ *
+ * In our case, we have already located the struct iscsi_tiqn at this point.
+ */
+ memset(buf, 0, 32);
+ sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ return iscsi_login_disable_FIM_keys(conn->param_list, conn);
+}
+
+int iscsi_login_post_auth_non_zero_tsih(
+ struct iscsi_conn *conn,
+ u16 cid,
+ u32 exp_statsn)
+{
+ struct iscsi_conn *conn_ptr = NULL;
+ struct iscsi_conn_recovery *cr = NULL;
+ struct iscsi_session *sess = conn->sess;
+
+ /*
+ * By following item 5 in the login table, if we have found
+ * an existing ISID and a valid/existing TSIH and an existing
+	 * CID, we do connection reinstatement.  Currently we do not
+	 * support it, so we send back a non-zero status class to the
+	 * initiator and release the new connection.
+ */
+ conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
+	if (conn_ptr) {
+ pr_err("Connection exists with CID %hu for %s,"
+ " performing connection reinstatement.\n",
+ conn_ptr->cid, sess->sess_ops->InitiatorName);
+
+ iscsit_connection_reinstatement_rcfr(conn_ptr);
+ iscsit_dec_conn_usage_count(conn_ptr);
+ }
+
+ /*
+	 * Check for any connection recovery entries containing CID.
+ * We use the original ExpStatSN sent in the first login request
+ * to acknowledge commands for the failed connection.
+ *
+	 * Also note that an explicit logout may have already been sent,
+ * but the response may not be sent due to additional connection
+ * loss.
+ */
+ if (sess->sess_ops->ErrorRecoveryLevel == 2) {
+ cr = iscsit_get_inactive_connection_recovery_entry(
+ sess, cid);
+		if (cr) {
+ pr_debug("Performing implicit logout"
+ " for connection recovery on CID: %hu\n",
+ conn->cid);
+ iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn);
+ }
+ }
+
+ /*
+	 * Otherwise we follow item 4 from the login table: we have
+	 * found an existing ISID and a valid/existing TSIH and a new
+	 * CID, so we go ahead and add a new connection to the
+	 * session.
+ */
+ pr_debug("Adding CID %hu to existing session for %s.\n",
+ cid, sess->sess_ops->InitiatorName);
+
+ if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) {
+ pr_err("Adding additional connection to this session"
+ " would exceed MaxConnections %d, login failed.\n",
+ sess->sess_ops->MaxConnections);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_ISID_ERROR);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ if (!sess->sess_ops->SessionType)
+ iscsit_start_nopin_timer(conn);
+}
+
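+/*
+ * Complete a successful login: move the connection to
+ * TARG_CONN_STATE_LOGGED_IN, attach it to the new or existing session,
+ * start the post-login timers and activate the RX/TX thread set.
+ */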
+static int iscsi_post_login_handler(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+ u8 zero_tsih)
+{
+ int stop_timer = 0;
+ struct iscsi_session *sess = conn->sess;
+ struct se_session *se_sess = sess->se_sess;
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct iscsi_thread_set *ts;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS,
+ ISCSI_LOGIN_STATUS_ACCEPT);
+
+ pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n");
+ conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
+
+ iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
+ iscsit_set_sync_and_steering_values(conn);
+ /*
+ * SCSI Initiator -> SCSI Target Port Mapping
+ */
+ ts = iscsi_get_thread_set();
+ if (!zero_tsih) {
+ iscsi_set_session_parameters(sess->sess_ops,
+ conn->param_list, 0);
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+
+ spin_lock_bh(&sess->conn_lock);
+ atomic_set(&sess->session_continuation, 0);
+ if (sess->session_state == TARG_SESS_STATE_FAILED) {
+ pr_debug("Moving to"
+ " TARG_SESS_STATE_LOGGED_IN.\n");
+ sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+ stop_timer = 1;
+ }
+
+ pr_debug("iSCSI Login successful on CID: %hu from %s to"
+ " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip,
+ np->np_port, tpg->tpgt);
+
+ list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+ atomic_inc(&sess->nconn);
+ pr_debug("Incremented iSCSI Connection count to %hu"
+ " from node: %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsi_post_login_start_timers(conn);
+ iscsi_activate_thread_set(conn, ts);
+ /*
+ * Determine CPU mask to ensure connection's RX and TX kthreads
+ * are scheduled on the same CPU.
+ */
+ iscsit_thread_get_cpumask(conn);
+ conn->conn_rx_reset_cpumask = 1;
+ conn->conn_tx_reset_cpumask = 1;
+
+ iscsit_dec_conn_usage_count(conn);
+ if (stop_timer) {
+ spin_lock_bh(&se_tpg->session_lock);
+ iscsit_stop_time2retain_timer(sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+ }
+ iscsit_dec_session_usage_count(sess);
+ return 0;
+ }
+
+ iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+
+ iscsit_determine_maxcmdsn(sess);
+
+ spin_lock_bh(&se_tpg->session_lock);
+ __transport_register_session(&sess->tpg->tpg_se_tpg,
+ se_sess->se_node_acl, se_sess, (void *)sess);
+ pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
+ sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+
+ pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
+ conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt);
+
+ spin_lock_bh(&sess->conn_lock);
+ list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+ atomic_inc(&sess->nconn);
+ pr_debug("Incremented iSCSI Connection count to %hu from node:"
+ " %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ spin_unlock_bh(&sess->conn_lock);
+
+ sess->sid = tpg->sid++;
+ if (!sess->sid)
+ sess->sid = tpg->sid++;
+ pr_debug("Established iSCSI session from node: %s\n",
+ sess->sess_ops->InitiatorName);
+
+ tpg->nsessions++;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_nsessions++;
+
+ pr_debug("Incremented number of active iSCSI sessions to %u on"
+ " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ iscsi_post_login_start_timers(conn);
+ iscsi_activate_thread_set(conn, ts);
+ /*
+ * Determine CPU mask to ensure connection's RX and TX kthreads
+ * are scheduled on the same CPU.
+ */
+ iscsit_thread_get_cpumask(conn);
+ conn->conn_rx_reset_cpumask = 1;
+ conn->conn_tx_reset_cpumask = 1;
+
+ iscsit_dec_conn_usage_count(conn);
+
+ return 0;
+}
+
+static void iscsi_handle_login_thread_timeout(unsigned long data)
+{
+ struct iscsi_np *np = (struct iscsi_np *) data;
+
+ spin_lock_bh(&np->np_thread_lock);
+ pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
+ np->np_ip, np->np_port);
+
+ if (np->np_login_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return;
+ }
+
+ if (np->np_thread)
+ send_sig(SIGINT, np->np_thread, 1);
+
+ np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&np->np_thread_lock);
+}
+
+static void iscsi_start_login_thread_timer(struct iscsi_np *np)
+{
+ /*
+	 * This uses the TA_LOGIN_TIMEOUT constant because at this
+	 * point we do not yet have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout.
+ */
+ spin_lock_bh(&np->np_thread_lock);
+ init_timer(&np->np_login_timer);
+ np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
+ np->np_login_timer.data = (unsigned long)np;
+ np->np_login_timer.function = iscsi_handle_login_thread_timeout;
+ np->np_login_timer_flags &= ~ISCSI_TF_STOP;
+ np->np_login_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&np->np_login_timer);
+
+ pr_debug("Added timeout timer to iSCSI login request for"
+ " %u seconds.\n", TA_LOGIN_TIMEOUT);
+ spin_unlock_bh(&np->np_thread_lock);
+}
+
+static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
+{
+ spin_lock_bh(&np->np_thread_lock);
+ if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return;
+ }
+ np->np_login_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ del_timer_sync(&np->np_login_timer);
+
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&np->np_thread_lock);
+}
+
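+/*
+ * Create, bind and listen on the network portal's login socket for the
+ * configured transport (TCP or SCTP).
+ */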
+int iscsi_target_setup_login_socket(
+ struct iscsi_np *np,
+ struct __kernel_sockaddr_storage *sockaddr)
+{
+ struct socket *sock;
+ int backlog = 5, ret, opt = 0, len;
+
+ switch (np->np_network_transport) {
+ case ISCSI_TCP:
+ np->np_ip_proto = IPPROTO_TCP;
+ np->np_sock_type = SOCK_STREAM;
+ break;
+ case ISCSI_SCTP_TCP:
+ np->np_ip_proto = IPPROTO_SCTP;
+ np->np_sock_type = SOCK_STREAM;
+ break;
+ case ISCSI_SCTP_UDP:
+ np->np_ip_proto = IPPROTO_SCTP;
+ np->np_sock_type = SOCK_SEQPACKET;
+ break;
+ case ISCSI_IWARP_TCP:
+ case ISCSI_IWARP_SCTP:
+ case ISCSI_INFINIBAND:
+ default:
+ pr_err("Unsupported network_transport: %d\n",
+ np->np_network_transport);
+ return -EINVAL;
+ }
+
+ ret = sock_create(sockaddr->ss_family, np->np_sock_type,
+ np->np_ip_proto, &sock);
+ if (ret < 0) {
+ pr_err("sock_create() failed.\n");
+ return ret;
+ }
+ np->np_socket = sock;
+ /*
+ * The SCTP stack needs struct socket->file.
+ */
+ if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+ (np->np_network_transport == ISCSI_SCTP_UDP)) {
+ if (!sock->file) {
+ sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
+ if (!sock->file) {
+ pr_err("Unable to allocate struct"
+ " file for SCTP\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ np->np_flags |= NPF_SCTP_STRUCT_FILE;
+ }
+ }
+ /*
+	 * Set up np->np_sockaddr from the sockaddr passed in from the
+	 * iscsi_target_configfs.c code.
+ */
+ memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
+ sizeof(struct __kernel_sockaddr_storage));
+
+ if (sockaddr->ss_family == AF_INET6)
+ len = sizeof(struct sockaddr_in6);
+ else
+ len = sizeof(struct sockaddr_in);
+ /*
+	 * Set SO_REUSEADDR, and disable the Nagle algorithm with TCP_NODELAY.
+ */
+ opt = 1;
+ if (np->np_network_transport == ISCSI_TCP) {
+ ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
+ (char *)&opt, sizeof(opt));
+ if (ret < 0) {
+ pr_err("kernel_setsockopt() for TCP_NODELAY"
+ " failed: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+ (char *)&opt, sizeof(opt));
+ if (ret < 0) {
+ pr_err("kernel_setsockopt() for SO_REUSEADDR"
+ " failed\n");
+ goto fail;
+ }
+
+ ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
+ if (ret < 0) {
+ pr_err("kernel_bind() failed: %d\n", ret);
+ goto fail;
+ }
+
+ ret = kernel_listen(sock, backlog);
+ if (ret != 0) {
+ pr_err("kernel_listen() failed: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ np->np_socket = NULL;
+ if (sock) {
+ if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+ kfree(sock->file);
+ sock->file = NULL;
+ }
+
+ sock_release(sock);
+ }
+ return ret;
+}
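
As a rough usage sketch (not part of this patch), a caller that wants a plain TCP portal on the well-known iSCSI port could build a wildcard IPv4 sockaddr and hand it to iscsi_target_setup_login_socket(). The wrapper name, the hard-coded port, and the assumption that np->np_network_transport was already set to ISCSI_TCP by the configfs path are illustrative only:

/* Illustrative sketch only: assumes np was allocated elsewhere and
 * np->np_network_transport was already set to ISCSI_TCP. */
static int example_setup_tcp_portal(struct iscsi_np *np)
{
	struct __kernel_sockaddr_storage ss;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ss;

	memset(&ss, 0, sizeof(ss));
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(INADDR_ANY);
	sin->sin_port = htons(3260);	/* well-known iSCSI target port */

	return iscsi_target_setup_login_socket(np, &ss);
}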
+
+static int __iscsi_target_login_thread(struct iscsi_np *np)
+{
+ u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
+ int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
+ struct iscsi_conn *conn = NULL;
+ struct iscsi_login *login;
+ struct iscsi_portal_group *tpg = NULL;
+ struct socket *new_sock, *sock;
+ struct kvec iov;
+ struct iscsi_login_req *pdu;
+ struct sockaddr_in sock_in;
+ struct sockaddr_in6 sock_in6;
+
+ flush_signals(current);
+ set_sctp_conn_flag = 0;
+ sock = np->np_socket;
+ ip_proto = np->np_ip_proto;
+ sock_type = np->np_sock_type;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ complete(&np->np_restart_comp);
+ } else {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ if (kernel_accept(sock, &new_sock, 0) < 0) {
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ spin_unlock_bh(&np->np_thread_lock);
+ complete(&np->np_restart_comp);
+ /* Get another socket */
+ return 1;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+ goto out;
+ }
+ /*
+ * The SCTP stack needs struct socket->file.
+ */
+ if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+ (np->np_network_transport == ISCSI_SCTP_UDP)) {
+ if (!new_sock->file) {
+ new_sock->file = kzalloc(
+ sizeof(struct file), GFP_KERNEL);
+ if (!new_sock->file) {
+ pr_err("Unable to allocate struct"
+ " file for SCTP\n");
+ sock_release(new_sock);
+ /* Get another socket */
+ return 1;
+ }
+ set_sctp_conn_flag = 1;
+ }
+ }
+
+ iscsi_start_login_thread_timer(np);
+
+ conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ if (!conn) {
+ pr_err("Could not allocate memory for"
+ " new connection\n");
+ if (set_sctp_conn_flag) {
+ kfree(new_sock->file);
+ new_sock->file = NULL;
+ }
+ sock_release(new_sock);
+ /* Get another socket */
+ return 1;
+ }
+
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+ conn->conn_state = TARG_CONN_STATE_FREE;
+ conn->sock = new_sock;
+
+ if (set_sctp_conn_flag)
+ conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
+
+ pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
+ conn->conn_state = TARG_CONN_STATE_XPT_UP;
+
+ /*
+ * Allocate conn->conn_ops early, because a failure below will
+ * call iscsit_tx_login_rsp(), which in turn calls tx_data().
+ */
+ conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+ if (!conn->conn_ops) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_conn_ops.\n");
+ goto new_sess_out;
+ }
+ /*
+ * Perform the remaining iSCSI connection initialization items.
+ */
+ if (iscsi_login_init_conn(conn) < 0)
+ goto new_sess_out;
+
+ memset(buffer, 0, ISCSI_HDR_LEN);
+ memset(&iov, 0, sizeof(struct kvec));
+ iov.iov_base = buffer;
+ iov.iov_len = ISCSI_HDR_LEN;
+
+ if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) {
+ pr_err("rx_data() returned an error.\n");
+ goto new_sess_out;
+ }
+
+ iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK);
+ if (!(iscsi_opcode & ISCSI_OP_LOGIN)) {
+ pr_err("First opcode is not login request,"
+ " failing login request.\n");
+ goto new_sess_out;
+ }
+
+ pdu = (struct iscsi_login_req *) buffer;
+ pdu->cid = be16_to_cpu(pdu->cid);
+ pdu->tsih = be16_to_cpu(pdu->tsih);
+ pdu->itt = be32_to_cpu(pdu->itt);
+ pdu->cmdsn = be32_to_cpu(pdu->cmdsn);
+ pdu->exp_statsn = be32_to_cpu(pdu->exp_statsn);
+ /*
+ * Used by iscsit_tx_login_rsp() for Login Response PDUs
+ * when Status-Class != 0.
+ */
+ conn->login_itt = pdu->itt;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ spin_unlock_bh(&np->np_thread_lock);
+ pr_err("iSCSI Network Portal on %s:%hu currently not"
+ " active.\n", np->np_ip, np->np_port);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ goto new_sess_out;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ if (np->np_sockaddr.ss_family == AF_INET6) {
+ memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
+
+ if (conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in6, &err, 1) < 0) {
+ pr_err("sock_ops->getname() failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ goto new_sess_out;
+ }
+ snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
+ &sock_in6.sin6_addr.in6_u);
+ conn->login_port = ntohs(sock_in6.sin6_port);
+ } else {
+ memset(&sock_in, 0, sizeof(struct sockaddr_in));
+
+ if (conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in, &err, 1) < 0) {
+ pr_err("sock_ops->getname() failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ goto new_sess_out;
+ }
+ sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
+ conn->login_port = ntohs(sock_in.sin_port);
+ }
+
+ conn->network_transport = np->np_network_transport;
+
+ pr_debug("Received iSCSI login request from %s on %s Network"
+ " Portal %s:%hu\n", conn->login_ip,
+ (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
+ np->np_ip, np->np_port);
+
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
+ conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
+
+ if (iscsi_login_check_initiator_version(conn, pdu->max_version,
+ pdu->min_version) < 0)
+ goto new_sess_out;
+
+ zero_tsih = (pdu->tsih == 0x0000);
+ if (zero_tsih) {
+ /*
+ * This is the leading connection of a new session.
+ * We wait until after authentication to check for
+ * session reinstatement.
+ */
+ if (iscsi_login_zero_tsih_s1(conn, buffer) < 0)
+ goto new_sess_out;
+ } else {
+ /*
+ * Add a new connection to an existing session.
+ * We check for a non-existent session in
+ * iscsi_login_non_zero_tsih_s2() below based
+ * on ISID/TSIH, but wait until after authentication
+ * to check for connection reinstatement, etc.
+ */
+ if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
+ goto new_sess_out;
+ }
+
+ /*
+ * This will process the first login request, call
+ * iscsi_target_locate_portal(), and return a valid struct iscsi_login.
+ */
+ login = iscsi_target_init_negotiation(np, conn, buffer);
+ if (!login) {
+ tpg = conn->tpg;
+ goto new_sess_out;
+ }
+
+ tpg = conn->tpg;
+ if (!tpg) {
+ pr_err("Unable to locate struct iscsi_conn->tpg\n");
+ goto new_sess_out;
+ }
+
+ if (zero_tsih) {
+ if (iscsi_login_zero_tsih_s2(conn) < 0) {
+ iscsi_target_nego_release(login, conn);
+ goto new_sess_out;
+ }
+ } else {
+ if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) {
+ iscsi_target_nego_release(login, conn);
+ goto old_sess_out;
+ }
+ }
+
+ if (iscsi_target_start_negotiation(login, conn) < 0)
+ goto new_sess_out;
+
+ if (!conn->sess) {
+ pr_err("struct iscsi_conn session pointer is NULL!\n");
+ goto new_sess_out;
+ }
+
+ iscsi_stop_login_thread_timer(np);
+
+ if (signal_pending(current))
+ goto new_sess_out;
+
+ ret = iscsi_post_login_handler(np, conn, zero_tsih);
+
+ if (ret < 0)
+ goto new_sess_out;
+
+ iscsit_deaccess_np(np, tpg);
+ tpg = NULL;
+ /* Get another socket */
+ return 1;
+
+new_sess_out:
+ pr_err("iSCSI Login negotiation failed.\n");
+ iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ if (!zero_tsih || !conn->sess)
+ goto old_sess_out;
+ if (conn->sess->se_sess)
+ transport_free_session(conn->sess->se_sess);
+ if (conn->sess->session_index != 0) {
+ spin_lock_bh(&sess_idr_lock);
+ idr_remove(&sess_idr, conn->sess->session_index);
+ spin_unlock_bh(&sess_idr_lock);
+ }
+ if (conn->sess->sess_ops)
+ kfree(conn->sess->sess_ops);
+ if (conn->sess)
+ kfree(conn->sess);
+old_sess_out:
+ iscsi_stop_login_thread_timer(np);
+ /*
+ * If login negotiation fails check if the Time2Retain timer
+ * needs to be restarted.
+ */
+ if (!zero_tsih && conn->sess) {
+ spin_lock_bh(&conn->sess->conn_lock);
+ if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
+ struct se_portal_group *se_tpg =
+ &ISCSI_TPG_C(conn)->tpg_se_tpg;
+
+ atomic_set(&conn->sess->session_continuation, 0);
+ spin_unlock_bh(&conn->sess->conn_lock);
+ spin_lock_bh(&se_tpg->session_lock);
+ iscsit_start_time2retain_handler(conn->sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+ } else
+ spin_unlock_bh(&conn->sess->conn_lock);
+ iscsit_dec_session_usage_count(conn->sess);
+ }
+
+ if (!IS_ERR(conn->conn_rx_hash.tfm))
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ if (!IS_ERR(conn->conn_tx_hash.tfm))
+ crypto_free_hash(conn->conn_tx_hash.tfm);
+
+ if (conn->conn_cpumask)
+ free_cpumask_var(conn->conn_cpumask);
+
+ kfree(conn->conn_ops);
+
+ if (conn->param_list) {
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+ }
+ if (conn->sock) {
+ if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+ kfree(conn->sock->file);
+ conn->sock->file = NULL;
+ }
+ sock_release(conn->sock);
+ }
+ kfree(conn);
+
+ if (tpg) {
+ iscsit_deaccess_np(np, tpg);
+ tpg = NULL;
+ }
+
+out:
+ stop = kthread_should_stop();
+ if (!stop && signal_pending(current)) {
+ spin_lock_bh(&np->np_thread_lock);
+ stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
+ spin_unlock_bh(&np->np_thread_lock);
+ }
+ /* Wait for another socket.. */
+ if (!stop)
+ return 1;
+
+ iscsi_stop_login_thread_timer(np);
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_thread_state = ISCSI_NP_THREAD_EXIT;
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+}
+
+int iscsi_target_login_thread(void *arg)
+{
+ struct iscsi_np *np = (struct iscsi_np *)arg;
+ int ret;
+
+ allow_signal(SIGINT);
+
+ while (!kthread_should_stop()) {
+ ret = __iscsi_target_login_thread(np);
+ /*
+ * We break and exit here unless another sock_accept() call
+ * is expected.
+ */
+ if (ret != 1)
+ break;
+ }
+
+ return 0;
+}
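
iscsi_target_login_thread() is written to run as a dedicated kthread per portal, with SIGINT allowed so that a shutdown can knock it out of kernel_accept(). Roughly how the portal setup code would start and stop it; the exact call site lives outside this hunk, and the helper names here are illustrative, so treat this as a sketch rather than the patch's own code:

/* Sketch: start the per-portal login thread once the socket is listening. */
static int example_start_np_thread(struct iscsi_np *np)
{
	np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
	if (IS_ERR(np->np_thread)) {
		int ret = PTR_ERR(np->np_thread);

		np->np_thread = NULL;
		return ret;
	}
	return 0;
}

static void example_stop_np_thread(struct iscsi_np *np)
{
	/* SIGINT knocks the thread out of kernel_accept(); kthread_stop()
	 * then waits for iscsi_target_login_thread() to return. */
	send_sig(SIGINT, np->np_thread, 1);
	kthread_stop(np->np_thread);
	np->np_thread = NULL;
}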
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
new file mode 100644
index 0000000..091dcae
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -0,0 +1,12 @@
+#ifndef ISCSI_TARGET_LOGIN_H
+#define ISCSI_TARGET_LOGIN_H
+
+extern int iscsi_login_setup_crypto(struct iscsi_conn *);
+extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
+extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
+extern int iscsi_target_setup_login_socket(struct iscsi_np *,
+ struct __kernel_sockaddr_storage *);
+extern int iscsi_target_login_thread(void *);
+extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
new file mode 100644
index 0000000..4d087ac
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -0,0 +1,1067 @@
+/*******************************************************************************
+ * This file contains the main functions related to iSCSI Parameter negotiation.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/ctype.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_tpg.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_auth.h"
+
+#define MAX_LOGIN_PDUS 7
+#define TEXT_LEN 4096
+
+void convert_null_to_semi(char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] == '\0')
+ buf[i] = ';';
+}
+
+int strlen_semi(char *buf)
+{
+ int i = 0;
+
+ while (buf[i] != '\0') {
+ if (buf[i] == ';')
+ return i;
+ i++;
+ }
+
+ return -1;
+}
+
+int extract_param(
+ const char *in_buf,
+ const char *pattern,
+ unsigned int max_length,
+ char *out_buf,
+ unsigned char *type)
+{
+ char *ptr;
+ int len;
+
+ if (!in_buf || !pattern || !out_buf || !type)
+ return -1;
+
+ ptr = strstr(in_buf, pattern);
+ if (!ptr)
+ return -1;
+
+ ptr = strstr(ptr, "=");
+ if (!ptr)
+ return -1;
+
+ ptr += 1;
+ if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
+ ptr += 2; /* skip 0x */
+ *type = HEX;
+ } else
+ *type = DECIMAL;
+
+ len = strlen_semi(ptr);
+ if (len < 0)
+ return -1;
+
+ if (len > max_length) {
+ pr_err("Length of input: %d exeeds max_length:"
+ " %d\n", len, max_length);
+ return -1;
+ }
+ memcpy(out_buf, ptr, len);
+ out_buf[len] = '\0';
+
+ return 0;
+}
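
extract_param() expects a ';'-separated text buffer, i.e. one that has already been run through convert_null_to_semi() to replace the NUL separators of the login payload. A hypothetical caller (the buffer contents and helper name below are examples only) might pull a CHAP response out of such a buffer like this:

/* Hypothetical usage; the buffer contents are only examples. */
static int example_extract_chap_r(void)
{
	/* Produced by convert_null_to_semi() on the NUL-separated key=value payload. */
	const char *in_text = "CHAP_N=iqn.1993-08.org.debian:01:abc;CHAP_R=0x6a5f;";
	char chap_r[64];
	unsigned char type;

	if (extract_param(in_text, "CHAP_R", sizeof(chap_r) - 1, chap_r, &type) < 0)
		return -1;

	/* chap_r now holds "6a5f" and type == HEX, because of the "0x" prefix. */
	return 0;
}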
+
+static u32 iscsi_handle_authentication(
+ struct iscsi_conn *conn,
+ char *in_buf,
+ char *out_buf,
+ int in_length,
+ int *out_length,
+ unsigned char *authtype)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_auth *auth;
+ struct iscsi_node_acl *iscsi_nacl;
+ struct se_node_acl *se_nacl;
+
+ if (!sess->sess_ops->SessionType) {
+ /*
+ * For SessionType=Normal
+ */
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_err("Unable to locate struct se_node_acl for"
+ " CHAP auth\n");
+ return -1;
+ }
+ iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
+ se_node_acl);
+ if (!iscsi_nacl) {
+ pr_err("Unable to locate struct iscsi_node_acl for"
+ " CHAP auth\n");
+ return -1;
+ }
+
+ auth = ISCSI_NODE_AUTH(iscsi_nacl);
+ } else {
+ /*
+ * For SessionType=Discovery
+ */
+ auth = &iscsit_global->discovery_acl.node_auth;
+ }
+
+ if (strstr("CHAP", authtype))
+ strcpy(conn->sess->auth_type, "CHAP");
+ else
+ strcpy(conn->sess->auth_type, NONE);
+
+ if (strstr("None", authtype))
+ return 1;
+#ifdef CANSRP
+ else if (strstr("SRP", authtype))
+ return srp_main_loop(conn, auth, in_buf, out_buf,
+ &in_length, out_length);
+#endif
+ else if (strstr("CHAP", authtype))
+ return chap_main_loop(conn, auth, in_buf, out_buf,
+ &in_length, out_length);
+ else if (strstr("SPKM1", authtype))
+ return 2;
+ else if (strstr("SPKM2", authtype))
+ return 2;
+ else if (strstr("KRB5", authtype))
+ return 2;
+ else
+ return 2;
+}
+
+static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
+{
+ kfree(conn->auth_protocol);
+}
+
+static int iscsi_target_check_login_request(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ int req_csg, req_nsg, rsp_csg, rsp_nsg;
+ u32 payload_length;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ switch (login_req->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ break;
+ default:
+ pr_err("Received unknown opcode 0x%02x.\n",
+ login_req->opcode & ISCSI_OPCODE_MASK);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+ pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE"
+ " and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+ rsp_csg = (login_rsp->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+ req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
+ rsp_nsg = (login_rsp->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
+
+ if (req_csg != login->current_stage) {
+ pr_err("Initiator unexpectedly changed login stage"
+ " from %d to %d, login failed.\n", login->current_stage,
+ req_csg);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if ((req_nsg == 2) || (req_csg >= 2) ||
+ ((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
+ (req_nsg <= req_csg))) {
+ pr_err("Illegal login_req->flags Combination, CSG: %d,"
+ " NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg,
+ req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT));
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if ((login_req->max_version != login->version_max) ||
+ (login_req->min_version != login->version_min)) {
+ pr_err("Login request changed Version Max/Nin"
+ " unexpectedly to 0x%02x/0x%02x, protocol error\n",
+ login_req->max_version, login_req->min_version);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if (memcmp(login_req->isid, login->isid, 6) != 0) {
+ pr_err("Login request changed ISID unexpectedly,"
+ " protocol error.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if (login_req->itt != login->init_task_tag) {
+ pr_err("Login request changed ITT unexpectedly to"
+ " 0x%08x, protocol error.\n", login_req->itt);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if (payload_length > MAX_KEY_VALUE_PAIRS) {
+ pr_err("Login request payload exceeds default"
+ " MaxRecvDataSegmentLength: %u, protocol error.\n",
+ MAX_KEY_VALUE_PAIRS);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_target_check_first_request(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ struct iscsi_param *param = NULL;
+ struct se_node_acl *se_nacl;
+
+ login->first_request = 0;
+
+ list_for_each_entry(param, &conn->param_list->param_list, p_list) {
+ if (!strncmp(param->name, SESSIONTYPE, 11)) {
+ if (!IS_PSTATE_ACCEPTOR(param)) {
+ pr_err("SessionType key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ return -1;
+ }
+ if (!strncmp(param->value, DISCOVERY, 9))
+ return 0;
+ }
+
+ if (!strncmp(param->name, INITIATORNAME, 13)) {
+ if (!IS_PSTATE_ACCEPTOR(param)) {
+ if (!login->leading_connection)
+ continue;
+
+ pr_err("InitiatorName key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ return -1;
+ }
+
+ /*
+ * For non-leading connections, double check that the
+ * received InitiatorName matches the existing session's
+ * struct iscsi_node_acl.
+ */
+ if (!login->leading_connection) {
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_err("Unable to locate"
+ " struct se_node_acl\n");
+ iscsit_tx_login_rsp(conn,
+ ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
+ return -1;
+ }
+
+ if (strcmp(param->value,
+ se_nacl->initiatorname)) {
+ pr_err("Incorrect"
+ " InitiatorName: %s for this"
+ " iSCSI Initiator Node.\n",
+ param->value);
+ iscsit_tx_login_rsp(conn,
+ ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
+ return -1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ u32 padding = 0;
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+
+ login_rsp->opcode = ISCSI_OP_LOGIN_RSP;
+ hton24(login_rsp->dlength, login->rsp_length);
+ memcpy(login_rsp->isid, login->isid, 6);
+ login_rsp->tsih = cpu_to_be16(login->tsih);
+ login_rsp->itt = cpu_to_be32(login->init_task_tag);
+ login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
+ login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
+ " ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
+ " %u\n", login_rsp->flags, ntohl(login_rsp->itt),
+ ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
+ ntohl(login_rsp->statsn), login->rsp_length);
+
+ padding = ((-login->rsp_length) & 3);
+
+ if (iscsi_login_tx_data(
+ conn,
+ login->rsp,
+ login->rsp_buf,
+ login->rsp_length + padding) < 0)
+ return -1;
+
+ login->rsp_length = 0;
+ login_rsp->tsih = be16_to_cpu(login_rsp->tsih);
+ login_rsp->itt = be32_to_cpu(login_rsp->itt);
+ login_rsp->statsn = be32_to_cpu(login_rsp->statsn);
+ mutex_lock(&sess->cmdsn_mutex);
+ login_rsp->exp_cmdsn = be32_to_cpu(sess->exp_cmd_sn);
+ login_rsp->max_cmdsn = be32_to_cpu(sess->max_cmd_sn);
+ mutex_unlock(&sess->cmdsn_mutex);
+
+ return 0;
+}
+
+static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ u32 padding = 0, payload_length;
+ struct iscsi_login_req *login_req;
+
+ if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
+ return -1;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ payload_length = ntoh24(login_req->dlength);
+ login_req->tsih = be16_to_cpu(login_req->tsih);
+ login_req->itt = be32_to_cpu(login_req->itt);
+ login_req->cid = be16_to_cpu(login_req->cid);
+ login_req->cmdsn = be32_to_cpu(login_req->cmdsn);
+ login_req->exp_statsn = be32_to_cpu(login_req->exp_statsn);
+
+ pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+ login_req->flags, login_req->itt, login_req->cmdsn,
+ login_req->exp_statsn, login_req->cid, payload_length);
+
+ if (iscsi_target_check_login_request(conn, login) < 0)
+ return -1;
+
+ padding = ((-payload_length) & 3);
+ memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+
+ if (iscsi_login_rx_data(
+ conn,
+ login->req_buf,
+ payload_length + padding) < 0)
+ return -1;
+
+ return 0;
+}
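
The ((-payload_length) & 3) expression above (and in the TX path) is the usual round-up-to-4 idiom for iSCSI data-segment padding; a short annotated restatement, in case the two's-complement trick is unfamiliar:

/* (-len) & 3 == (4 - (len % 4)) % 4, i.e. bytes needed to reach the
 * next 4-byte boundary:
 *   len = 12 -> padding = 0 (already aligned)
 *   len = 13 -> padding = 3 (13 + 3 = 16)
 *   len = 14 -> padding = 2
 *   len = 15 -> padding = 1
 */
u32 padding = ((-payload_length) & 3);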
+
+static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ if (iscsi_target_do_tx_login_io(conn, login) < 0)
+ return -1;
+
+ if (iscsi_target_do_rx_login_io(conn, login) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int iscsi_target_get_initial_payload(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ u32 padding = 0, payload_length;
+ struct iscsi_login_req *login_req;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ payload_length = ntoh24(login_req->dlength);
+
+ pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
+ login_req->flags, login_req->itt, login_req->cmdsn,
+ login_req->exp_statsn, payload_length);
+
+ if (iscsi_target_check_login_request(conn, login) < 0)
+ return -1;
+
+ padding = ((-payload_length) & 3);
+
+ if (iscsi_login_rx_data(
+ conn,
+ login->req_buf,
+ payload_length + padding) < 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * NOTE: We check for existing sessions or connections AFTER the initiator
+ * has been successfully authenticated in order to protect against faked
+ * ISID/TSIH combinations.
+ */
+static int iscsi_target_check_for_existing_instances(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ if (login->checked_for_existing)
+ return 0;
+
+ login->checked_for_existing = 1;
+
+ if (!login->tsih)
+ return iscsi_check_for_session_reinstatement(conn);
+ else
+ return iscsi_login_post_auth_non_zero_tsih(conn, login->cid,
+ login->initial_exp_statsn);
+}
+
+static int iscsi_target_do_authentication(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ int authret;
+ u32 payload_length;
+ struct iscsi_param *param;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+ if (!param)
+ return -1;
+
+ authret = iscsi_handle_authentication(
+ conn,
+ login->req_buf,
+ login->rsp_buf,
+ payload_length,
+ &login->rsp_length,
+ param->value);
+ switch (authret) {
+ case 0:
+ pr_debug("Received OK response"
+ " from LIO Authentication, continuing.\n");
+ break;
+ case 1:
+ pr_debug("iSCSI security negotiation"
+ " completed sucessfully.\n");
+ login->auth_complete = 1;
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+ login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
+ ISCSI_FLAG_LOGIN_TRANSIT);
+ login->current_stage = 1;
+ }
+ return iscsi_target_check_for_existing_instances(
+ conn, login);
+ case 2:
+ pr_err("Security negotiation"
+ " failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ default:
+ pr_err("Received unknown error %d from LIO"
+ " Authentication\n", authret);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_target_handle_csg_zero(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ int ret;
+ u32 payload_length;
+ struct iscsi_param *param;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+ if (!param)
+ return -1;
+
+ ret = iscsi_decode_text_input(
+ PHASE_SECURITY|PHASE_DECLARATIVE,
+ SENDER_INITIATOR|SENDER_RECEIVER,
+ login->req_buf,
+ payload_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (ret > 0) {
+ if (login->auth_complete) {
+ pr_err("Initiator has already been"
+ " successfully authenticated, but is still"
+ " sending %s keys.\n", param->value);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ goto do_auth;
+ }
+
+ if (login->first_request)
+ if (iscsi_target_check_first_request(conn, login) < 0)
+ return -1;
+
+ ret = iscsi_encode_text_output(
+ PHASE_SECURITY|PHASE_DECLARATIVE,
+ SENDER_TARGET,
+ login->rsp_buf,
+ &login->rsp_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (!iscsi_check_negotiated_keys(conn->param_list)) {
+ if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ !strncmp(param->value, NONE, 4)) {
+ pr_err("Initiator sent AuthMethod=None but"
+ " Target is enforcing iSCSI Authentication,"
+ " login failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ }
+
+ if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ !login->auth_complete)
+ return 0;
+
+ if (strncmp(param->value, NONE, 4) && !login->auth_complete)
+ return 0;
+
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+ login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
+ ISCSI_FLAG_LOGIN_TRANSIT;
+ login->current_stage = 1;
+ }
+ }
+
+ return 0;
+do_auth:
+ return iscsi_target_do_authentication(conn, login);
+}
+
+static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ int ret;
+ u32 payload_length;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ ret = iscsi_decode_text_input(
+ PHASE_OPERATIONAL|PHASE_DECLARATIVE,
+ SENDER_INITIATOR|SENDER_RECEIVER,
+ login->req_buf,
+ payload_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (login->first_request)
+ if (iscsi_target_check_first_request(conn, login) < 0)
+ return -1;
+
+ if (iscsi_target_check_for_existing_instances(conn, login) < 0)
+ return -1;
+
+ ret = iscsi_encode_text_output(
+ PHASE_OPERATIONAL|PHASE_DECLARATIVE,
+ SENDER_TARGET,
+ login->rsp_buf,
+ &login->rsp_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (!login->auth_complete &&
+ ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+ pr_err("Initiator is requesting CSG: 1, has not been"
+ " successfully authenticated, and the Target is"
+ " enforcing iSCSI Authentication, login failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ }
+
+ if (!iscsi_check_negotiated_keys(conn->param_list))
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT))
+ login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 |
+ ISCSI_FLAG_LOGIN_TRANSIT;
+
+ return 0;
+}
+
+static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ int pdu_count = 0;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+
+ while (1) {
+ if (++pdu_count > MAX_LOGIN_PDUS) {
+ pr_err("MAX_LOGIN_PDUS count reached.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ return -1;
+ }
+
+ switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) {
+ case 0:
+ login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK);
+ if (iscsi_target_handle_csg_zero(conn, login) < 0)
+ return -1;
+ break;
+ case 1:
+ login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1;
+ if (iscsi_target_handle_csg_one(conn, login) < 0)
+ return -1;
+ if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+ login->tsih = conn->sess->tsih;
+ if (iscsi_target_do_tx_login_io(conn,
+ login) < 0)
+ return -1;
+ return 0;
+ }
+ break;
+ default:
+ pr_err("Illegal CSG: %d received from"
+ " Initiator, protocol error.\n",
+ (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
+ >> 2);
+ break;
+ }
+
+ if (iscsi_target_do_login_io(conn, login) < 0)
+ return -1;
+
+ if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+ login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
+ login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
+ }
+ }
+
+ return 0;
+}
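
The stage handling in iscsi_target_do_login() depends on the login flags layout: Transit and Continue live in the top bits, the Current Stage (CSG) in bits 3:2, and the Next Stage (NSG) in bits 1:0, which is why CSG is extracted with a >> 2. A minimal decode sketch, using only mask names already referenced in this file; the bit positions quoted in the comments follow my reading of scsi/iscsi_proto.h, so double-check against that header:

/* Sketch: decode the stage fields of a Login Request flags byte. */
static void example_decode_login_stages(u8 flags, u8 *csg, u8 *nsg, bool *transit)
{
	*csg = (flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;	/* bits 3:2 */
	*nsg = flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;		/* bits 1:0 */
	*transit = !!(flags & ISCSI_FLAG_LOGIN_TRANSIT);		/* top bit */
}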
+
+static void iscsi_initiatorname_tolower(
+ char *param_buf)
+{
+ char *c;
+ u32 iqn_size = strlen(param_buf), i;
+
+ for (i = 0; i < iqn_size; i++) {
+ c = (char *)&param_buf[i];
+ if (!isupper(*c))
+ continue;
+
+ *c = tolower(*c);
+ }
+}
+
+/*
+ * Processes the first Login Request.
+ */
+static int iscsi_target_locate_portal(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
+ char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_tiqn *tiqn;
+ struct iscsi_login_req *login_req;
+ struct iscsi_targ_login_rsp *login_rsp;
+ u32 payload_length;
+ int sessiontype = 0, ret = 0;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_targ_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ login->first_request = 1;
+ login->leading_connection = (!login_req->tsih) ? 1 : 0;
+ login->current_stage =
+ (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+ login->version_min = login_req->min_version;
+ login->version_max = login_req->max_version;
+ memcpy(login->isid, login_req->isid, 6);
+ login->cmd_sn = login_req->cmdsn;
+ login->init_task_tag = login_req->itt;
+ login->initial_exp_statsn = login_req->exp_statsn;
+ login->cid = login_req->cid;
+ login->tsih = login_req->tsih;
+
+ if (iscsi_target_get_initial_payload(conn, login) < 0)
+ return -1;
+
+ tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
+ if (!tmpbuf) {
+ pr_err("Unable to allocate memory for tmpbuf.\n");
+ return -1;
+ }
+
+ memcpy(tmpbuf, login->req_buf, payload_length);
+ tmpbuf[payload_length] = '\0';
+ start = tmpbuf;
+ end = (start + payload_length);
+
+ /*
+ * Locate the initial keys expected from the Initiator node in
+ * the first login request in order to progress with the login phase.
+ */
+ while (start < end) {
+ if (iscsi_extract_key_value(start, &key, &value) < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ if (!strncmp(key, "InitiatorName", 13))
+ i_buf = value;
+ else if (!strncmp(key, "SessionType", 11))
+ s_buf = value;
+ else if (!strncmp(key, "TargetName", 10))
+ t_buf = value;
+
+ start += strlen(key) + strlen(value) + 2;
+ }
+
+ /*
+ * See 5.3. Login Phase.
+ */
+ if (!i_buf) {
+ pr_err("InitiatorName key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ ret = -1;
+ goto out;
+ }
+ /*
+ * Convert the incoming InitiatorName to lowercase, following
+ * RFC-3720 Section 3.2.6.1 c), which states that iSCSI IQNs
+ * are NOT case sensitive.
+ */
+ iscsi_initiatorname_tolower(i_buf);
+
+ if (!s_buf) {
+ if (!login->leading_connection)
+ goto get_target;
+
+ pr_err("SessionType key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * Use default portal group for discovery sessions.
+ */
+ sessiontype = strncmp(s_buf, DISCOVERY, 9);
+ if (!sessiontype) {
+ conn->tpg = iscsit_global->discovery_tpg;
+ if (!login->leading_connection)
+ goto get_target;
+
+ sess->sess_ops->SessionType = 1;
+ /*
+ * Setup crc32c modules from libcrypto
+ */
+ if (iscsi_login_setup_crypto(conn) < 0) {
+ pr_err("iscsi_login_setup_crypto() failed\n");
+ ret = -1;
+ goto out;
+ }
+ /*
+ * Serialize access across the discovery struct iscsi_portal_group to
+ * process login attempt.
+ */
+ if (iscsit_access_np(np, conn->tpg) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ goto out;
+ }
+ ret = 0;
+ goto out;
+ }
+
+get_target:
+ if (!t_buf) {
+ pr_err("TargetName key not received"
+ " in first login request while"
+ " SessionType=Normal.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * Locate Target IQN from Storage Node.
+ */
+ tiqn = iscsit_get_tiqn_for_login(t_buf);
+ if (!tiqn) {
+ pr_err("Unable to locate Target IQN: %s in"
+ " Storage Node\n", t_buf);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ goto out;
+ }
+ pr_debug("Located Storage Object: %s\n", tiqn->tiqn);
+
+ /*
+ * Locate Target Portal Group from Storage Node.
+ */
+ conn->tpg = iscsit_get_tpg_from_np(tiqn, np);
+ if (!conn->tpg) {
+ pr_err("Unable to locate Target Portal Group"
+ " on %s\n", tiqn->tiqn);
+ iscsit_put_tiqn_for_login(tiqn);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ goto out;
+ }
+ pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
+ /*
+ * Setup crc32c modules from libcrypto
+ */
+ if (iscsi_login_setup_crypto(conn) < 0) {
+ pr_err("iscsi_login_setup_crypto() failed\n");
+ ret = -1;
+ goto out;
+ }
+ /*
+ * Serialize access across the struct iscsi_portal_group to
+ * process login attempt.
+ */
+ if (iscsit_access_np(np, conn->tpg) < 0) {
+ iscsit_put_tiqn_for_login(tiqn);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ conn->tpg = NULL;
+ goto out;
+ }
+
+ /*
+ * conn->sess->node_acl will be set when the referenced
+ * struct iscsi_session is located from received ISID+TSIH in
+ * iscsi_login_non_zero_tsih_s2().
+ */
+ if (!login->leading_connection) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * This value is required in iscsi_login_zero_tsih_s2()
+ */
+ sess->sess_ops->SessionType = 0;
+
+ /*
+ * Locate incoming Initiator IQN reference from Storage Node.
+ */
+ sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+ &conn->tpg->tpg_se_tpg, i_buf);
+ if (!sess->se_sess->se_node_acl) {
+ pr_err("iSCSI Initiator Node: %s is not authorized to"
+ " access iSCSI target portal group: %hu.\n",
+ i_buf, conn->tpg->tpgt);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_TGT_FORBIDDEN);
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ kfree(tmpbuf);
+ return ret;
+}
+
+struct iscsi_login *iscsi_target_init_negotiation(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+ char *login_pdu)
+{
+ struct iscsi_login *login;
+
+ login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
+ if (!login) {
+ pr_err("Unable to allocate memory for struct iscsi_login.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return NULL;
+ }
+
+ login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!login->req) {
+ pr_err("Unable to allocate memory for Login Request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ goto out;
+ }
+ memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
+
+ login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+ if (!login->req_buf) {
+ pr_err("Unable to allocate memory for response buffer.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ goto out;
+ }
+ /*
+ * SessionType: Discovery
+ *
+ * Locates Default Portal
+ *
+ * SessionType: Normal
+ *
+ * Locates Target Portal from NP -> Target IQN
+ */
+ if (iscsi_target_locate_portal(np, conn, login) < 0) {
+ pr_err("iSCSI Login negotiation failed.\n");
+ goto out;
+ }
+
+ return login;
+out:
+ kfree(login->req);
+ kfree(login->req_buf);
+ kfree(login);
+
+ return NULL;
+}
+
+int iscsi_target_start_negotiation(
+ struct iscsi_login *login,
+ struct iscsi_conn *conn)
+{
+ int ret = -1;
+
+ login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!login->rsp) {
+ pr_err("Unable to allocate memory for"
+ " Login Response.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ ret = -1;
+ goto out;
+ }
+
+ login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+ if (!login->rsp_buf) {
+ pr_err("Unable to allocate memory for"
+ " request buffer.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ ret = -1;
+ goto out;
+ }
+
+ ret = iscsi_target_do_login(conn, login);
+out:
+ if (ret != 0)
+ iscsi_remove_failed_auth_entry(conn);
+
+ iscsi_target_nego_release(login, conn);
+ return ret;
+}
+
+void iscsi_target_nego_release(
+ struct iscsi_login *login,
+ struct iscsi_conn *conn)
+{
+ kfree(login->req);
+ kfree(login->rsp);
+ kfree(login->req_buf);
+ kfree(login->rsp_buf);
+ kfree(login);
+}
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
new file mode 100644
index 0000000..92e133a
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -0,0 +1,17 @@
+#ifndef ISCSI_TARGET_NEGO_H
+#define ISCSI_TARGET_NEGO_H
+
+#define DECIMAL 0
+#define HEX 1
+
+extern void convert_null_to_semi(char *, int);
+extern int extract_param(const char *, const char *, unsigned int, char *,
+ unsigned char *);
+extern struct iscsi_login *iscsi_target_init_negotiation(
+ struct iscsi_np *, struct iscsi_conn *, char *);
+extern int iscsi_target_start_negotiation(
+ struct iscsi_login *, struct iscsi_conn *);
+extern void iscsi_target_nego_release(
+ struct iscsi_login *, struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
new file mode 100644
index 0000000..aeafbe0
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -0,0 +1,263 @@
+/*******************************************************************************
+ * This file contains the main functions related to Initiator Node Attributes.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_nodeattrib.h"
+
+static inline char *iscsit_na_get_initiatorname(
+ struct iscsi_node_acl *nacl)
+{
+ struct se_node_acl *se_nacl = &nacl->se_node_acl;
+
+ return &se_nacl->initiatorname[0];
+}
+
+void iscsit_set_default_node_attribues(
+ struct iscsi_node_acl *acl)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ a->dataout_timeout = NA_DATAOUT_TIMEOUT;
+ a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
+ a->nopin_timeout = NA_NOPIN_TIMEOUT;
+ a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT;
+ a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
+ a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
+ a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
+ a->default_erl = NA_DEFAULT_ERL;
+}
+
+extern int iscsit_na_dataout_timeout(
+ struct iscsi_node_acl *acl,
+ u32 dataout_timeout)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) {
+ pr_err("Requested DataOut Timeout %u larger than"
+ " maximum %u\n", dataout_timeout,
+ NA_DATAOUT_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) {
+ pr_err("Requested DataOut Timeout %u smaller than"
+ " minimum %u\n", dataout_timeout,
+ NA_DATAOUT_TIMEOUT_MIX);
+ return -EINVAL;
+ }
+
+ a->dataout_timeout = dataout_timeout;
+ pr_debug("Set DataOut Timeout to %u for Initiator Node"
+ " %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_dataout_timeout_retries(
+ struct iscsi_node_acl *acl,
+ u32 dataout_timeout_retries)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) {
+ pr_err("Requested DataOut Timeout Retries %u larger"
+ " than maximum %u", dataout_timeout_retries,
+ NA_DATAOUT_TIMEOUT_RETRIES_MAX);
+ return -EINVAL;
+ } else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) {
+ pr_err("Requested DataOut Timeout Retries %u smaller"
+ " than minimum %u", dataout_timeout_retries,
+ NA_DATAOUT_TIMEOUT_RETRIES_MIN);
+ return -EINVAL;
+ }
+
+ a->dataout_timeout_retries = dataout_timeout_retries;
+ pr_debug("Set DataOut Timeout Retries to %u for"
+ " Initiator Node %s\n", a->dataout_timeout_retries,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_nopin_timeout(
+ struct iscsi_node_acl *acl,
+ u32 nopin_timeout)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
+ struct se_session *se_sess;
+ u32 orig_nopin_timeout = a->nopin_timeout;
+
+ if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) {
+ pr_err("Requested NopIn Timeout %u larger than maximum"
+ " %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) &&
+ (nopin_timeout != 0)) {
+ pr_err("Requested NopIn Timeout %u smaller than"
+ " minimum %u and not 0\n", nopin_timeout,
+ NA_NOPIN_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->nopin_timeout = nopin_timeout;
+ pr_debug("Set NopIn Timeout to %u for Initiator"
+ " Node %s\n", a->nopin_timeout,
+ iscsit_na_get_initiatorname(acl));
+ /*
+ * Reenable disabled nopin_timeout timer for all iSCSI connections.
+ */
+ if (!orig_nopin_timeout) {
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+ spin_lock(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list,
+ conn_list) {
+ if (conn->conn_state !=
+ TARG_CONN_STATE_LOGGED_IN)
+ continue;
+
+ spin_lock(&conn->nopin_timer_lock);
+ __iscsit_start_nopin_timer(conn);
+ spin_unlock(&conn->nopin_timer_lock);
+ }
+ spin_unlock(&sess->conn_lock);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+ }
+
+ return 0;
+}
+
+extern int iscsit_na_nopin_response_timeout(
+ struct iscsi_node_acl *acl,
+ u32 nopin_response_timeout)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) {
+ pr_err("Requested NopIn Response Timeout %u larger"
+ " than maximum %u\n", nopin_response_timeout,
+ NA_NOPIN_RESPONSE_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) {
+ pr_err("Requested NopIn Response Timeout %u smaller"
+ " than minimum %u\n", nopin_response_timeout,
+ NA_NOPIN_RESPONSE_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->nopin_response_timeout = nopin_response_timeout;
+ pr_debug("Set NopIn Response Timeout to %u for"
+ " Initiator Node %s\n", a->nopin_timeout,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_random_datain_pdu_offsets(
+ struct iscsi_node_acl *acl,
+ u32 random_datain_pdu_offsets)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) {
+ pr_err("Requested Random DataIN PDU Offsets: %u not"
+ " 0 or 1\n", random_datain_pdu_offsets);
+ return -EINVAL;
+ }
+
+ a->random_datain_pdu_offsets = random_datain_pdu_offsets;
+ pr_debug("Set Random DataIN PDU Offsets to %u for"
+ " Initiator Node %s\n", a->random_datain_pdu_offsets,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_random_datain_seq_offsets(
+ struct iscsi_node_acl *acl,
+ u32 random_datain_seq_offsets)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) {
+ pr_err("Requested Random DataIN Sequence Offsets: %u"
+ " not 0 or 1\n", random_datain_seq_offsets);
+ return -EINVAL;
+ }
+
+ a->random_datain_seq_offsets = random_datain_seq_offsets;
+ pr_debug("Set Random DataIN Sequence Offsets to %u for"
+ " Initiator Node %s\n", a->random_datain_seq_offsets,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_random_r2t_offsets(
+ struct iscsi_node_acl *acl,
+ u32 random_r2t_offsets)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (random_r2t_offsets != 0 && random_r2t_offsets != 1) {
+ pr_err("Requested Random R2T Offsets: %u not"
+ " 0 or 1\n", random_r2t_offsets);
+ return -EINVAL;
+ }
+
+ a->random_r2t_offsets = random_r2t_offsets;
+ pr_debug("Set Random R2T Offsets to %u for"
+ " Initiator Node %s\n", a->random_r2t_offsets,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_default_erl(
+ struct iscsi_node_acl *acl,
+ u32 default_erl)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (default_erl != 0 && default_erl != 1 && default_erl != 2) {
+ pr_err("Requested default ERL: %u not 0, 1, or 2\n",
+ default_erl);
+ return -EINVAL;
+ }
+
+ a->default_erl = default_erl;
+ pr_debug("Set use ERL0 flag to %u for Initiator"
+ " Node %s\n", a->default_erl,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
new file mode 100644
index 0000000..c970b326
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -0,0 +1,14 @@
+#ifndef ISCSI_TARGET_NODEATTRIB_H
+#define ISCSI_TARGET_NODEATTRIB_H
+
+extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
+extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
+extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_nopin_response_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_datain_pdu_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_datain_seq_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_r2t_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32);
+
+#endif /* ISCSI_TARGET_NODEATTRIB_H */
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
new file mode 100644
index 0000000..497b2e7
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -0,0 +1,1894 @@
+/*******************************************************************************
+ * This file contains the main functions related to iSCSI Parameter negotiation.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/slab.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_parameters.h"
+
+int iscsi_login_rx_data(
+ struct iscsi_conn *conn,
+ char *buf,
+ int length)
+{
+ int rx_got;
+ struct kvec iov;
+
+ memset(&iov, 0, sizeof(struct kvec));
+ iov.iov_len = length;
+ iov.iov_base = buf;
+
+ /*
+ * Initial Marker-less Interval.
+ * Add the values regardless of IFMarker/OFMarker, considering
+ * they may not be negotiated yet.
+ */
+ conn->of_marker += length;
+
+ rx_got = rx_data(conn, &iov, 1, length);
+ if (rx_got != length) {
+ pr_err("rx_data returned %d, expecting %d.\n",
+ rx_got, length);
+ return -1;
+ }
+
+ return 0;
+}
+
+int iscsi_login_tx_data(
+ struct iscsi_conn *conn,
+ char *pdu_buf,
+ char *text_buf,
+ int text_length)
+{
+ int length, tx_sent;
+ struct kvec iov[2];
+
+ length = (ISCSI_HDR_LEN + text_length);
+
+ memset(&iov[0], 0, 2 * sizeof(struct kvec));
+ iov[0].iov_len = ISCSI_HDR_LEN;
+ iov[0].iov_base = pdu_buf;
+ iov[1].iov_len = text_length;
+ iov[1].iov_base = text_buf;
+
+ /*
+ * Initial Marker-less Interval.
+ * Add the values regardless of IFMarker/OFMarker, considering
+ * they may not be negotiated yet.
+ */
+ conn->if_marker += length;
+
+ tx_sent = tx_data(conn, &iov[0], 2, length);
+ if (tx_sent != length) {
+ pr_err("tx_data returned %d, expecting %d.\n",
+ tx_sent, length);
+ return -1;
+ }
+
+ return 0;
+}
+
+void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
+{
+ pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
+ "CRC32C" : "None");
+ pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
+ "CRC32C" : "None");
+ pr_debug("MaxRecvDataSegmentLength: %u\n",
+ conn_ops->MaxRecvDataSegmentLength);
+ pr_debug("OFMarker: %s\n", (conn_ops->OFMarker) ? "Yes" : "No");
+ pr_debug("IFMarker: %s\n", (conn_ops->IFMarker) ? "Yes" : "No");
+ if (conn_ops->OFMarker)
+ pr_debug("OFMarkInt: %u\n", conn_ops->OFMarkInt);
+ if (conn_ops->IFMarker)
+ pr_debug("IFMarkInt: %u\n", conn_ops->IFMarkInt);
+}
+
+void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
+{
+ pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
+ pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
+ pr_debug("TargetName: %s\n", sess_ops->TargetName);
+ pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
+ pr_debug("TargetPortalGroupTag: %hu\n",
+ sess_ops->TargetPortalGroupTag);
+ pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
+ pr_debug("InitialR2T: %s\n",
+ (sess_ops->InitialR2T) ? "Yes" : "No");
+ pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
+ "Yes" : "No");
+ pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
+ pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
+ pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
+ pr_debug("DefaultTime2Retain: %hu\n",
+ sess_ops->DefaultTime2Retain);
+ pr_debug("MaxOutstandingR2T: %hu\n",
+ sess_ops->MaxOutstandingR2T);
+ pr_debug("DataPDUInOrder: %s\n",
+ (sess_ops->DataPDUInOrder) ? "Yes" : "No");
+ pr_debug("DataSequenceInOrder: %s\n",
+ (sess_ops->DataSequenceInOrder) ? "Yes" : "No");
+ pr_debug("ErrorRecoveryLevel: %hu\n",
+ sess_ops->ErrorRecoveryLevel);
+ pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
+ "Discovery" : "Normal");
+}
+
+void iscsi_print_params(struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list)
+ pr_debug("%s: %s\n", param->name, param->value);
+}
+
+static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
+ char *name, char *value, u8 phase, u8 scope, u8 sender,
+ u16 type_range, u8 use)
+{
+ struct iscsi_param *param = NULL;
+
+ param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
+ if (!param) {
+ pr_err("Unable to allocate memory for parameter.\n");
+ goto out;
+ }
+ INIT_LIST_HEAD(&param->p_list);
+
+ param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
+ if (!param->name) {
+ pr_err("Unable to allocate memory for parameter name.\n");
+ goto out;
+ }
+
+ param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+ if (!param->value) {
+ pr_err("Unable to allocate memory for parameter value.\n");
+ goto out;
+ }
+
+ memcpy(param->name, name, strlen(name));
+ param->name[strlen(name)] = '\0';
+ memcpy(param->value, value, strlen(value));
+ param->value[strlen(value)] = '\0';
+ param->phase = phase;
+ param->scope = scope;
+ param->sender = sender;
+ param->use = use;
+ param->type_range = type_range;
+
+ switch (param->type_range) {
+ case TYPERANGE_BOOL_AND:
+ param->type = TYPE_BOOL_AND;
+ break;
+ case TYPERANGE_BOOL_OR:
+ param->type = TYPE_BOOL_OR;
+ break;
+ case TYPERANGE_0_TO_2:
+ case TYPERANGE_0_TO_3600:
+ case TYPERANGE_0_TO_32767:
+ case TYPERANGE_0_TO_65535:
+ case TYPERANGE_1_TO_65535:
+ case TYPERANGE_2_TO_3600:
+ case TYPERANGE_512_TO_16777215:
+ param->type = TYPE_NUMBER;
+ break;
+ case TYPERANGE_AUTH:
+ case TYPERANGE_DIGEST:
+ param->type = TYPE_VALUE_LIST | TYPE_STRING;
+ break;
+ case TYPERANGE_MARKINT:
+ param->type = TYPE_NUMBER_RANGE;
+ param->type_range |= TYPERANGE_1_TO_65535;
+ break;
+ case TYPERANGE_ISCSINAME:
+ case TYPERANGE_SESSIONTYPE:
+ case TYPERANGE_TARGETADDRESS:
+ case TYPERANGE_UTF8:
+ param->type = TYPE_STRING;
+ break;
+ default:
+ pr_err("Unknown type_range 0x%02x\n",
+ param->type_range);
+ goto out;
+ }
+ list_add_tail(&param->p_list, &param_list->param_list);
+
+ return param;
+out:
+ if (param) {
+ kfree(param->value);
+ kfree(param->name);
+ kfree(param);
+ }
+
+ return NULL;
+}
+
+/* #warning Add extension keys */
+int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
+{
+ struct iscsi_param *param = NULL;
+ struct iscsi_param_list *pl;
+
+ pl = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+ if (!pl) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_param_list.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&pl->param_list);
+ INIT_LIST_HEAD(&pl->extra_response_list);
+
+ /*
+ * The format for setting the initial parameter definitions is:
+ *
+ * Parameter name:
+ * Initial value:
+ * Allowable phase:
+ * Scope:
+ * Allowable senders:
+ * Typerange:
+ * Use:
+ */
+ param = iscsi_set_default_param(pl, AUTHMETHOD, INITIAL_AUTHMETHOD,
+ PHASE_SECURITY, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_AUTH, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, HEADERDIGEST, INITIAL_HEADERDIGEST,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DATADIGEST, INITIAL_DATADIGEST,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXCONNECTIONS,
+ INITIAL_MAXCONNECTIONS, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, SENDTARGETS, INITIAL_SENDTARGETS,
+ PHASE_FFP0, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_UTF8, 0);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETNAME, INITIAL_TARGETNAME,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_ISCSINAME, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIATORNAME,
+ INITIAL_INITIATORNAME, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_ISCSINAME, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETALIAS, INITIAL_TARGETALIAS,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_UTF8, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIATORALIAS,
+ INITIAL_INITIATORALIAS, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_INITIATOR, TYPERANGE_UTF8,
+ USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETADDRESS,
+ INITIAL_TARGETADDRESS, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_TARGETADDRESS, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETPORTALGROUPTAG,
+ INITIAL_TARGETPORTALGROUPTAG,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_0_TO_65535, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIALR2T, INITIAL_INITIALR2T,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, IMMEDIATEDATA,
+ INITIAL_IMMEDIATEDATA, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND,
+ USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
+ INITIAL_MAXRECVDATASEGMENTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXBURSTLENGTH,
+ INITIAL_MAXBURSTLENGTH, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, FIRSTBURSTLENGTH,
+ INITIAL_FIRSTBURSTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DEFAULTTIME2WAIT,
+ INITIAL_DEFAULTTIME2WAIT,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DEFAULTTIME2RETAIN,
+ INITIAL_DEFAULTTIME2RETAIN,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXOUTSTANDINGR2T,
+ INITIAL_MAXOUTSTANDINGR2T,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DATAPDUINORDER,
+ INITIAL_DATAPDUINORDER, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_OR,
+ USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DATASEQUENCEINORDER,
+ INITIAL_DATASEQUENCEINORDER,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, ERRORRECOVERYLEVEL,
+ INITIAL_ERRORRECOVERYLEVEL,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_0_TO_2, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, SESSIONTYPE, INITIAL_SESSIONTYPE,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_SESSIONTYPE, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, IFMARKER, INITIAL_IFMARKER,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, OFMARKER, INITIAL_OFMARKER,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ *param_list_ptr = pl;
+ return 0;
+out:
+ iscsi_release_param_list(pl);
+ return -1;
+}
+
+int iscsi_set_keys_to_negotiate(
+ int sessiontype,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
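+ /*
+ * Clear any previous state and mark the keys that will actively be
+ * negotiated; declarative keys such as TargetName, InitiatorName and
+ * InitiatorAlias are skipped here.
+ */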
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ param->state = 0;
+ if (!strcmp(param->name, AUTHMETHOD)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, HEADERDIGEST)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DATADIGEST)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXCONNECTIONS)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, TARGETNAME)) {
+ continue;
+ } else if (!strcmp(param->name, INITIATORNAME)) {
+ continue;
+ } else if (!strcmp(param->name, TARGETALIAS)) {
+ if (param->value)
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, INITIATORALIAS)) {
+ continue;
+ } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, INITIALR2T)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, IMMEDIATEDATA)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DATAPDUINORDER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, SESSIONTYPE)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, IFMARKER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, OFMARKER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, IFMARKINT)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, OFMARKINT)) {
+ SET_PSTATE_NEGOTIATE(param);
+ }
+ }
+
+ return 0;
+}
+
+int iscsi_set_keys_irrelevant_for_discovery(
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, INITIALR2T))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, IMMEDIATEDATA))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, MAXBURSTLENGTH))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, MAXOUTSTANDINGR2T))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DATAPDUINORDER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DATASEQUENCEINORDER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, ERRORRECOVERYLEVEL))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DEFAULTTIME2WAIT))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, IFMARKER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, OFMARKER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, IFMARKINT))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, OFMARKINT))
+ param->state &= ~PSTATE_NEGOTIATE;
+ }
+
+ return 0;
+}
+
+int iscsi_copy_param_list(
+ struct iscsi_param_list **dst_param_list,
+ struct iscsi_param_list *src_param_list,
+ int leading)
+{
+ struct iscsi_param *param = NULL;
+ struct iscsi_param *new_param = NULL;
+ struct iscsi_param_list *param_list = NULL;
+
+ param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+ if (!param_list) {
+ pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
+ goto err_out;
+ }
+ INIT_LIST_HEAD(&param_list->param_list);
+ INIT_LIST_HEAD(&param_list->extra_response_list);
+
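+ /*
+ * For non-leading connections only connection-only keys are copied,
+ * with the exception of TargetName, InitiatorName and
+ * TargetPortalGroupTag.
+ */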
+ list_for_each_entry(param, &src_param_list->param_list, p_list) {
+ if (!leading && (param->scope & SCOPE_SESSION_WIDE)) {
+ if ((strcmp(param->name, "TargetName") != 0) &&
+ (strcmp(param->name, "InitiatorName") != 0) &&
+ (strcmp(param->name, "TargetPortalGroupTag") != 0))
+ continue;
+ }
+
+ new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
+ if (!new_param) {
+ pr_err("Unable to allocate memory for struct iscsi_param.\n");
+ goto err_out;
+ }
+
+ new_param->name = kstrdup(param->name, GFP_KERNEL);
+ new_param->value = kstrdup(param->value, GFP_KERNEL);
+ if (!new_param->value || !new_param->name) {
+ kfree(new_param->value);
+ kfree(new_param->name);
+ kfree(new_param);
+ pr_err("Unable to allocate memory for parameter name/value.\n");
+ goto err_out;
+ }
+
+ new_param->set_param = param->set_param;
+ new_param->phase = param->phase;
+ new_param->scope = param->scope;
+ new_param->sender = param->sender;
+ new_param->type = param->type;
+ new_param->use = param->use;
+ new_param->type_range = param->type_range;
+
+ list_add_tail(&new_param->p_list, &param_list->param_list);
+ }
+
+ if (!list_empty(&param_list->param_list)) {
+ *dst_param_list = param_list;
+ } else {
+ pr_err("No parameters allocated.\n");
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ iscsi_release_param_list(param_list);
+ return -1;
+}
+
+static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
+{
+ struct iscsi_extra_response *er, *er_tmp;
+
+ list_for_each_entry_safe(er, er_tmp, &param_list->extra_response_list,
+ er_list) {
+ list_del(&er->er_list);
+ kfree(er);
+ }
+}
+
+void iscsi_release_param_list(struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param, *param_tmp;
+
+ list_for_each_entry_safe(param, param_tmp, &param_list->param_list,
+ p_list) {
+ list_del(&param->p_list);
+
+ kfree(param->name);
+ param->name = NULL;
+ kfree(param->value);
+ param->value = NULL;
+ kfree(param);
+ param = NULL;
+ }
+
+ iscsi_release_extra_responses(param_list);
+
+ kfree(param_list);
+}
+
+struct iscsi_param *iscsi_find_param_from_key(
+ char *key,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ if (!key || !param_list) {
+ pr_err("Key or parameter list pointer is NULL.\n");
+ return NULL;
+ }
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!strcmp(key, param->name))
+ return param;
+ }
+
+ pr_err("Unable to locate key \"%s\".\n", key);
+ return NULL;
+}
+
+int iscsi_extract_key_value(char *textbuf, char **key, char **value)
+{
+ *value = strchr(textbuf, '=');
+ if (!*value) {
+ pr_err("Unable to locate \"=\" seperator for key,"
+ " ignoring request.\n");
+ return -1;
+ }
+
+ *key = textbuf;
+ **value = '\0';
+ *value = *value + 1;
+
+ return 0;
+}
+
+int iscsi_update_param_value(struct iscsi_param *param, char *value)
+{
+ kfree(param->value);
+
+ param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+ if (!param->value) {
+ pr_err("Unable to allocate memory for value.\n");
+ return -1;
+ }
+
+ memcpy(param->value, value, strlen(value));
+ param->value[strlen(value)] = '\0';
+
+ pr_debug("iSCSI Parameter updated to %s=%s\n",
+ param->name, param->value);
+ return 0;
+}
+
+static int iscsi_add_notunderstood_response(
+ char *key,
+ char *value,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_extra_response *extra_response;
+
+ if (strlen(value) > VALUE_MAXLEN) {
+ pr_err("Value for notunderstood key \"%s\" exceeds %d,"
+ " protocol error.\n", key, VALUE_MAXLEN);
+ return -1;
+ }
+
+ extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL);
+ if (!extra_response) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_extra_response.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&extra_response->er_list);
+
+ strlcpy(extra_response->key, key, sizeof(extra_response->key));
+ strlcpy(extra_response->value, NOTUNDERSTOOD,
+ sizeof(extra_response->value));
+
+ list_add_tail(&extra_response->er_list,
+ &param_list->extra_response_list);
+ return 0;
+}
+
+static int iscsi_check_for_auth_key(char *key)
+{
+ /*
+ * RFC 1994
+ */
+ if (!strcmp(key, "CHAP_A") || !strcmp(key, "CHAP_I") ||
+ !strcmp(key, "CHAP_C") || !strcmp(key, "CHAP_N") ||
+ !strcmp(key, "CHAP_R"))
+ return 1;
+
+ /*
+ * RFC 2945
+ */
+ if (!strcmp(key, "SRP_U") || !strcmp(key, "SRP_N") ||
+ !strcmp(key, "SRP_g") || !strcmp(key, "SRP_s") ||
+ !strcmp(key, "SRP_A") || !strcmp(key, "SRP_B") ||
+ !strcmp(key, "SRP_M") || !strcmp(key, "SRP_HM"))
+ return 1;
+
+ return 0;
+}
+
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+{
+ if (IS_TYPE_BOOL_AND(param)) {
+ if (!strcmp(param->value, NO))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_TYPE_BOOL_OR(param)) {
+ if (!strcmp(param->value, YES))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, IMMEDIATEDATA))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_TYPE_NUMBER(param)) {
+ if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ /*
+ * The GlobalSAN iSCSI Initiator for MacOSX does
+ * not respond to MaxBurstLength, FirstBurstLength,
+ * DefaultTime2Wait or DefaultTime2Retain parameter keys.
+ * So, we set them to 'reply optional' here, and assume the
+ * defaults from iscsi_parameters.h if the initiator
+ * is not RFC compliant and the keys are not negotiated.
+ */
+ if (!strcmp(param->name, MAXBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ if (!strcmp(param->name, DEFAULTTIME2WAIT))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_PHASE_DECLARATIVE(param))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+}
+
+static int iscsi_check_boolean_value(struct iscsi_param *param, char *value)
+{
+ if (strcmp(value, YES) && strcmp(value, NO)) {
+ pr_err("Illegal value for \"%s\", must be either"
+ " \"%s\" or \"%s\".\n", param->name, YES, NO);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_ptr)
+{
+ char *tmpptr;
+ int value = 0;
+
+ value = simple_strtoul(value_ptr, &tmpptr, 0);
+
+/* #warning FIXME: Fix this */
+#if 0
+ if (strspn(tmpptr, WHITE_SPACE) != strlen(tmpptr)) {
+ pr_err("Illegal value \"%s\" for \"%s\".\n",
+ value_ptr, param->name);
+ return -1;
+ }
+#endif
+ if (IS_TYPERANGE_0_TO_2(param)) {
+ if ((value < 0) || (value > 2)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 2.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_0_TO_3600(param)) {
+ if ((value < 0) || (value > 3600)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 3600.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_0_TO_32767(param)) {
+ if ((value < 0) || (value > 32767)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 32767.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_0_TO_65535(param)) {
+ if ((value < 0) || (value > 65535)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 65535.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_1_TO_65535(param)) {
+ if ((value < 1) || (value > 65535)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 1 and 65535.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_2_TO_3600(param)) {
+ if ((value < 2) || (value > 3600)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 2 and 3600.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_512_TO_16777215(param)) {
+ if ((value < 512) || (value > 16777215)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 512 and 16777215.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+
+ return 0;
+}
+
+static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
+{
+ char *left_val_ptr = NULL, *right_val_ptr = NULL;
+ char *tilde_ptr = NULL, *tmp_ptr = NULL;
+ u32 left_val, right_val, local_left_val, local_right_val;
+
+ if (strcmp(param->name, IFMARKINT) &&
+ strcmp(param->name, OFMARKINT)) {
+ pr_err("Only parameters \"%s\" or \"%s\" may contain a"
+ " numerical range value.\n", IFMARKINT, OFMARKINT);
+ return -1;
+ }
+
+ if (IS_PSTATE_PROPOSER(param))
+ return 0;
+
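+ /*
+ * Both the received value and the locally configured default are
+ * expected in "low~high" form; split each at the '~' so the two
+ * halves can be validated as plain numbers.
+ */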
+ tilde_ptr = strchr(value, '~');
+ if (!tilde_ptr) {
+ pr_err("Unable to locate numerical range indicator"
+ " \"~\" for \"%s\".\n", param->name);
+ return -1;
+ }
+ *tilde_ptr = '\0';
+
+ left_val_ptr = value;
+ right_val_ptr = value + strlen(left_val_ptr) + 1;
+
+ if (iscsi_check_numerical_value(param, left_val_ptr) < 0)
+ return -1;
+ if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
+ return -1;
+
+ left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
+ right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ *tilde_ptr = '~';
+
+ if (right_val < left_val) {
+ pr_err("Numerical range for parameter \"%s\" contains"
+ " a right value which is less than the left.\n",
+ param->name);
+ return -1;
+ }
+
+ /*
+ * For now, enforce reasonable defaults for [I,O]FMarkInt.
+ */
+ tilde_ptr = strchr(param->value, '~');
+ if (!tilde_ptr) {
+ pr_err("Unable to locate numerical range indicator"
+ " \"~\" for \"%s\".\n", param->name);
+ return -1;
+ }
+ *tilde_ptr = '\0';
+
+ left_val_ptr = param->value;
+ right_val_ptr = param->value + strlen(left_val_ptr) + 1;
+
+ local_left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
+ local_right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ *tilde_ptr = '~';
+
+ if (param->set_param) {
+ if ((left_val < local_left_val) ||
+ (right_val < local_left_val)) {
+ pr_err("Passed value range \"%u~%u\" is below"
+ " minimum left value \"%u\" for key \"%s\","
+ " rejecting.\n", left_val, right_val,
+ local_left_val, param->name);
+ return -1;
+ }
+ } else {
+ if ((left_val < local_left_val) &&
+ (right_val < local_left_val)) {
+ pr_err("Received value range \"%u~%u\" is"
+ " below minimum left value \"%u\" for key"
+ " \"%s\", rejecting.\n", left_val, right_val,
+ local_left_val, param->name);
+ SET_PSTATE_REJECT(param);
+ if (iscsi_update_param_value(param, REJECT) < 0)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
+{
+ if (IS_PSTATE_PROPOSER(param))
+ return 0;
+
+ if (IS_TYPERANGE_AUTH_PARAM(param)) {
+ if (strcmp(value, KRB5) && strcmp(value, SPKM1) &&
+ strcmp(value, SPKM2) && strcmp(value, SRP) &&
+ strcmp(value, CHAP) && strcmp(value, NONE)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " \"%s\", \"%s\", \"%s\", \"%s\", \"%s\""
+ " or \"%s\".\n", param->name, KRB5,
+ SPKM1, SPKM2, SRP, CHAP, NONE);
+ return -1;
+ }
+ }
+ if (IS_TYPERANGE_DIGEST_PARAM(param)) {
+ if (strcmp(value, CRC32C) && strcmp(value, NONE)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " \"%s\" or \"%s\".\n", param->name,
+ CRC32C, NONE);
+ return -1;
+ }
+ }
+ if (IS_TYPERANGE_SESSIONTYPE(param)) {
+ if (strcmp(value, DISCOVERY) && strcmp(value, NORMAL)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " \"%s\" or \"%s\".\n", param->name,
+ DISCOVERY, NORMAL);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function is used to pick a value from a numerical range,
+ * currently it just returns the lesser of the two right-hand values.
+ */
+static char *iscsi_get_value_from_number_range(
+ struct iscsi_param *param,
+ char *value)
+{
+ char *end_ptr, *tilde_ptr1 = NULL, *tilde_ptr2 = NULL;
+ u32 acceptor_right_value, proposer_right_value;
+
+ tilde_ptr1 = strchr(value, '~');
+ if (!tilde_ptr1)
+ return NULL;
+ *tilde_ptr1++ = '\0';
+ proposer_right_value = simple_strtoul(tilde_ptr1, &end_ptr, 0);
+
+ tilde_ptr2 = strchr(param->value, '~');
+ if (!tilde_ptr2)
+ return NULL;
+ *tilde_ptr2++ = '\0';
+ acceptor_right_value = simple_strtoul(tilde_ptr2, &end_ptr, 0);
+
+ return (acceptor_right_value >= proposer_right_value) ?
+ tilde_ptr1 : tilde_ptr2;
+}
+
+static char *iscsi_check_valuelist_for_support(
+ struct iscsi_param *param,
+ char *value)
+{
+ char *tmp1 = NULL, *tmp2 = NULL;
+ char *acceptor_values = NULL, *proposer_values = NULL;
+
+ acceptor_values = param->value;
+ proposer_values = value;
+
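+ /*
+ * Walk the proposer's comma separated list and return the first
+ * entry that also appears in the acceptor's list, restoring the
+ * ',' separators that were temporarily NUL terminated.
+ */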
+ do {
+ if (!proposer_values)
+ return NULL;
+ tmp1 = strchr(proposer_values, ',');
+ if (tmp1)
+ *tmp1 = '\0';
+ acceptor_values = param->value;
+ do {
+ if (!acceptor_values) {
+ if (tmp1)
+ *tmp1 = ',';
+ return NULL;
+ }
+ tmp2 = strchr(acceptor_values, ',');
+ if (tmp2)
+ *tmp2 = '\0';
+ if (!acceptor_values || !proposer_values) {
+ if (tmp1)
+ *tmp1 = ',';
+ if (tmp2)
+ *tmp2 = ',';
+ return NULL;
+ }
+ if (!strcmp(acceptor_values, proposer_values)) {
+ if (tmp2)
+ *tmp2 = ',';
+ goto out;
+ }
+ if (tmp2)
+ *tmp2++ = ',';
+
+ acceptor_values = tmp2;
+ if (!acceptor_values)
+ break;
+ } while (acceptor_values);
+ if (tmp1)
+ *tmp1++ = ',';
+ proposer_values = tmp1;
+ } while (proposer_values);
+
+out:
+ return proposer_values;
+}
+
+static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value)
+{
+ u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
+ char *negotiated_value = NULL;
+
+ if (IS_PSTATE_ACCEPTOR(param)) {
+ pr_err("Received key \"%s\" twice, protocol error.\n",
+ param->name);
+ return -1;
+ }
+
+ if (IS_PSTATE_REJECT(param))
+ return 0;
+
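+ /*
+ * Boolean AND keys only negotiate to Yes when both sides offer Yes;
+ * Boolean OR keys negotiate to Yes when either side offers Yes.
+ */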
+ if (IS_TYPE_BOOL_AND(param)) {
+ if (!strcmp(value, YES))
+ proposer_boolean_value = 1;
+ if (!strcmp(param->value, YES))
+ acceptor_boolean_value = 1;
+ if (!acceptor_boolean_value || !proposer_boolean_value) {
+ if (iscsi_update_param_value(param, NO) < 0)
+ return -1;
+ if (!proposer_boolean_value)
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+ } else if (IS_TYPE_BOOL_OR(param)) {
+ if (!strcmp(value, YES))
+ proposer_boolean_value = 1;
+ if (!strcmp(param->value, YES))
+ acceptor_boolean_value = 1;
+ if (acceptor_boolean_value || proposer_boolean_value) {
+ if (iscsi_update_param_value(param, YES) < 0)
+ return -1;
+ if (proposer_boolean_value)
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+ } else if (IS_TYPE_NUMBER(param)) {
+ char *tmpptr, buf[10];
+ u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
+ u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
+
+ memset(buf, 0, 10);
+
+ if (!strcmp(param->name, MAXCONNECTIONS) ||
+ !strcmp(param->name, MAXBURSTLENGTH) ||
+ !strcmp(param->name, FIRSTBURSTLENGTH) ||
+ !strcmp(param->name, MAXOUTSTANDINGR2T) ||
+ !strcmp(param->name, DEFAULTTIME2RETAIN) ||
+ !strcmp(param->name, ERRORRECOVERYLEVEL)) {
+ if (proposer_value > acceptor_value) {
+ sprintf(buf, "%u", acceptor_value);
+ if (iscsi_update_param_value(param,
+ &buf[0]) < 0)
+ return -1;
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+ } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+ if (acceptor_value > proposer_value) {
+ sprintf(buf, "%u", acceptor_value);
+ if (iscsi_update_param_value(param,
+ &buf[0]) < 0)
+ return -1;
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+
+ if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_TYPE_NUMBER_RANGE(param)) {
+ negotiated_value = iscsi_get_value_from_number_range(
+ param, value);
+ if (!negotiated_value)
+ return -1;
+ if (iscsi_update_param_value(param, negotiated_value) < 0)
+ return -1;
+ } else if (IS_TYPE_VALUE_LIST(param)) {
+ negotiated_value = iscsi_check_valuelist_for_support(
+ param, value);
+ if (!negotiated_value) {
+ pr_err("Proposer's value list \"%s\" contains"
+ " no valid values from Acceptor's value list"
+ " \"%s\".\n", value, param->value);
+ return -1;
+ }
+ if (iscsi_update_param_value(param, negotiated_value) < 0)
+ return -1;
+ } else if (IS_PHASE_DECLARATIVE(param)) {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+
+ return 0;
+}
+
+static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
+{
+ if (IS_PSTATE_RESPONSE_GOT(param)) {
+ pr_err("Received key \"%s\" twice, protocol error.\n",
+ param->name);
+ return -1;
+ }
+
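+ /*
+ * For a number range key the response must be a single value that
+ * falls inside the low~high range originally proposed, or one of
+ * the special Irrelevant/Reject answers.
+ */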
+ if (IS_TYPE_NUMBER_RANGE(param)) {
+ u32 left_val = 0, right_val = 0, received_value = 0;
+ char *left_val_ptr = NULL, *right_val_ptr = NULL;
+ char *tilde_ptr = NULL, *tmp_ptr = NULL;
+
+ if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ return 0;
+ }
+
+ tilde_ptr = strchr(value, '~');
+ if (tilde_ptr) {
+ pr_err("Illegal \"~\" in response for \"%s\".\n",
+ param->name);
+ return -1;
+ }
+ tilde_ptr = strchr(param->value, '~');
+ if (!tilde_ptr) {
+ pr_err("Unable to locate numerical range"
+ " indicator \"~\" for \"%s\".\n", param->name);
+ return -1;
+ }
+ *tilde_ptr = '\0';
+
+ left_val_ptr = param->value;
+ right_val_ptr = param->value + strlen(left_val_ptr) + 1;
+ left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
+ right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ received_value = simple_strtoul(value, &tmp_ptr, 0);
+
+ *tilde_ptr = '~';
+
+ if ((received_value < left_val) ||
+ (received_value > right_val)) {
+ pr_err("Illegal response \"%s=%u\", value must"
+ " be between %u and %u.\n", param->name,
+ received_value, left_val, right_val);
+ return -1;
+ }
+ } else if (IS_TYPE_VALUE_LIST(param)) {
+ char *comma_ptr = NULL, *tmp_ptr = NULL;
+
+ comma_ptr = strchr(value, ',');
+ if (comma_ptr) {
+ pr_err("Illegal \",\" in response for \"%s\".\n",
+ param->name);
+ return -1;
+ }
+
+ tmp_ptr = iscsi_check_valuelist_for_support(param, value);
+ if (!tmp_ptr)
+ return -1;
+ }
+
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int iscsi_check_value(struct iscsi_param *param, char *value)
+{
+ char *comma_ptr = NULL;
+
+ if (!strcmp(value, REJECT)) {
+ if (!strcmp(param->name, IFMARKINT) ||
+ !strcmp(param->name, OFMARKINT)) {
+ /*
+ * Reject is not fatal for [I,O]FMarkInt, and causes
+ * [I,O]FMarker to be reset to No. (See iSCSI v20 A.3.2)
+ */
+ SET_PSTATE_REJECT(param);
+ return 0;
+ }
+ pr_err("Received %s=%s\n", param->name, value);
+ return -1;
+ }
+ if (!strcmp(value, IRRELEVANT)) {
+ pr_debug("Received %s=%s\n", param->name, value);
+ SET_PSTATE_IRRELEVANT(param);
+ return 0;
+ }
+ if (!strcmp(value, NOTUNDERSTOOD)) {
+ if (!IS_PSTATE_PROPOSER(param)) {
+ pr_err("Received illegal offer %s=%s\n",
+ param->name, value);
+ return -1;
+ }
+
+/* #warning FIXME: Add check for X-ExtensionKey here */
+ pr_err("Standard iSCSI key \"%s\" cannot be answered"
+ " with \"%s\", protocol error.\n", param->name, value);
+ return -1;
+ }
+
+ do {
+ comma_ptr = strchr(value, ',');
+
+ if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) {
+ pr_err("Detected value seperator \",\", but"
+ " key \"%s\" does not allow a value list,"
+ " protocol error.\n", param->name);
+ return -1;
+ }
+ if (comma_ptr)
+ *comma_ptr = '\0';
+
+ if (strlen(value) > VALUE_MAXLEN) {
+ pr_err("Value for key \"%s\" exceeds %d,"
+ " protocol error.\n", param->name,
+ VALUE_MAXLEN);
+ return -1;
+ }
+
+ if (IS_TYPE_BOOL_AND(param) || IS_TYPE_BOOL_OR(param)) {
+ if (iscsi_check_boolean_value(param, value) < 0)
+ return -1;
+ } else if (IS_TYPE_NUMBER(param)) {
+ if (iscsi_check_numerical_value(param, value) < 0)
+ return -1;
+ } else if (IS_TYPE_NUMBER_RANGE(param)) {
+ if (iscsi_check_numerical_range_value(param, value) < 0)
+ return -1;
+ } else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
+ if (iscsi_check_string_or_list_value(param, value) < 0)
+ return -1;
+ } else {
+ pr_err("Huh? 0x%02x\n", param->type);
+ return -1;
+ }
+
+ if (comma_ptr)
+ *comma_ptr++ = ',';
+
+ value = comma_ptr;
+ } while (value);
+
+ return 0;
+}
+
+static struct iscsi_param *__iscsi_check_key(
+ char *key,
+ int sender,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ if (strlen(key) > KEY_MAXLEN) {
+ pr_err("Length of key name \"%s\" exceeds %d.\n",
+ key, KEY_MAXLEN);
+ return NULL;
+ }
+
+ param = iscsi_find_param_from_key(key, param_list);
+ if (!param)
+ return NULL;
+
+ if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "target" : "initiator");
+ return NULL;
+ }
+
+ if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "initiator" : "target");
+ return NULL;
+ }
+
+ return param;
+}
+
+static struct iscsi_param *iscsi_check_key(
+ char *key,
+ int phase,
+ int sender,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+ /*
+ * Key name length must not exceed 63 bytes. (See iSCSI v20 5.1)
+ */
+ if (strlen(key) > KEY_MAXLEN) {
+ pr_err("Length of key name \"%s\" exceeds %d.\n",
+ key, KEY_MAXLEN);
+ return NULL;
+ }
+
+ param = iscsi_find_param_from_key(key, param_list);
+ if (!param)
+ return NULL;
+
+ if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "target" : "initiator");
+ return NULL;
+ }
+ if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "initiator" : "target");
+ return NULL;
+ }
+
+ if (IS_PSTATE_ACCEPTOR(param)) {
+ pr_err("Key \"%s\" received twice, protocol error.\n",
+ key);
+ return NULL;
+ }
+
+ if (!phase)
+ return param;
+
+ if (!(param->phase & phase)) {
+ pr_err("Key \"%s\" may not be negotiated during ",
+ param->name);
+ switch (phase) {
+ case PHASE_SECURITY:
+ pr_debug("Security phase.\n");
+ break;
+ case PHASE_OPERATIONAL:
+ pr_debug("Operational phase.\n");
+ break;
+ default:
+ pr_debug("Unknown phase.\n");
+ }
+ return NULL;
+ }
+
+ return param;
+}
+
+static int iscsi_enforce_integrity_rules(
+ u8 phase,
+ struct iscsi_param_list *param_list)
+{
+ char *tmpptr;
+ u8 DataSequenceInOrder = 0;
+ u8 ErrorRecoveryLevel = 0, SessionType = 0;
+ u8 IFMarker = 0, OFMarker = 0;
+ u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
+ u32 FirstBurstLength = 0, MaxBurstLength = 0;
+ struct iscsi_param *param = NULL;
+
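+ /*
+ * First pass: collect the negotiated values that the integrity
+ * rules below depend upon.
+ */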
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!(param->phase & phase))
+ continue;
+ if (!strcmp(param->name, SESSIONTYPE))
+ if (!strcmp(param->value, NORMAL))
+ SessionType = 1;
+ if (!strcmp(param->name, ERRORRECOVERYLEVEL))
+ ErrorRecoveryLevel = simple_strtoul(param->value,
+ &tmpptr, 0);
+ if (!strcmp(param->name, DATASEQUENCEINORDER))
+ if (!strcmp(param->value, YES))
+ DataSequenceInOrder = 1;
+ if (!strcmp(param->name, MAXBURSTLENGTH))
+ MaxBurstLength = simple_strtoul(param->value,
+ &tmpptr, 0);
+ if (!strcmp(param->name, IFMARKER))
+ if (!strcmp(param->value, YES))
+ IFMarker = 1;
+ if (!strcmp(param->name, OFMARKER))
+ if (!strcmp(param->value, YES))
+ OFMarker = 1;
+ if (!strcmp(param->name, IFMARKINT))
+ if (!strcmp(param->value, REJECT))
+ IFMarkInt_Reject = 1;
+ if (!strcmp(param->name, OFMARKINT))
+ if (!strcmp(param->value, REJECT))
+ OFMarkInt_Reject = 1;
+ }
+
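+ /*
+ * Second pass: enforce the interdependencies between keys, resetting
+ * values where the negotiated combination is not allowed.
+ */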
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!(param->phase & phase))
+ continue;
+ if (!SessionType && (!IS_PSTATE_ACCEPTOR(param) &&
+ (strcmp(param->name, IFMARKER) &&
+ strcmp(param->name, OFMARKER) &&
+ strcmp(param->name, IFMARKINT) &&
+ strcmp(param->name, OFMARKINT))))
+ continue;
+ if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
+ DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
+ if (strcmp(param->value, "1")) {
+ if (iscsi_update_param_value(param, "1") < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+ if (!strcmp(param->name, MAXCONNECTIONS) && !SessionType) {
+ if (strcmp(param->value, "1")) {
+ if (iscsi_update_param_value(param, "1") < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+ if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+ FirstBurstLength = simple_strtoul(param->value,
+ &tmpptr, 0);
+ if (FirstBurstLength > MaxBurstLength) {
+ char tmpbuf[10];
+ memset(tmpbuf, 0, 10);
+ sprintf(tmpbuf, "%u", MaxBurstLength);
+ if (iscsi_update_param_value(param, tmpbuf))
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+ if (!strcmp(param->name, IFMARKER) && IFMarkInt_Reject) {
+ if (iscsi_update_param_value(param, NO) < 0)
+ return -1;
+ IFMarker = 0;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ if (!strcmp(param->name, OFMARKER) && OFMarkInt_Reject) {
+ if (iscsi_update_param_value(param, NO) < 0)
+ return -1;
+ OFMarker = 0;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ if (!strcmp(param->name, IFMARKINT) && !IFMarker) {
+ if (!strcmp(param->value, REJECT))
+ continue;
+ param->state &= ~PSTATE_NEGOTIATE;
+ if (iscsi_update_param_value(param, IRRELEVANT) < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ if (!strcmp(param->name, OFMARKINT) && !OFMarker) {
+ if (!strcmp(param->value, REJECT))
+ continue;
+ param->state &= ~PSTATE_NEGOTIATE;
+ if (iscsi_update_param_value(param, IRRELEVANT) < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+
+ return 0;
+}
+
+int iscsi_decode_text_input(
+ u8 phase,
+ u8 sender,
+ char *textbuf,
+ u32 length,
+ struct iscsi_param_list *param_list)
+{
+ char *tmpbuf, *start = NULL, *end = NULL;
+
+ tmpbuf = kzalloc(length + 1, GFP_KERNEL);
+ if (!tmpbuf) {
+ pr_err("Unable to allocate memory for tmpbuf.\n");
+ return -1;
+ }
+
+ memcpy(tmpbuf, textbuf, length);
+ tmpbuf[length] = '\0';
+ start = tmpbuf;
+ end = (start + length);
+
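+ /*
+ * The login/text data segment is a series of NUL terminated
+ * "key=value" pairs; the "+ 2" below accounts for the '=' and the
+ * trailing NUL when advancing to the next pair.
+ */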
+ while (start < end) {
+ char *key, *value;
+ struct iscsi_param *param;
+
+ if (iscsi_extract_key_value(start, &key, &value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+
+ pr_debug("Got key: %s=%s\n", key, value);
+
+ if (phase & PHASE_SECURITY) {
+ if (iscsi_check_for_auth_key(key) > 0) {
+ char *tmpptr = key + strlen(key);
+ *tmpptr = '=';
+ kfree(tmpbuf);
+ return 1;
+ }
+ }
+
+ param = iscsi_check_key(key, phase, sender, param_list);
+ if (!param) {
+ if (iscsi_add_notunderstood_response(key,
+ value, param_list) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+ start += strlen(key) + strlen(value) + 2;
+ continue;
+ }
+ if (iscsi_check_value(param, value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+
+ start += strlen(key) + strlen(value) + 2;
+
+ if (IS_PSTATE_PROPOSER(param)) {
+ if (iscsi_check_proposer_state(param, value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+ SET_PSTATE_RESPONSE_GOT(param);
+ } else {
+ if (iscsi_check_acceptor_state(param, value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+ SET_PSTATE_ACCEPTOR(param);
+ }
+ }
+
+ kfree(tmpbuf);
+ return 0;
+}
+
+int iscsi_encode_text_output(
+ u8 phase,
+ u8 sender,
+ char *textbuf,
+ u32 *length,
+ struct iscsi_param_list *param_list)
+{
+ char *output_buf = NULL;
+ struct iscsi_extra_response *er;
+ struct iscsi_param *param;
+
+ output_buf = textbuf + *length;
+
+ if (iscsi_enforce_integrity_rules(phase, param_list) < 0)
+ return -1;
+
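+ /*
+ * Every emitted "key=value" pair is followed by a NUL separator,
+ * hence the extra byte added to *length after each sprintf().
+ */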
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!(param->sender & sender))
+ continue;
+ if (IS_PSTATE_ACCEPTOR(param) &&
+ !IS_PSTATE_RESPONSE_SENT(param) &&
+ !IS_PSTATE_REPLY_OPTIONAL(param) &&
+ (param->phase & phase)) {
+ *length += sprintf(output_buf, "%s=%s",
+ param->name, param->value);
+ *length += 1;
+ output_buf = textbuf + *length;
+ SET_PSTATE_RESPONSE_SENT(param);
+ pr_debug("Sending key: %s=%s\n",
+ param->name, param->value);
+ continue;
+ }
+ if (IS_PSTATE_NEGOTIATE(param) &&
+ !IS_PSTATE_ACCEPTOR(param) &&
+ !IS_PSTATE_PROPOSER(param) &&
+ (param->phase & phase)) {
+ *length += sprintf(output_buf, "%s=%s",
+ param->name, param->value);
+ *length += 1;
+ output_buf = textbuf + *length;
+ SET_PSTATE_PROPOSER(param);
+ iscsi_check_proposer_for_optional_reply(param);
+ pr_debug("Sending key: %s=%s\n",
+ param->name, param->value);
+ }
+ }
+
+ list_for_each_entry(er, &param_list->extra_response_list, er_list) {
+ *length += sprintf(output_buf, "%s=%s", er->key, er->value);
+ *length += 1;
+ output_buf = textbuf + *length;
+ pr_debug("Sending key: %s=%s\n", er->key, er->value);
+ }
+ iscsi_release_extra_responses(param_list);
+
+ return 0;
+}
+
+int iscsi_check_negotiated_keys(struct iscsi_param_list *param_list)
+{
+ int ret = 0;
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (IS_PSTATE_NEGOTIATE(param) &&
+ IS_PSTATE_PROPOSER(param) &&
+ !IS_PSTATE_RESPONSE_GOT(param) &&
+ !IS_PSTATE_REPLY_OPTIONAL(param) &&
+ !IS_PHASE_DECLARATIVE(param)) {
+ pr_err("No response for proposed key \"%s\".\n",
+ param->name);
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+int iscsi_change_param_value(
+ char *keyvalue,
+ struct iscsi_param_list *param_list,
+ int check_key)
+{
+ char *key = NULL, *value = NULL;
+ struct iscsi_param *param;
+ int sender = 0;
+
+ if (iscsi_extract_key_value(keyvalue, &key, &value) < 0)
+ return -1;
+
+ if (!check_key) {
+ param = __iscsi_check_key(keyvalue, sender, param_list);
+ if (!param)
+ return -1;
+ } else {
+ param = iscsi_check_key(keyvalue, 0, sender, param_list);
+ if (!param)
+ return -1;
+
+ param->set_param = 1;
+ if (iscsi_check_value(param, value) < 0) {
+ param->set_param = 0;
+ return -1;
+ }
+ param->set_param = 0;
+ }
+
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+
+ return 0;
+}
+
+void iscsi_set_connection_parameters(
+ struct iscsi_conn_ops *ops,
+ struct iscsi_param_list *param_list)
+{
+ char *tmpptr;
+ struct iscsi_param *param;
+
+ pr_debug("---------------------------------------------------"
+ "---------------\n");
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
+ continue;
+ if (!strcmp(param->name, AUTHMETHOD)) {
+ pr_debug("AuthMethod: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, HEADERDIGEST)) {
+ ops->HeaderDigest = !strcmp(param->value, CRC32C);
+ pr_debug("HeaderDigest: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DATADIGEST)) {
+ ops->DataDigest = !strcmp(param->value, CRC32C);
+ pr_debug("DataDigest: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+ ops->MaxRecvDataSegmentLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxRecvDataSegmentLength: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, OFMARKER)) {
+ ops->OFMarker = !strcmp(param->value, YES);
+ pr_debug("OFMarker: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, IFMARKER)) {
+ ops->IFMarker = !strcmp(param->value, YES);
+ pr_debug("IFMarker: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, OFMARKINT)) {
+ ops->OFMarkInt =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("OFMarkInt: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, IFMARKINT)) {
+ ops->IFMarkInt =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("IFMarkInt: %s\n",
+ param->value);
+ }
+ }
+ pr_debug("----------------------------------------------------"
+ "--------------\n");
+}
+
+void iscsi_set_session_parameters(
+ struct iscsi_sess_ops *ops,
+ struct iscsi_param_list *param_list,
+ int leading)
+{
+ char *tmpptr;
+ struct iscsi_param *param;
+
+ pr_debug("----------------------------------------------------"
+ "--------------\n");
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
+ continue;
+ if (!strcmp(param->name, INITIATORNAME)) {
+ if (!param->value)
+ continue;
+ if (leading)
+ snprintf(ops->InitiatorName,
+ sizeof(ops->InitiatorName),
+ "%s", param->value);
+ pr_debug("InitiatorName: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, INITIATORALIAS)) {
+ if (!param->value)
+ continue;
+ snprintf(ops->InitiatorAlias,
+ sizeof(ops->InitiatorAlias),
+ "%s", param->value);
+ pr_debug("InitiatorAlias: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, TARGETNAME)) {
+ if (!param->value)
+ continue;
+ if (leading)
+ snprintf(ops->TargetName,
+ sizeof(ops->TargetName),
+ "%s", param->value);
+ pr_debug("TargetName: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, TARGETALIAS)) {
+ if (!param->value)
+ continue;
+ snprintf(ops->TargetAlias, sizeof(ops->TargetAlias),
+ "%s", param->value);
+ pr_debug("TargetAlias: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
+ ops->TargetPortalGroupTag =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("TargetPortalGroupTag: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXCONNECTIONS)) {
+ ops->MaxConnections =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxConnections: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, INITIALR2T)) {
+ ops->InitialR2T = !strcmp(param->value, YES);
+ pr_debug("InitialR2T: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, IMMEDIATEDATA)) {
+ ops->ImmediateData = !strcmp(param->value, YES);
+ pr_debug("ImmediateData: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
+ ops->MaxBurstLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxBurstLength: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+ ops->FirstBurstLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("FirstBurstLength: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+ ops->DefaultTime2Wait =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("DefaultTime2Wait: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
+ ops->DefaultTime2Retain =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("DefaultTime2Retain: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
+ ops->MaxOutstandingR2T =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxOutstandingR2T: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DATAPDUINORDER)) {
+ ops->DataPDUInOrder = !strcmp(param->value, YES);
+ pr_debug("DataPDUInOrder: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
+ ops->DataSequenceInOrder = !strcmp(param->value, YES);
+ pr_debug("DataSequenceInOrder: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
+ ops->ErrorRecoveryLevel =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("ErrorRecoveryLevel: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, SESSIONTYPE)) {
+ ops->SessionType = !strcmp(param->value, DISCOVERY);
+ pr_debug("SessionType: %s\n",
+ param->value);
+ }
+ }
+ pr_debug("----------------------------------------------------"
+ "--------------\n");
+}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
new file mode 100644
index 0000000..6a37fd6
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -0,0 +1,269 @@
+#ifndef ISCSI_PARAMETERS_H
+#define ISCSI_PARAMETERS_H
+
+struct iscsi_extra_response {
+ char key[64];
+ char value[32];
+ struct list_head er_list;
+} ____cacheline_aligned;
+
+struct iscsi_param {
+ char *name;
+ char *value;
+ u8 set_param;
+ u8 phase;
+ u8 scope;
+ u8 sender;
+ u8 type;
+ u8 use;
+ u16 type_range;
+ u32 state;
+ struct list_head p_list;
+} ____cacheline_aligned;
+
+extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
+extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
+extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
+extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
+extern void iscsi_print_params(struct iscsi_param_list *);
+extern int iscsi_create_default_params(struct iscsi_param_list **);
+extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *);
+extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
+extern int iscsi_copy_param_list(struct iscsi_param_list **,
+ struct iscsi_param_list *, int);
+extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
+extern void iscsi_release_param_list(struct iscsi_param_list *);
+extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
+extern int iscsi_extract_key_value(char *, char **, char **);
+extern int iscsi_update_param_value(struct iscsi_param *, char *);
+extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_param_list *);
+extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
+ struct iscsi_param_list *);
+extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
+extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
+ struct iscsi_param_list *);
+extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
+ struct iscsi_param_list *, int);
+
+#define YES "Yes"
+#define NO "No"
+#define ALL "All"
+#define IRRELEVANT "Irrelevant"
+#define NONE "None"
+#define NOTUNDERSTOOD "NotUnderstood"
+#define REJECT "Reject"
+
+/*
+ * The Parameter Names.
+ */
+#define AUTHMETHOD "AuthMethod"
+#define HEADERDIGEST "HeaderDigest"
+#define DATADIGEST "DataDigest"
+#define MAXCONNECTIONS "MaxConnections"
+#define SENDTARGETS "SendTargets"
+#define TARGETNAME "TargetName"
+#define INITIATORNAME "InitiatorName"
+#define TARGETALIAS "TargetAlias"
+#define INITIATORALIAS "InitiatorAlias"
+#define TARGETADDRESS "TargetAddress"
+#define TARGETPORTALGROUPTAG "TargetPortalGroupTag"
+#define INITIALR2T "InitialR2T"
+#define IMMEDIATEDATA "ImmediateData"
+#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
+#define MAXBURSTLENGTH "MaxBurstLength"
+#define FIRSTBURSTLENGTH "FirstBurstLength"
+#define DEFAULTTIME2WAIT "DefaultTime2Wait"
+#define DEFAULTTIME2RETAIN "DefaultTime2Retain"
+#define MAXOUTSTANDINGR2T "MaxOutstandingR2T"
+#define DATAPDUINORDER "DataPDUInOrder"
+#define DATASEQUENCEINORDER "DataSequenceInOrder"
+#define ERRORRECOVERYLEVEL "ErrorRecoveryLevel"
+#define SESSIONTYPE "SessionType"
+#define IFMARKER "IFMarker"
+#define OFMARKER "OFMarker"
+#define IFMARKINT "IFMarkInt"
+#define OFMARKINT "OFMarkInt"
+#define X_EXTENSIONKEY "X-com.sbei.version"
+#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
+#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
+
+/*
+ * For AuthMethod.
+ */
+#define KRB5 "KRB5"
+#define SPKM1 "SPKM1"
+#define SPKM2 "SPKM2"
+#define SRP "SRP"
+#define CHAP "CHAP"
+
+/*
+ * Initial values for Parameter Negotiation.
+ */
+#define INITIAL_AUTHMETHOD CHAP
+#define INITIAL_HEADERDIGEST "CRC32C,None"
+#define INITIAL_DATADIGEST "CRC32C,None"
+#define INITIAL_MAXCONNECTIONS "1"
+#define INITIAL_SENDTARGETS ALL
+#define INITIAL_TARGETNAME "LIO.Target"
+#define INITIAL_INITIATORNAME "LIO.Initiator"
+#define INITIAL_TARGETALIAS "LIO Target"
+#define INITIAL_INITIATORALIAS "LIO Initiator"
+#define INITIAL_TARGETADDRESS "0.0.0.0:0000,0"
+#define INITIAL_TARGETPORTALGROUPTAG "1"
+#define INITIAL_INITIALR2T YES
+#define INITIAL_IMMEDIATEDATA YES
+#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192"
+#define INITIAL_MAXBURSTLENGTH "262144"
+#define INITIAL_FIRSTBURSTLENGTH "65536"
+#define INITIAL_DEFAULTTIME2WAIT "2"
+#define INITIAL_DEFAULTTIME2RETAIN "20"
+#define INITIAL_MAXOUTSTANDINGR2T "1"
+#define INITIAL_DATAPDUINORDER YES
+#define INITIAL_DATASEQUENCEINORDER YES
+#define INITIAL_ERRORRECOVERYLEVEL "0"
+#define INITIAL_SESSIONTYPE NORMAL
+#define INITIAL_IFMARKER NO
+#define INITIAL_OFMARKER NO
+#define INITIAL_IFMARKINT "2048~65535"
+#define INITIAL_OFMARKINT "2048~65535"
+
+/*
+ * For [Header,Data]Digests.
+ */
+#define CRC32C "CRC32C"
+
+/*
+ * For SessionType.
+ */
+#define DISCOVERY "Discovery"
+#define NORMAL "Normal"
+
+/*
+ * struct iscsi_param->use
+ */
+#define USE_LEADING_ONLY 0x01
+#define USE_INITIAL_ONLY 0x02
+#define USE_ALL 0x04
+
+#define IS_USE_LEADING_ONLY(p) ((p)->use & USE_LEADING_ONLY)
+#define IS_USE_INITIAL_ONLY(p) ((p)->use & USE_INITIAL_ONLY)
+#define IS_USE_ALL(p) ((p)->use & USE_ALL)
+
+#define SET_USE_INITIAL_ONLY(p) ((p)->use |= USE_INITIAL_ONLY)
+
+/*
+ * struct iscsi_param->sender
+ */
+#define SENDER_INITIATOR 0x01
+#define SENDER_TARGET 0x02
+#define SENDER_BOTH 0x03
+/* Used in iscsi_check_key() */
+#define SENDER_RECEIVER 0x04
+
+#define IS_SENDER_INITIATOR(p) ((p)->sender & SENDER_INITIATOR)
+#define IS_SENDER_TARGET(p) ((p)->sender & SENDER_TARGET)
+#define IS_SENDER_BOTH(p) ((p)->sender & SENDER_BOTH)
+
+/*
+ * struct iscsi_param->scope
+ */
+#define SCOPE_CONNECTION_ONLY 0x01
+#define SCOPE_SESSION_WIDE 0x02
+
+#define IS_SCOPE_CONNECTION_ONLY(p) ((p)->scope & SCOPE_CONNECTION_ONLY)
+#define IS_SCOPE_SESSION_WIDE(p) ((p)->scope & SCOPE_SESSION_WIDE)
+
+/*
+ * struct iscsi_param->phase
+ */
+#define PHASE_SECURITY 0x01
+#define PHASE_OPERATIONAL 0x02
+#define PHASE_DECLARATIVE 0x04
+#define PHASE_FFP0 0x08
+
+#define IS_PHASE_SECURITY(p) ((p)->phase & PHASE_SECURITY)
+#define IS_PHASE_OPERATIONAL(p) ((p)->phase & PHASE_OPERATIONAL)
+#define IS_PHASE_DECLARATIVE(p) ((p)->phase & PHASE_DECLARATIVE)
+#define IS_PHASE_FFP0(p) ((p)->phase & PHASE_FFP0)
+
+/*
+ * struct iscsi_param->type
+ */
+#define TYPE_BOOL_AND 0x01
+#define TYPE_BOOL_OR 0x02
+#define TYPE_NUMBER 0x04
+#define TYPE_NUMBER_RANGE 0x08
+#define TYPE_STRING 0x10
+#define TYPE_VALUE_LIST 0x20
+
+#define IS_TYPE_BOOL_AND(p) ((p)->type & TYPE_BOOL_AND)
+#define IS_TYPE_BOOL_OR(p) ((p)->type & TYPE_BOOL_OR)
+#define IS_TYPE_NUMBER(p) ((p)->type & TYPE_NUMBER)
+#define IS_TYPE_NUMBER_RANGE(p) ((p)->type & TYPE_NUMBER_RANGE)
+#define IS_TYPE_STRING(p) ((p)->type & TYPE_STRING)
+#define IS_TYPE_VALUE_LIST(p) ((p)->type & TYPE_VALUE_LIST)
+
+/*
+ * struct iscsi_param->type_range
+ */
+#define TYPERANGE_BOOL_AND 0x0001
+#define TYPERANGE_BOOL_OR 0x0002
+#define TYPERANGE_0_TO_2 0x0004
+#define TYPERANGE_0_TO_3600 0x0008
+#define TYPERANGE_0_TO_32767 0x0010
+#define TYPERANGE_0_TO_65535 0x0020
+#define TYPERANGE_1_TO_65535 0x0040
+#define TYPERANGE_2_TO_3600 0x0080
+#define TYPERANGE_512_TO_16777215 0x0100
+#define TYPERANGE_AUTH 0x0200
+#define TYPERANGE_DIGEST 0x0400
+#define TYPERANGE_ISCSINAME 0x0800
+#define TYPERANGE_MARKINT 0x1000
+#define TYPERANGE_SESSIONTYPE 0x2000
+#define TYPERANGE_TARGETADDRESS 0x4000
+#define TYPERANGE_UTF8 0x8000
+
+#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2)
+#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600)
+#define IS_TYPERANGE_0_TO_32767(p) ((p)->type_range & TYPERANGE_0_TO_32767)
+#define IS_TYPERANGE_0_TO_65535(p) ((p)->type_range & TYPERANGE_0_TO_65535)
+#define IS_TYPERANGE_1_TO_65535(p) ((p)->type_range & TYPERANGE_1_TO_65535)
+#define IS_TYPERANGE_2_TO_3600(p) ((p)->type_range & TYPERANGE_2_TO_3600)
+#define IS_TYPERANGE_512_TO_16777215(p) ((p)->type_range & \
+ TYPERANGE_512_TO_16777215)
+#define IS_TYPERANGE_AUTH_PARAM(p) ((p)->type_range & TYPERANGE_AUTH)
+#define IS_TYPERANGE_DIGEST_PARAM(p) ((p)->type_range & TYPERANGE_DIGEST)
+#define IS_TYPERANGE_SESSIONTYPE(p) ((p)->type_range & \
+ TYPERANGE_SESSIONTYPE)
+
+/*
+ * struct iscsi_param->state
+ */
+#define PSTATE_ACCEPTOR 0x01
+#define PSTATE_NEGOTIATE 0x02
+#define PSTATE_PROPOSER 0x04
+#define PSTATE_IRRELEVANT 0x08
+#define PSTATE_REJECT 0x10
+#define PSTATE_REPLY_OPTIONAL 0x20
+#define PSTATE_RESPONSE_GOT 0x40
+#define PSTATE_RESPONSE_SENT 0x80
+
+#define IS_PSTATE_ACCEPTOR(p) ((p)->state & PSTATE_ACCEPTOR)
+#define IS_PSTATE_NEGOTIATE(p) ((p)->state & PSTATE_NEGOTIATE)
+#define IS_PSTATE_PROPOSER(p) ((p)->state & PSTATE_PROPOSER)
+#define IS_PSTATE_IRRELEVANT(p) ((p)->state & PSTATE_IRRELEVANT)
+#define IS_PSTATE_REJECT(p) ((p)->state & PSTATE_REJECT)
+#define IS_PSTATE_REPLY_OPTIONAL(p) ((p)->state & PSTATE_REPLY_OPTIONAL)
+#define IS_PSTATE_RESPONSE_GOT(p) ((p)->state & PSTATE_RESPONSE_GOT)
+#define IS_PSTATE_RESPONSE_SENT(p) ((p)->state & PSTATE_RESPONSE_SENT)
+
+#define SET_PSTATE_ACCEPTOR(p) ((p)->state |= PSTATE_ACCEPTOR)
+#define SET_PSTATE_NEGOTIATE(p) ((p)->state |= PSTATE_NEGOTIATE)
+#define SET_PSTATE_PROPOSER(p) ((p)->state |= PSTATE_PROPOSER)
+#define SET_PSTATE_IRRELEVANT(p) ((p)->state |= PSTATE_IRRELEVANT)
+#define SET_PSTATE_REJECT(p) ((p)->state |= PSTATE_REJECT)
+#define SET_PSTATE_REPLY_OPTIONAL(p) ((p)->state |= PSTATE_REPLY_OPTIONAL)
+#define SET_PSTATE_RESPONSE_GOT(p) ((p)->state |= PSTATE_RESPONSE_GOT)
+#define SET_PSTATE_RESPONSE_SENT(p) ((p)->state |= PSTATE_RESPONSE_SENT)
+
+#endif /* ISCSI_PARAMETERS_H */
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
new file mode 100644
index 0000000..fc69408
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -0,0 +1,664 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI DataSequenceInOrder=No
+ * and DataPDUInOrder=No.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/random.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_seq_pdu_list.h"
+
+#define OFFLOAD_BUF_SIZE 32768
+
+void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
+{
+ int i;
+ struct iscsi_seq *seq;
+
+ pr_debug("Dumping Sequence List for ITT: 0x%08x:\n",
+ cmd->init_task_tag);
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+ pr_debug("i: %d, pdu_start: %d, pdu_count: %d,"
+ " offset: %d, xfer_len: %d, seq_send_order: %d,"
+ " seq_no: %d\n", i, seq->pdu_start, seq->pdu_count,
+ seq->offset, seq->xfer_len, seq->seq_send_order,
+ seq->seq_no);
+ }
+}
+
+void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
+{
+ int i;
+ struct iscsi_pdu *pdu;
+
+ pr_debug("Dumping PDU List for ITT: 0x%08x:\n",
+ cmd->init_task_tag);
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+ pr_debug("i: %d, offset: %d, length: %d,"
+ " pdu_send_order: %d, seq_no: %d\n", i, pdu->offset,
+ pdu->length, pdu->pdu_send_order, pdu->seq_no);
+ }
+}
+
+static void iscsit_ordered_seq_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ u32 i, seq_count = 0;
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+ continue;
+ cmd->seq_list[i].seq_send_order = seq_count++;
+ }
+}
+
+static void iscsit_ordered_pdu_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ u32 i, pdu_send_order = 0, seq_no = 0;
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+redo:
+ if (cmd->pdu_list[i].seq_no == seq_no) {
+ cmd->pdu_list[i].pdu_send_order = pdu_send_order++;
+ continue;
+ }
+ seq_no++;
+ pdu_send_order = 0;
+ goto redo;
+ }
+}
+
+/*
+ * Generate count random values into array[].
+ * Use 0x80000000 to mark already-generated values in array[].
+ */
+static void iscsit_create_random_array(u32 *array, u32 count)
+{
+ int i, j, k;
+
+ if (count == 1) {
+ array[0] = 0;
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+redo:
+ get_random_bytes(&j, sizeof(u32));
+ j = (1 + (int) (9999 + 1) - j) % count;
+ for (k = 0; k < i + 1; k++) {
+ j |= 0x80000000;
+ if ((array[k] & 0x80000000) && (array[k] == j))
+ goto redo;
+ }
+ array[i] = j;
+ }
+
+ for (i = 0; i < count; i++)
+ array[i] &= ~0x80000000;
+}
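
The marking trick above is easier to follow outside the kernel. The sketch below is a hypothetical userspace rewrite of the same idea (rand() stands in for get_random_bytes(), and the reduction into [0, count) is simplified); it is not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Userspace sketch of iscsit_create_random_array(): pick a distinct
 * random slot for each entry, using bit 31 as an "already generated"
 * marker while duplicates are being rejected. */
static void create_random_array(unsigned int *array, unsigned int count)
{
	unsigned int i, k, j;

	if (count == 1) {
		array[0] = 0;
		return;
	}
	for (i = 0; i < count; i++) {
redo:
		j = (unsigned int)rand() % count;	/* candidate send order */
		j |= 0x80000000;			/* mark as generated */
		for (k = 0; k < i; k++)
			if ((array[k] & 0x80000000) && array[k] == j)
				goto redo;		/* duplicate, redraw */
		array[i] = j;
	}
	for (i = 0; i < count; i++)
		array[i] &= ~0x80000000u;		/* strip the marker */
}

int main(void)
{
	unsigned int order[8], i;

	srand((unsigned int)time(NULL));
	create_random_array(order, 8);
	for (i = 0; i < 8; i++)
		printf("entry %u -> send_order %u\n", i, order[i]);
	return 0;
}
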
+
+static int iscsit_randomize_pdu_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ int i = 0;
+ u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0;
+
+ for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) {
+redo:
+ if (cmd->pdu_list[pdu_count].seq_no == seq_no) {
+ seq_count++;
+ continue;
+ }
+ array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ if (!array) {
+ pr_err("Unable to allocate memory"
+ " for random array.\n");
+ return -1;
+ }
+ iscsit_create_random_array(array, seq_count);
+
+ for (i = 0; i < seq_count; i++)
+ cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
+
+ kfree(array);
+
+ seq_offset += seq_count;
+ seq_count = 0;
+ seq_no++;
+ goto redo;
+ }
+
+ if (seq_count) {
+ array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ if (!array) {
+ pr_err("Unable to allocate memory for"
+ " random array.\n");
+ return -1;
+ }
+ iscsit_create_random_array(array, seq_count);
+
+ for (i = 0; i < seq_count; i++)
+ cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
+
+ kfree(array);
+ }
+
+ return 0;
+}
+
+static int iscsit_randomize_seq_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ int i, j = 0;
+ u32 *array, seq_count = cmd->seq_count;
+
+ if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED))
+ seq_count--;
+ else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED)
+ seq_count -= 2;
+
+ if (!seq_count)
+ return 0;
+
+ array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ if (!array) {
+ pr_err("Unable to allocate memory for random array.\n");
+ return -1;
+ }
+ iscsit_create_random_array(array, seq_count);
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+ continue;
+ cmd->seq_list[i].seq_send_order = array[j++];
+ }
+
+ kfree(array);
+ return 0;
+}
+
+static void iscsit_determine_counts_for_list(
+ struct iscsi_cmd *cmd,
+ struct iscsi_build_list *bl,
+ u32 *seq_count,
+ u32 *pdu_count)
+{
+ int check_immediate = 0;
+ u32 burstlength = 0, offset = 0;
+ u32 unsolicited_data_length = 0;
+ struct iscsi_conn *conn = cmd->conn;
+
+ if ((bl->type == PDULIST_IMMEDIATE) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ check_immediate = 1;
+
+ if ((bl->type == PDULIST_UNSOLICITED) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ unsolicited_data_length = (cmd->data_length >
+ conn->sess->sess_ops->FirstBurstLength) ?
+ conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+
+ while (offset < cmd->data_length) {
+ *pdu_count += 1;
+
+ if (check_immediate) {
+ check_immediate = 0;
+ offset += bl->immediate_data_length;
+ *seq_count += 1;
+ if (unsolicited_data_length)
+ unsolicited_data_length -=
+ bl->immediate_data_length;
+ continue;
+ }
+ if (unsolicited_data_length > 0) {
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
+ >= cmd->data_length) {
+ unsolicited_data_length -=
+ (cmd->data_length - offset);
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
+ >= conn->sess->sess_ops->FirstBurstLength) {
+ unsolicited_data_length -=
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ offset += (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ burstlength = 0;
+ *seq_count += 1;
+ continue;
+ }
+
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ unsolicited_data_length -=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ continue;
+ }
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ conn->sess->sess_ops->MaxBurstLength) {
+ offset += (conn->sess->sess_ops->MaxBurstLength -
+ burstlength);
+ burstlength = 0;
+ *seq_count += 1;
+ continue;
+ }
+
+ burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+}
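
To see what the walk above computes, here is a reduced standalone sketch (hypothetical values, no immediate or unsolicited data) of how the negotiated MaxRecvDataSegmentLength and MaxBurstLength slice one transfer into PDU and sequence counts. It mirrors the ordering of the checks in iscsit_determine_counts_for_list() but is not part of the patch.

#include <stdio.h>

int main(void)
{
	unsigned int data_length = 262144;	/* bytes in the command */
	unsigned int mrdsl = 8192;		/* MaxRecvDataSegmentLength */
	unsigned int max_burst = 65536;		/* MaxBurstLength */
	unsigned int offset = 0, burst = 0;
	unsigned int pdu_count = 0, seq_count = 1;

	while (offset < data_length) {
		pdu_count++;
		if (offset + mrdsl >= data_length) {
			offset = data_length;		/* final, possibly short PDU */
			continue;
		}
		if (burst + mrdsl >= max_burst) {
			offset += max_burst - burst;	/* PDU that closes the burst */
			burst = 0;
			seq_count++;
			continue;
		}
		burst += mrdsl;
		offset += mrdsl;
	}
	/* Prints: 262144 bytes -> 4 sequences, 32 PDUs */
	printf("%u bytes -> %u sequences, %u PDUs\n",
	       data_length, seq_count, pdu_count);
	return 0;
}
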
+
+
+/*
+ * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No
+ * and DataPDUInOrder=No.
+ */
+static int iscsit_build_pdu_and_seq_list(
+ struct iscsi_cmd *cmd,
+ struct iscsi_build_list *bl)
+{
+ int check_immediate = 0, datapduinorder, datasequenceinorder;
+ u32 burstlength = 0, offset = 0, i = 0;
+ u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = cmd->pdu_list;
+ struct iscsi_seq *seq = cmd->seq_list;
+
+ datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
+ datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
+
+ if ((bl->type == PDULIST_IMMEDIATE) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ check_immediate = 1;
+
+ if ((bl->type == PDULIST_UNSOLICITED) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ unsolicited_data_length = (cmd->data_length >
+ conn->sess->sess_ops->FirstBurstLength) ?
+ conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+
+ while (offset < cmd->data_length) {
+ pdu_count++;
+ if (!datapduinorder) {
+ pdu[i].offset = offset;
+ pdu[i].seq_no = seq_no;
+ }
+ if (!datasequenceinorder && (pdu_count == 1)) {
+ seq[seq_no].pdu_start = i;
+ seq[seq_no].seq_no = seq_no;
+ seq[seq_no].offset = offset;
+ seq[seq_no].orig_offset = offset;
+ }
+
+ if (check_immediate) {
+ check_immediate = 0;
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_IMMEDIATE;
+ pdu[i++].length = bl->immediate_data_length;
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_IMMEDIATE;
+ seq[seq_no].pdu_count = 1;
+ seq[seq_no].xfer_len =
+ bl->immediate_data_length;
+ }
+ offset += bl->immediate_data_length;
+ pdu_count = 0;
+ seq_no++;
+ if (unsolicited_data_length)
+ unsolicited_data_length -=
+ bl->immediate_data_length;
+ continue;
+ }
+ if (unsolicited_data_length > 0) {
+ if ((offset +
+ conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_UNSOLICITED;
+ pdu[i].length =
+ (cmd->data_length - offset);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_UNSOLICITED;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (cmd->data_length - offset));
+ }
+ unsolicited_data_length -=
+ (cmd->data_length - offset);
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((offset +
+ conn->conn_ops->MaxRecvDataSegmentLength) >=
+ conn->sess->sess_ops->FirstBurstLength) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_UNSOLICITED;
+ pdu[i++].length =
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_UNSOLICITED;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset));
+ }
+ unsolicited_data_length -=
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ offset += (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ burstlength = 0;
+ pdu_count = 0;
+ seq_no++;
+ continue;
+ }
+
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_UNSOLICITED;
+ pdu[i++].length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+ burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ unsolicited_data_length -=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ continue;
+ }
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_NORMAL;
+ pdu[i].length = (cmd->data_length - offset);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_NORMAL;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (cmd->data_length - offset));
+ }
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ conn->sess->sess_ops->MaxBurstLength) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_NORMAL;
+ pdu[i++].length =
+ (conn->sess->sess_ops->MaxBurstLength -
+ burstlength);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_NORMAL;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (conn->sess->sess_ops->MaxBurstLength -
+ burstlength));
+ }
+ offset += (conn->sess->sess_ops->MaxBurstLength -
+ burstlength);
+ burstlength = 0;
+ pdu_count = 0;
+ seq_no++;
+ continue;
+ }
+
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_NORMAL;
+ pdu[i++].length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+ burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+
+ if (!datasequenceinorder) {
+ if (bl->data_direction & ISCSI_PDU_WRITE) {
+ if (bl->randomize & RANDOM_R2T_OFFSETS) {
+ if (iscsit_randomize_seq_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_seq_lists(cmd, bl->type);
+ } else if (bl->data_direction & ISCSI_PDU_READ) {
+ if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) {
+ if (iscsit_randomize_seq_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_seq_lists(cmd, bl->type);
+ }
+#if 0
+ iscsit_dump_seq_list(cmd);
+#endif
+ }
+ if (!datapduinorder) {
+ if (bl->data_direction & ISCSI_PDU_WRITE) {
+ if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) {
+ if (iscsit_randomize_pdu_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_pdu_lists(cmd, bl->type);
+ } else if (bl->data_direction & ISCSI_PDU_READ) {
+ if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) {
+ if (iscsit_randomize_pdu_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_pdu_lists(cmd, bl->type);
+ }
+#if 0
+ iscsit_dump_pdu_list(cmd);
+#endif
+ }
+
+ return 0;
+}
+
+/*
+ * Only called while DataSequenceInOrder=No or DataPDUInOrder=No.
+ */
+int iscsit_do_build_list(
+ struct iscsi_cmd *cmd,
+ struct iscsi_build_list *bl)
+{
+ u32 pdu_count = 0, seq_count = 1;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = NULL;
+ struct iscsi_seq *seq = NULL;
+
+ iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count);
+
+ if (!conn->sess->sess_ops->DataSequenceInOrder) {
+ seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC);
+ if (!seq) {
+ pr_err("Unable to allocate struct iscsi_seq list\n");
+ return -1;
+ }
+ cmd->seq_list = seq;
+ cmd->seq_count = seq_count;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC);
+ if (!pdu) {
+ pr_err("Unable to allocate struct iscsi_pdu list.\n");
+ kfree(seq);
+ return -1;
+ }
+ cmd->pdu_list = pdu;
+ cmd->pdu_count = pdu_count;
+ }
+
+ return iscsit_build_pdu_and_seq_list(cmd, bl);
+}
+
+struct iscsi_pdu *iscsit_get_pdu_holder(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 length)
+{
+ u32 i;
+ struct iscsi_pdu *pdu = NULL;
+
+ if (!cmd->pdu_list) {
+ pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+ return NULL;
+ }
+
+ pdu = &cmd->pdu_list[0];
+
+ for (i = 0; i < cmd->pdu_count; i++)
+ if ((pdu[i].offset == offset) && (pdu[i].length == length))
+ return &pdu[i];
+
+ pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:"
+ " %u, Length: %u\n", cmd->init_task_tag, offset, length);
+ return NULL;
+}
+
+struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
+ struct iscsi_cmd *cmd,
+ struct iscsi_seq *seq)
+{
+ u32 i;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = NULL;
+
+ if (!cmd->pdu_list) {
+ pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+ return NULL;
+ }
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+redo:
+ pdu = &cmd->pdu_list[cmd->pdu_start];
+
+ for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
+#if 0
+ pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
+ "_send_order: %d, pdu[i].offset: %d,"
+ " pdu[i].length: %d\n", pdu[i].seq_no,
+ pdu[i].pdu_send_order, pdu[i].offset,
+ pdu[i].length);
+#endif
+ if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
+ cmd->pdu_send_order++;
+ return &pdu[i];
+ }
+ }
+
+ cmd->pdu_start += cmd->pdu_send_order;
+ cmd->pdu_send_order = 0;
+ cmd->seq_no++;
+
+ if (cmd->pdu_start < cmd->pdu_count)
+ goto redo;
+
+ pr_err("Command ITT: 0x%08x unable to locate"
+ " struct iscsi_pdu for cmd->pdu_send_order: %u.\n",
+ cmd->init_task_tag, cmd->pdu_send_order);
+ return NULL;
+ } else {
+ if (!seq) {
+ pr_err("struct iscsi_seq is NULL!\n");
+ return NULL;
+ }
+#if 0
+ pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
+ " seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
+ seq->seq_no);
+#endif
+ pdu = &cmd->pdu_list[seq->pdu_start];
+
+ if (seq->pdu_send_order == seq->pdu_count) {
+ pr_err("Command ITT: 0x%08x seq->pdu_send"
+ "_order: %u equals seq->pdu_count: %u\n",
+ cmd->init_task_tag, seq->pdu_send_order,
+ seq->pdu_count);
+ return NULL;
+ }
+
+ for (i = 0; i < seq->pdu_count; i++) {
+ if (pdu[i].pdu_send_order == seq->pdu_send_order) {
+ seq->pdu_send_order++;
+ return &pdu[i];
+ }
+ }
+
+ pr_err("Command ITT: 0x%08x unable to locate iscsi"
+ "_pdu_t for seq->pdu_send_order: %u.\n",
+ cmd->init_task_tag, seq->pdu_send_order);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+struct iscsi_seq *iscsit_get_seq_holder(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 length)
+{
+ u32 i;
+
+ if (!cmd->seq_list) {
+ pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ return NULL;
+ }
+
+ for (i = 0; i < cmd->seq_count; i++) {
+#if 0
+ pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
+ "xfer_len: %d, seq_list[i].seq_no %u\n",
+ cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
+ cmd->seq_list[i].seq_no);
+#endif
+ if ((cmd->seq_list[i].orig_offset +
+ cmd->seq_list[i].xfer_len) >=
+ (offset + length))
+ return &cmd->seq_list[i];
+ }
+
+ pr_err("Unable to locate Sequence holder for ITT: 0x%08x,"
+ " Offset: %u, Length: %u\n", cmd->init_task_tag, offset,
+ length);
+ return NULL;
+}
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
new file mode 100644
index 0000000..0d52a10
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -0,0 +1,86 @@
+#ifndef ISCSI_SEQ_AND_PDU_LIST_H
+#define ISCSI_SEQ_AND_PDU_LIST_H
+
+/* struct iscsi_pdu->status */
+#define DATAOUT_PDU_SENT 1
+
+/* struct iscsi_seq->type */
+#define SEQTYPE_IMMEDIATE 1
+#define SEQTYPE_UNSOLICITED 2
+#define SEQTYPE_NORMAL 3
+
+/* struct iscsi_seq->status */
+#define DATAOUT_SEQUENCE_GOT_R2T 1
+#define DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY 2
+#define DATAOUT_SEQUENCE_COMPLETE 3
+
+/* iscsi_determine_counts_for_list() type */
+#define PDULIST_NORMAL 1
+#define PDULIST_IMMEDIATE 2
+#define PDULIST_UNSOLICITED 3
+#define PDULIST_IMMEDIATE_AND_UNSOLICITED 4
+
+/* struct iscsi_pdu->type */
+#define PDUTYPE_IMMEDIATE 1
+#define PDUTYPE_UNSOLICITED 2
+#define PDUTYPE_NORMAL 3
+
+/* struct iscsi_pdu->status */
+#define ISCSI_PDU_NOT_RECEIVED 0
+#define ISCSI_PDU_RECEIVED_OK 1
+#define ISCSI_PDU_CRC_FAILED 2
+#define ISCSI_PDU_TIMED_OUT 3
+
+/* struct iscsi_build_list->randomize */
+#define RANDOM_DATAIN_PDU_OFFSETS 0x01
+#define RANDOM_DATAIN_SEQ_OFFSETS 0x02
+#define RANDOM_DATAOUT_PDU_OFFSETS 0x04
+#define RANDOM_R2T_OFFSETS 0x08
+
+/* struct iscsi_build_list->data_direction */
+#define ISCSI_PDU_READ 0x01
+#define ISCSI_PDU_WRITE 0x02
+
+struct iscsi_build_list {
+ int data_direction;
+ int randomize;
+ int type;
+ int immediate_data_length;
+};
+
+struct iscsi_pdu {
+ int status;
+ int type;
+ u8 flags;
+ u32 data_sn;
+ u32 length;
+ u32 offset;
+ u32 pdu_send_order;
+ u32 seq_no;
+} ____cacheline_aligned;
+
+struct iscsi_seq {
+ int sent;
+ int status;
+ int type;
+ u32 data_sn;
+ u32 first_datasn;
+ u32 last_datasn;
+ u32 next_burst_len;
+ u32 pdu_start;
+ u32 pdu_count;
+ u32 offset;
+ u32 orig_offset;
+ u32 pdu_send_order;
+ u32 r2t_sn;
+ u32 seq_send_order;
+ u32 seq_no;
+ u32 xfer_len;
+} ____cacheline_aligned;
+
+extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *);
+extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
+extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
+extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
+
+#endif /* ISCSI_SEQ_AND_PDU_LIST_H */
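
As a usage sketch: a caller that wants an out-of-order DataOut layout fills a struct iscsi_build_list and hands it to iscsit_do_build_list(), roughly as below. This kernel-context fragment assumes the declarations above plus iscsi_target_core.h; the helper name and the chosen randomize flags are illustrative, not part of the patch.

/* Hypothetical caller: request randomized DataOut PDU and R2T offsets
 * for a WRITE command whose lists have not been built yet. */
static int example_build_dataout_lists(struct iscsi_cmd *cmd)
{
	struct iscsi_build_list bl;

	memset(&bl, 0, sizeof(bl));
	bl.data_direction = ISCSI_PDU_WRITE;
	bl.randomize = RANDOM_DATAOUT_PDU_OFFSETS | RANDOM_R2T_OFFSETS;
	bl.type = PDULIST_NORMAL;
	bl.immediate_data_length = 0;

	/* Allocates cmd->seq_list/cmd->pdu_list as needed, then fills
	 * and (optionally) randomizes them. */
	return iscsit_do_build_list(cmd, &bl);
}
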
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
new file mode 100644
index 0000000..bbdbe93
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -0,0 +1,950 @@
+/*******************************************************************************
+ * Modern ConfigFS group context-specific iSCSI statistics, based on the
+ * original iscsi_target_mib.c code
+ *
+ * Copyright (c) 2011 Rising Tide Systems
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/configfs.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/configfs_macros.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_stat.h"
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+/* Instance Attributes Table */
+#define ISCSI_INST_NUM_NODES 1
+#define ISCSI_INST_DESCR "Storage Engine Target"
+#define ISCSI_INST_LAST_FAILURE_TYPE 0
+#define ISCSI_DISCONTINUITY_TIME 0
+
+#define ISCSI_NODE_INDEX 1
+
+#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
+
+/****************************************************************************
+ * iSCSI MIB Tables
+ ****************************************************************************/
+/*
+ * Instance Attributes Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_INSTANCE_ATTR(_name, _mode) \
+static struct iscsi_stat_instance_attribute \
+ iscsi_stat_instance_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_instance_show_attr_##_name, \
+ iscsi_stat_instance_store_attr_##_name);
+
+#define ISCSI_STAT_INSTANCE_ATTR_RO(_name) \
+static struct iscsi_stat_instance_attribute \
+ iscsi_stat_instance_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_instance_show_attr_##_name);
+
+static ssize_t iscsi_stat_instance_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_instance_show_attr_min_ver(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(min_ver);
+
+static ssize_t iscsi_stat_instance_show_attr_max_ver(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(max_ver);
+
+static ssize_t iscsi_stat_instance_show_attr_portals(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(portals);
+
+static ssize_t iscsi_stat_instance_show_attr_nodes(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(nodes);
+
+static ssize_t iscsi_stat_instance_show_attr_sessions(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(sessions);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_sess(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+ u32 sess_err_count;
+
+ spin_lock_bh(&sess_err->lock);
+ sess_err_count = (sess_err->digest_errors +
+ sess_err->cxn_timeout_errors +
+ sess_err->pdu_format_errors);
+ spin_unlock_bh(&sess_err->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_type(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ sess_err->last_sess_failure_type);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_type);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_rem_name(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ sess_err->last_sess_fail_rem_name[0] ?
+ sess_err->last_sess_fail_rem_name : NONE);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name);
+
+static ssize_t iscsi_stat_instance_show_attr_disc_time(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(disc_time);
+
+static ssize_t iscsi_stat_instance_show_attr_description(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(description);
+
+static ssize_t iscsi_stat_instance_show_attr_vendor(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n");
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
+
+static ssize_t iscsi_stat_instance_show_attr_version(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(version);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps,
+ iscsi_instance_group);
+
+static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
+ &iscsi_stat_instance_inst.attr,
+ &iscsi_stat_instance_min_ver.attr,
+ &iscsi_stat_instance_max_ver.attr,
+ &iscsi_stat_instance_portals.attr,
+ &iscsi_stat_instance_nodes.attr,
+ &iscsi_stat_instance_sessions.attr,
+ &iscsi_stat_instance_fail_sess.attr,
+ &iscsi_stat_instance_fail_type.attr,
+ &iscsi_stat_instance_fail_rem_name.attr,
+ &iscsi_stat_instance_disc_time.attr,
+ &iscsi_stat_instance_description.attr,
+ &iscsi_stat_instance_vendor.attr,
+ &iscsi_stat_instance_version.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_instance_item_ops = {
+ .show_attribute = iscsi_stat_instance_attr_show,
+ .store_attribute = iscsi_stat_instance_attr_store,
+};
+
+struct config_item_type iscsi_stat_instance_cit = {
+ .ct_item_ops = &iscsi_stat_instance_item_ops,
+ .ct_attrs = iscsi_stat_instance_attrs,
+ .ct_owner = THIS_MODULE,
+};
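
For readers unfamiliar with the CONFIGFS_EATTR helpers, adding one more read-only entry to this table follows a fixed pattern: define a show routine, invoke the _RO macro, and list the generated attribute in the NULL-terminated array. The attribute below is purely illustrative and is not part of the patch.

/* Hypothetical extra attribute, for illustration only. */
static ssize_t iscsi_stat_instance_show_attr_example(
	struct iscsi_wwn_stat_grps *igrps, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", 0U);
}
ISCSI_STAT_INSTANCE_ATTR_RO(example);

/* ...and &iscsi_stat_instance_example.attr would be appended to
 * iscsi_stat_instance_attrs[] just before the terminating NULL. */
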
+
+/*
+ * Instance Session Failure Stats Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode) \
+static struct iscsi_stat_sess_err_attribute \
+ iscsi_stat_sess_err_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_sess_err_show_attr_##_name, \
+ iscsi_stat_sess_err_store_attr_##_name);
+
+#define ISCSI_STAT_SESS_ERR_ATTR_RO(_name) \
+static struct iscsi_stat_sess_err_attribute \
+ iscsi_stat_sess_err_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_sess_err_show_attr_##_name);
+
+static ssize_t iscsi_stat_sess_err_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_sess_err_show_attr_digest_errors(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors);
+
+static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors);
+
+static ssize_t iscsi_stat_sess_err_show_attr_format_errors(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps,
+ iscsi_sess_err_group);
+
+static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
+ &iscsi_stat_sess_err_inst.attr,
+ &iscsi_stat_sess_err_digest_errors.attr,
+ &iscsi_stat_sess_err_cxn_errors.attr,
+ &iscsi_stat_sess_err_format_errors.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_sess_err_item_ops = {
+ .show_attribute = iscsi_stat_sess_err_attr_show,
+ .store_attribute = iscsi_stat_sess_err_attr_store,
+};
+
+struct config_item_type iscsi_stat_sess_err_cit = {
+ .ct_item_ops = &iscsi_stat_sess_err_item_ops,
+ .ct_attrs = iscsi_stat_sess_err_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Target Attributes Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_TGT_ATTR(_name, _mode) \
+static struct iscsi_stat_tgt_attr_attribute \
+ iscsi_stat_tgt_attr_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+		iscsi_stat_tgt_attr_show_attr_##_name,		\
+ iscsi_stat_tgt_attr_store_attr_##_name);
+
+#define ISCSI_STAT_TGT_ATTR_RO(_name) \
+static struct iscsi_stat_tgt_attr_attribute \
+ iscsi_stat_tgt_attr_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_tgt_attr_show_attr_##_name);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_TGT_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_indx(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_TGT_ATTR_RO(indx);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ u32 fail_count;
+
+ spin_lock(&lstat->lock);
+ fail_count = (lstat->redirects + lstat->authorize_fails +
+ lstat->authenticate_fails + lstat->negotiate_fails +
+ lstat->other_fails);
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
+}
+ISCSI_STAT_TGT_ATTR_RO(login_fails);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ u32 last_fail_time;
+
+ spin_lock(&lstat->lock);
+ last_fail_time = lstat->last_fail_time ?
+ (u32)(((u32)lstat->last_fail_time -
+ INITIAL_JIFFIES) * 100 / HZ) : 0;
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
+}
+ISCSI_STAT_TGT_ATTR_RO(last_fail_time);
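
The arithmetic above reports the failure time as hundredths of a second since boot (the SNMP TimeTicks convention), subtracting INITIAL_JIFFIES because jiffies does not start at zero. A standalone sketch with made-up numbers, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned long hz = 250;		/* example tick rate */
	unsigned long initial_jiffies =
		(unsigned long)(unsigned int)(-300 * (long)hz);
	/* Pretend the last login failure happened 45 seconds after boot. */
	unsigned long last_fail_time = initial_jiffies + 45 * hz;
	unsigned int ticks;

	ticks = (unsigned int)(((unsigned int)last_fail_time - initial_jiffies)
			       * 100 / hz);
	printf("%u\n", ticks);	/* prints 4500, i.e. 45.00 seconds */
	return 0;
}
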
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ u32 last_fail_type;
+
+ spin_lock(&lstat->lock);
+ last_fail_type = lstat->last_fail_type;
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
+}
+ISCSI_STAT_TGT_ATTR_RO(last_fail_type);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ unsigned char buf[224];
+
+ spin_lock(&lstat->lock);
+ snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+ lstat->last_intr_fail_name : NONE);
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_name);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ unsigned char buf[8];
+
+ spin_lock(&lstat->lock);
+ snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_addr != NULL) ?
+ "ipv6" : "ipv4");
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ unsigned char buf[32];
+
+ spin_lock(&lstat->lock);
+ if (lstat->last_intr_fail_ip_family == AF_INET6)
+ snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
+ else
+ snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps,
+ iscsi_tgt_attr_group);
+
+static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
+ &iscsi_stat_tgt_attr_inst.attr,
+ &iscsi_stat_tgt_attr_indx.attr,
+ &iscsi_stat_tgt_attr_login_fails.attr,
+ &iscsi_stat_tgt_attr_last_fail_time.attr,
+ &iscsi_stat_tgt_attr_last_fail_type.attr,
+ &iscsi_stat_tgt_attr_fail_intr_name.attr,
+ &iscsi_stat_tgt_attr_fail_intr_addr_type.attr,
+ &iscsi_stat_tgt_attr_fail_intr_addr.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = {
+ .show_attribute = iscsi_stat_tgt_attr_attr_show,
+ .store_attribute = iscsi_stat_tgt_attr_attr_store,
+};
+
+struct config_item_type iscsi_stat_tgt_attr_cit = {
+ .ct_item_ops = &iscsi_stat_tgt_attr_item_ops,
+ .ct_attrs = iscsi_stat_tgt_attr_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Target Login Stats Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_LOGIN(_name, _mode) \
+static struct iscsi_stat_login_attribute \
+ iscsi_stat_login_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_login_show_attr_##_name, \
+ iscsi_stat_login_store_attr_##_name);
+
+#define ISCSI_STAT_LOGIN_RO(_name) \
+static struct iscsi_stat_login_attribute \
+ iscsi_stat_login_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_login_show_attr_##_name);
+
+static ssize_t iscsi_stat_login_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_LOGIN_RO(inst);
+
+static ssize_t iscsi_stat_login_show_attr_indx(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_LOGIN_RO(indx);
+
+static ssize_t iscsi_stat_login_show_attr_accepts(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(accepts);
+
+static ssize_t iscsi_stat_login_show_attr_other_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(other_fails);
+
+static ssize_t iscsi_stat_login_show_attr_redirects(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(redirects);
+
+static ssize_t iscsi_stat_login_show_attr_authorize_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(authorize_fails);
+
+static ssize_t iscsi_stat_login_show_attr_authenticate_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(authenticate_fails);
+
+static ssize_t iscsi_stat_login_show_attr_negotiate_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(negotiate_fails);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps,
+ iscsi_login_stats_group);
+
+static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
+ &iscsi_stat_login_inst.attr,
+ &iscsi_stat_login_indx.attr,
+ &iscsi_stat_login_accepts.attr,
+ &iscsi_stat_login_other_fails.attr,
+ &iscsi_stat_login_redirects.attr,
+ &iscsi_stat_login_authorize_fails.attr,
+ &iscsi_stat_login_authenticate_fails.attr,
+ &iscsi_stat_login_negotiate_fails.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_login_stats_item_ops = {
+ .show_attribute = iscsi_stat_login_attr_show,
+ .store_attribute = iscsi_stat_login_attr_store,
+};
+
+struct config_item_type iscsi_stat_login_cit = {
+ .ct_item_ops = &iscsi_stat_login_stats_item_ops,
+ .ct_attrs = iscsi_stat_login_stats_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Target Logout Stats Table
+ */
+
+CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_LOGOUT(_name, _mode) \
+static struct iscsi_stat_logout_attribute \
+ iscsi_stat_logout_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_logout_show_attr_##_name, \
+ iscsi_stat_logout_store_attr_##_name);
+
+#define ISCSI_STAT_LOGOUT_RO(_name) \
+static struct iscsi_stat_logout_attribute \
+ iscsi_stat_logout_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_logout_show_attr_##_name);
+
+static ssize_t iscsi_stat_logout_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_LOGOUT_RO(inst);
+
+static ssize_t iscsi_stat_logout_show_attr_indx(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_LOGOUT_RO(indx);
+
+static ssize_t iscsi_stat_logout_show_attr_normal_logouts(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
+}
+ISCSI_STAT_LOGOUT_RO(normal_logouts);
+
+static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
+}
+ISCSI_STAT_LOGOUT_RO(abnormal_logouts);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps,
+ iscsi_logout_stats_group);
+
+static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
+ &iscsi_stat_logout_inst.attr,
+ &iscsi_stat_logout_indx.attr,
+ &iscsi_stat_logout_normal_logouts.attr,
+ &iscsi_stat_logout_abnormal_logouts.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = {
+ .show_attribute = iscsi_stat_logout_attr_show,
+ .store_attribute = iscsi_stat_logout_attr_store,
+};
+
+struct config_item_type iscsi_stat_logout_cit = {
+ .ct_item_ops = &iscsi_stat_logout_stats_item_ops,
+ .ct_attrs = iscsi_stat_logout_stats_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Session Stats Table
+ */
+
+CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps);
+#define ISCSI_STAT_SESS(_name, _mode) \
+static struct iscsi_stat_sess_attribute \
+ iscsi_stat_sess_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_sess_show_attr_##_name, \
+ iscsi_stat_sess_store_attr_##_name);
+
+#define ISCSI_STAT_SESS_RO(_name) \
+static struct iscsi_stat_sess_attribute \
+ iscsi_stat_sess_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_sess_show_attr_##_name);
+
+static ssize_t iscsi_stat_sess_show_attr_inst(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
+ struct iscsi_tiqn *tiqn = container_of(wwn,
+ struct iscsi_tiqn, tiqn_wwn);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_SESS_RO(inst);
+
+static ssize_t iscsi_stat_sess_show_attr_node(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(node);
+
+static ssize_t iscsi_stat_sess_show_attr_indx(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->session_index);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(indx);
+
+static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(cmd_pdus);
+
+static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(rsp_pdus);
+
+static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%llu\n",
+ (unsigned long long)sess->tx_data_octets);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(txdata_octs);
+
+static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%llu\n",
+ (unsigned long long)sess->rx_data_octets);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(rxdata_octs);
+
+static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->conn_digest_errors);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(conn_digest_errors);
+
+static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->conn_timeout_errors);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(conn_timeout_errors);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps,
+ iscsi_sess_stats_group);
+
+static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
+ &iscsi_stat_sess_inst.attr,
+ &iscsi_stat_sess_node.attr,
+ &iscsi_stat_sess_indx.attr,
+ &iscsi_stat_sess_cmd_pdus.attr,
+ &iscsi_stat_sess_rsp_pdus.attr,
+ &iscsi_stat_sess_txdata_octs.attr,
+ &iscsi_stat_sess_rxdata_octs.attr,
+ &iscsi_stat_sess_conn_digest_errors.attr,
+ &iscsi_stat_sess_conn_timeout_errors.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = {
+ .show_attribute = iscsi_stat_sess_attr_show,
+ .store_attribute = iscsi_stat_sess_attr_store,
+};
+
+struct config_item_type iscsi_stat_sess_cit = {
+ .ct_item_ops = &iscsi_stat_sess_stats_item_ops,
+ .ct_attrs = iscsi_stat_sess_stats_attrs,
+ .ct_owner = THIS_MODULE,
+};
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h
new file mode 100644
index 0000000..3ff76b4
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.h
@@ -0,0 +1,64 @@
+#ifndef ISCSI_TARGET_STAT_H
+#define ISCSI_TARGET_STAT_H
+
+/*
+ * For struct iscsi_tiqn->tiqn_wwn default groups
+ */
+extern struct config_item_type iscsi_stat_instance_cit;
+extern struct config_item_type iscsi_stat_sess_err_cit;
+extern struct config_item_type iscsi_stat_tgt_attr_cit;
+extern struct config_item_type iscsi_stat_login_cit;
+extern struct config_item_type iscsi_stat_logout_cit;
+
+/*
+ * For struct iscsi_session->se_sess default groups
+ */
+extern struct config_item_type iscsi_stat_sess_cit;
+
+/* iSCSI session error types */
+#define ISCSI_SESS_ERR_UNKNOWN 0
+#define ISCSI_SESS_ERR_DIGEST 1
+#define ISCSI_SESS_ERR_CXN_TIMEOUT 2
+#define ISCSI_SESS_ERR_PDU_FORMAT 3
+
+/* iSCSI session error stats */
+struct iscsi_sess_err_stats {
+ spinlock_t lock;
+ u32 digest_errors;
+ u32 cxn_timeout_errors;
+ u32 pdu_format_errors;
+ u32 last_sess_failure_type;
+ char last_sess_fail_rem_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI login failure types (sub-OIDs) */
+#define ISCSI_LOGIN_FAIL_OTHER 2
+#define ISCSI_LOGIN_FAIL_REDIRECT 3
+#define ISCSI_LOGIN_FAIL_AUTHORIZE 4
+#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5
+#define ISCSI_LOGIN_FAIL_NEGOTIATE 6
+
+/* iSCSI login stats */
+struct iscsi_login_stats {
+ spinlock_t lock;
+ u32 accepts;
+ u32 other_fails;
+ u32 redirects;
+ u32 authorize_fails;
+ u32 authenticate_fails;
+ u32 negotiate_fails; /* used for notifications */
+ u64 last_fail_time; /* time stamp (jiffies) */
+ u32 last_fail_type;
+ int last_intr_fail_ip_family;
+ unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+ char last_intr_fail_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI logout stats */
+struct iscsi_logout_stats {
+ spinlock_t lock;
+ u32 normal_logouts;
+ u32 abnormal_logouts;
+} ____cacheline_aligned;
+
+#endif /*** ISCSI_TARGET_STAT_H ***/
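
The counters and "last failure" fields in these tables are meant to be updated together under each table's spinlock, mirroring how the show routines in iscsi_target_stat.c read them. A hypothetical helper for the login table (kernel-context fragment, not part of the patch) might look like:

/* Hypothetical: record one failed negotiation against this target. */
static void example_record_login_negotiate_fail(
	struct iscsi_login_stats *lstat,
	const char *initiator_name)
{
	spin_lock(&lstat->lock);
	lstat->negotiate_fails++;
	lstat->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
	lstat->last_fail_time = jiffies;
	snprintf(lstat->last_intr_fail_name,
		 sizeof(lstat->last_intr_fail_name), "%s", initiator_name);
	spin_unlock(&lstat->lock);
}
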
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
new file mode 100644
index 0000000..db1fe1e
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -0,0 +1,849 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target specific Task Management functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <asm/unaligned.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_tmr.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+u8 iscsit_tmr_abort_task(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_cmd *ref_cmd;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+ struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
+
+ ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt);
+ if (!ref_cmd) {
+ pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
+ " %hu.\n", hdr->rtt, conn->cid);
+ return ((hdr->refcmdsn >= conn->sess->exp_cmd_sn) &&
+ (hdr->refcmdsn <= conn->sess->max_cmd_sn)) ?
+ ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
+ }
+ if (ref_cmd->cmd_sn != hdr->refcmdsn) {
+ pr_err("RefCmdSN 0x%08x does not equal"
+ " task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
+ hdr->refcmdsn, ref_cmd->cmd_sn);
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+
+ se_tmr->ref_task_tag = hdr->rtt;
+ se_tmr->ref_cmd = &ref_cmd->se_cmd;
+ tmr_req->ref_cmd_sn = hdr->refcmdsn;
+ tmr_req->exp_data_sn = hdr->exp_datasn;
+
+ return ISCSI_TMF_RSP_COMPLETE;
+}
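
A note on the fallback path above: when the referenced task cannot be found, RFC 3720 lets the target answer "Function complete" if the RefCmdSN falls inside the current [ExpCmdSN, MaxCmdSN] window (the command was simply never received) and "Task does not exist" otherwise. Below is a tiny standalone sketch of that decision, using plain comparisons just like the function above (real CmdSN arithmetic would also have to consider 32-bit wraparound); it is not part of the patch.

#include <stdio.h>

/* 0 = ISCSI_TMF_RSP_COMPLETE, 1 = ISCSI_TMF_RSP_NO_TASK */
static int abort_rsp_for_missing_task(unsigned int refcmdsn,
				      unsigned int exp_cmd_sn,
				      unsigned int max_cmd_sn)
{
	return (refcmdsn >= exp_cmd_sn && refcmdsn <= max_cmd_sn) ? 0 : 1;
}

int main(void)
{
	printf("%d\n", abort_rsp_for_missing_task(105, 100, 110)); /* 0 */
	printf("%d\n", abort_rsp_for_missing_task(95,  100, 110)); /* 1 */
	return 0;
}
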
+
+/*
+ * Called from iscsit_handle_task_mgt_cmd().
+ */
+int iscsit_tmr_task_warm_reset(
+ struct iscsi_conn *conn,
+ struct iscsi_tmr_req *tmr_req,
+ unsigned char *buf)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+#if 0
+ struct iscsi_init_task_mgt_cmnd *hdr =
+ (struct iscsi_init_task_mgt_cmnd *) buf;
+#endif
+ if (!na->tmr_warm_reset) {
+ pr_err("TMR Opcode TARGET_WARM_RESET authorization"
+ " failed for Initiator Node: %s\n",
+ sess->se_sess->se_node_acl->initiatorname);
+ return -1;
+ }
+ /*
+ * Do the real work in transport_generic_do_tmr().
+ */
+ return 0;
+}
+
+int iscsit_tmr_task_cold_reset(
+ struct iscsi_conn *conn,
+ struct iscsi_tmr_req *tmr_req,
+ unsigned char *buf)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ if (!na->tmr_cold_reset) {
+ pr_err("TMR Opcode TARGET_COLD_RESET authorization"
+ " failed for Initiator Node: %s\n",
+ sess->se_sess->se_node_acl->initiatorname);
+ return -1;
+ }
+ /*
+ * Do the real work in transport_generic_do_tmr().
+ */
+ return 0;
+}
+
+u8 iscsit_tmr_task_reassign(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_cmd *ref_cmd = NULL;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_conn_recovery *cr = NULL;
+ struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+ struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
+ int ret;
+
+ pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
+ " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
+ hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid);
+
+ if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) {
+ pr_err("TMR TASK_REASSIGN not supported in ERL<2,"
+ " ignoring request.\n");
+ return ISCSI_TMF_RSP_NOT_SUPPORTED;
+ }
+
+ ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt);
+ if (ret == -2) {
+		pr_err("Command ITT: 0x%08x is still allegiant to CID:"
+ " %hu\n", ref_cmd->init_task_tag, cr->cid);
+ return ISCSI_TMF_RSP_TASK_ALLEGIANT;
+ } else if (ret == -1) {
+ pr_err("Unable to locate RefTaskTag: 0x%08x in"
+ " connection recovery command list.\n", hdr->rtt);
+ return ISCSI_TMF_RSP_NO_TASK;
+ }
+ /*
+ * Temporary check to prevent connection recovery for
+ * connections with a differing MaxRecvDataSegmentLength.
+ */
+ if (cr->maxrecvdatasegmentlength !=
+ conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("Unable to perform connection recovery for"
+ " differing MaxRecvDataSegmentLength, rejecting"
+ " TMR TASK_REASSIGN.\n");
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+
+ se_tmr->ref_task_tag = hdr->rtt;
+ se_tmr->ref_cmd = &ref_cmd->se_cmd;
+ se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
+ tmr_req->ref_cmd_sn = hdr->refcmdsn;
+ tmr_req->exp_data_sn = hdr->exp_datasn;
+ tmr_req->conn_recovery = cr;
+ tmr_req->task_reassign = 1;
+ /*
+ * Command can now be reassigned to a new connection.
+ * The task management response must be sent before the
+ * reassignment actually happens. See iscsi_tmr_post_handler().
+ */
+ return ISCSI_TMF_RSP_COMPLETE;
+}
+
+static void iscsit_task_reassign_remove_cmd(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn_recovery *cr,
+ struct iscsi_session *sess)
+{
+ int ret;
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!ret) {
+ pr_debug("iSCSI connection recovery successful for CID:"
+ " %hu on SID: %u\n", cr->cid, sess->sid);
+ iscsit_remove_active_connection_recovery_entry(cr, sess);
+ }
+}
+
+static int iscsit_task_reassign_complete_nop_out(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ return -1;
+ }
+ cr = cmd->cr;
+
+ /*
+	 * Reset the StatSN so a new one for this command's new connection
+ * will be assigned.
+ * Reset the ExpStatSN as well so we may receive Status SNACKs.
+ */
+ cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+ iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ cmd->i_state = ISTATE_SEND_NOPIN;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+static int iscsit_task_reassign_complete_write(
+ struct iscsi_cmd *cmd,
+ struct iscsi_tmr_req *tmr_req)
+{
+ int no_build_r2ts = 0;
+ u32 length = 0, offset = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ /*
+	 * The Initiator must not send an R2T SNACK with a BegRun less than
+ * the TMR TASK_REASSIGN's ExpDataSN.
+ */
+ if (!tmr_req->exp_data_sn) {
+ cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = 0;
+ } else {
+ cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
+ }
+
+ /*
+ * The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the
+ * Initiator is expecting. The Target controls all WRITE operations
+	 * so if we have received all DataOUT we can safely ignore the Initiator.
+ */
+ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+ if (!atomic_read(&cmd->transport_sent)) {
+ pr_debug("WRITE ITT: 0x%08x: t_state: %d"
+ " never sent to transport\n",
+ cmd->init_task_tag, cmd->se_cmd.t_state);
+ return transport_generic_handle_data(se_cmd);
+ }
+
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ /*
+ * Special case to deal with DataSequenceInOrder=No and Non-Immediate
+ * Unsolicited DataOut.
+ */
+ if (cmd->unsolicited_data) {
+ cmd->unsolicited_data = 0;
+
+ offset = cmd->next_burst_len = cmd->write_data_done;
+
+ if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
+ cmd->data_length) {
+ no_build_r2ts = 1;
+ length = (cmd->data_length - offset);
+ } else
+ length = (conn->sess->sess_ops->FirstBurstLength - offset);
+
+ spin_lock_bh(&cmd->r2t_lock);
+ if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+ cmd->outstanding_r2ts++;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ if (no_build_r2ts)
+ return 0;
+ }
+ /*
+ * iscsit_build_r2ts_for_cmd() can handle the rest from here.
+ */
+ return iscsit_build_r2ts_for_cmd(cmd, conn, 2);
+}
+
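+/*
+ * Complete TASK_REASSIGN for an outstanding READ: re-dispatch the command
+ * to the transport if it was never submitted, otherwise rebuild a DataIN
+ * request starting at the Initiator's ExpDataSN and queue the remaining
+ * DataIN on the new connection.
+ */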
+static int iscsit_task_reassign_complete_read(
+ struct iscsi_cmd *cmd,
+ struct iscsi_tmr_req *tmr_req)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ /*
+ * The Initiator must not send a Data SNACK with a BegRun less than
+ * the TMR TASK_REASSIGN's ExpDataSN.
+ */
+ if (!tmr_req->exp_data_sn) {
+ cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = 0;
+ } else {
+ cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
+ }
+
+ if (!atomic_read(&cmd->transport_sent)) {
+ pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
+ " transport\n", cmd->init_task_tag,
+ cmd->se_cmd.t_state);
+ transport_generic_handle_cdb(se_cmd);
+ return 0;
+ }
+
+ if (!atomic_read(&se_cmd->t_transport_complete)) {
+ pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
+ " from transport\n", cmd->init_task_tag,
+ cmd->se_cmd.t_state);
+ return -1;
+ }
+
+ dr = iscsit_allocate_datain_req();
+ if (!dr)
+ return -1;
+ /*
+ * The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the
+ * Initiator is expecting.
+ */
+ dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
+ dr->runlength = 0;
+ dr->generate_recovery_values = 1;
+ dr->recovery = DATAIN_CONNECTION_RECOVERY;
+
+ iscsit_attach_datain_req(cmd, dr);
+
+ cmd->i_state = ISTATE_SEND_DATAIN;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+static int iscsit_task_reassign_complete_none(
+ struct iscsi_cmd *cmd,
+ struct iscsi_tmr_req *tmr_req)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
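+/*
+ * Complete TASK_REASSIGN for a SCSI command: reattach the command to the
+ * new connection, then finish the reassignment based on its data direction
+ * (WRITE, READ or no data transfer).
+ */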
+static int iscsit_task_reassign_complete_scsi_cmnd(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ return -1;
+ }
+ cr = cmd->cr;
+
+ /*
+ * Reset the StatSN so a new one for this command's new connection
+ * will be assigned.
+ * Reset the ExpStatSN as well so we may receive Status SNACKs.
+ */
+ cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+ iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ switch (cmd->data_direction) {
+ case DMA_TO_DEVICE:
+ return iscsit_task_reassign_complete_write(cmd, tmr_req);
+ case DMA_FROM_DEVICE:
+ return iscsit_task_reassign_complete_read(cmd, tmr_req);
+ case DMA_NONE:
+ return iscsit_task_reassign_complete_none(cmd, tmr_req);
+ default:
+ pr_err("Unknown cmd->data_direction: 0x%02x\n",
+ cmd->data_direction);
+ return -1;
+ }
+
+ return 0;
+}
+
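+/*
+ * Called from iscsit_tmr_post_handler() once the TASK_REASSIGN response has
+ * been sent; dispatches the actual reassignment based on the referenced
+ * command's iSCSI opcode.
+ */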
+static int iscsit_task_reassign_complete(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd;
+ struct iscsi_cmd *cmd;
+ int ret = 0;
+
+ if (!se_tmr->ref_cmd) {
+ pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
+ return -1;
+ }
+ se_cmd = se_tmr->ref_cmd;
+ cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->conn = conn;
+
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_NOOP_OUT:
+ ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
+ break;
+ case ISCSI_OP_SCSI_CMD:
+ ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
+ break;
+ default:
+ pr_err("Illegal iSCSI Opcode 0x%02x during"
+ " command realligence\n", cmd->iscsi_opcode);
+ return -1;
+ }
+
+ if (ret != 0)
+ return ret;
+
+ pr_debug("Completed connection realligence for Opcode: 0x%02x,"
+ " ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, conn->cid);
+
+ return 0;
+}
+
+/*
+ * Handles special after-the-fact actions related to TMRs.
+ * Right now the only one it is really needed for is
+ * connection recovery related TASK_REASSIGN.
+ */
+int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+
+ if (tmr_req->task_reassign &&
+ (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+ return iscsit_task_reassign_complete(tmr_req, conn);
+
+ return 0;
+}
+
+/*
+ * Nothing to do here, but leave it for good measure. :-)
+ */
+int iscsit_task_reassign_prepare_read(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ return 0;
+}
+
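+/*
+ * Rewind the command's unsolicited DataOUT accounting (DataSN, first burst
+ * length and write_data_done) and mark the affected PDUs as not received,
+ * so the unsolicited sequence can be recovered after TASK_REASSIGN.
+ */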
+static void iscsit_task_reassign_prepare_unsolicited_dataout(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int i, j;
+ struct iscsi_pdu *pdu = NULL;
+ struct iscsi_seq *seq = NULL;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ cmd->data_sn = 0;
+
+ if (cmd->immediate_data)
+ cmd->r2t_offset += (cmd->first_burst_len -
+ cmd->seq_start_offset);
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ cmd->write_data_done -= (cmd->immediate_data) ?
+ (cmd->first_burst_len -
+ cmd->seq_start_offset) :
+ cmd->first_burst_len;
+ cmd->first_burst_len = 0;
+ return;
+ }
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ if ((pdu->offset >= cmd->seq_start_offset) &&
+ ((pdu->offset + pdu->length) <=
+ cmd->seq_end_offset)) {
+ cmd->first_burst_len -= pdu->length;
+ cmd->write_data_done -= pdu->length;
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+ } else {
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+
+ if (seq->type != SEQTYPE_UNSOLICITED)
+ continue;
+
+ cmd->write_data_done -=
+ (seq->offset - seq->orig_offset);
+ cmd->first_burst_len = 0;
+ seq->data_sn = 0;
+ seq->offset = seq->orig_offset;
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ continue;
+
+ for (j = 0; j < seq->pdu_count; j++) {
+ pdu = &cmd->pdu_list[j+seq->pdu_start];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+ }
+}
+
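+/*
+ * Prepare an outstanding WRITE for TASK_REASSIGN: rewind per-PDU and
+ * per-sequence state for incomplete DataOUT sequences below the Initiator's
+ * ExpDataSN, then free all unacknowledged R2Ts so they can be reissued by
+ * iscsit_task_reassign_complete_write().
+ */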
+int iscsit_task_reassign_prepare_write(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_pdu *pdu = NULL;
+ struct iscsi_r2t *r2t = NULL, *r2t_tmp;
+ int first_incomplete_r2t = 1, i = 0;
+
+ /*
+ * The command was in the process of receiving Unsolicited DataOUT when
+ * the connection failed.
+ */
+ if (cmd->unsolicited_data)
+ iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn);
+
+ /*
+ * The Initiator is requesting R2Ts starting from zero, so skip
+ * checking acknowledged R2Ts and go straight to dropping the
+ * unacknowledged struct iscsi_r2t entries.
+ */
+ if (!tmr_req->exp_data_sn)
+ goto drop_unacknowledged_r2ts;
+
+ /*
+ * We now check that the PDUs in DataOUT sequences below
+ * the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is
+ * expecting next) have all the DataOUT they require to complete
+ * the DataOUT sequence. First scan from R2TSN 0 to TMR
+ * TASK_REASSIGN ExpDataSN-1.
+ *
+ * If we have not received all DataOUT in question, we must
+ * make sure to make the appropriate changes to values in
+ * struct iscsi_cmd (and elsewhere depending on session parameters)
+ * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
+ * will resend a new R2T for the DataOUT sequences in question.
+ */
+ spin_lock_bh(&cmd->r2t_lock);
+ if (list_empty(&cmd->cmd_r2t_list)) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+
+ if (r2t->r2t_sn >= tmr_req->exp_data_sn)
+ continue;
+ /*
+ * Safely ignore Recovery R2Ts and R2Ts that have completed
+ * DataOUT sequences.
+ */
+ if (r2t->seq_complete)
+ continue;
+
+ if (r2t->recovery_r2t)
+ continue;
+
+ /*
+ * DataSequenceInOrder=Yes:
+ *
+ * Taking into account the iSCSI implementation requirement of
+ * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+ * DataSequenceInOrder=Yes, we must take into consideration
+ * the following:
+ *
+ * DataSequenceInOrder=No:
+ *
+ * Taking into account that the Initiator controls the (possibly
+ * random) PDU Order in (possibly random) Sequence Order of
+ * DataOUT the target requests with R2Ts, we must take into
+ * consideration the following:
+ *
+ * DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
+ *
+ * While processing non-complete R2T DataOUT sequence requests
+ * the Target will re-request only the total sequence length
+ * minus current received offset. This is because we must
+ * assume the initiator will continue sending DataOUT from the
+ * last PDU before the connection failed.
+ *
+ * DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
+ *
+ * While processing non-complete R2T DataOUT sequence requests
+ * the Target will re-request the entire DataOUT sequence if
+ * any single PDU is missing from the sequence. This is because
+ * we have no logical method to determine the next PDU offset,
+ * and we must assume the Initiator will be sending any random
+ * PDU offset in the current sequence after TASK_REASSIGN
+ * has completed.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (!first_incomplete_r2t) {
+ cmd->r2t_offset -= r2t->xfer_len;
+ goto next;
+ }
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ cmd->data_sn = 0;
+ cmd->r2t_offset -= (r2t->xfer_len -
+ cmd->next_burst_len);
+ first_incomplete_r2t = 0;
+ goto next;
+ }
+
+ cmd->data_sn = 0;
+ cmd->r2t_offset -= r2t->xfer_len;
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ if ((pdu->offset >= r2t->offset) &&
+ (pdu->offset < (r2t->offset +
+ r2t->xfer_len))) {
+ cmd->next_burst_len -= pdu->length;
+ cmd->write_data_done -= pdu->length;
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+
+ first_incomplete_r2t = 0;
+ } else {
+ struct iscsi_seq *seq;
+
+ seq = iscsit_get_seq_holder(cmd, r2t->offset,
+ r2t->xfer_len);
+ if (!seq) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ cmd->write_data_done -=
+ (seq->offset - seq->orig_offset);
+ seq->data_sn = 0;
+ seq->offset = seq->orig_offset;
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+ cmd->seq_send_order--;
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ goto next;
+
+ for (i = 0; i < seq->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+
+next:
+ cmd->outstanding_r2ts--;
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ /*
+ * We now drop all unacknowledged R2Ts, i.e. from the TMR
+ * TASK_REASSIGN's ExpDataSN to the last R2T in the list. We are also careful
+ * to check that the Initiator is not requesting R2Ts for DataOUT
+ * sequences it has already completed.
+ *
+ * Free each R2T in question and adjust values in struct iscsi_cmd
+ * accordingly so iscsit_build_r2ts_for_cmd() does the rest of
+ * the work after the TMR TASK_REASSIGN Response is sent.
+ */
+drop_unacknowledged_r2ts:
+
+ cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
+ cmd->r2t_sn = tmr_req->exp_data_sn;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
+ /*
+ * Skip up to the R2T Sequence number provided by the
+ * iSCSI TASK_REASSIGN TMR
+ */
+ if (r2t->r2t_sn < tmr_req->exp_data_sn)
+ continue;
+
+ if (r2t->seq_complete) {
+ pr_err("Initiator is requesting R2Ts from"
+ " R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
+ " Length: %u is already complete."
+ " BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
+ tmr_req->exp_data_sn, r2t->r2t_sn,
+ r2t->offset, r2t->xfer_len);
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ if (r2t->recovery_r2t) {
+ iscsit_free_r2t(r2t, cmd);
+ continue;
+ }
+
+ /* DataSequenceInOrder=Yes:
+ *
+ * Taking into account the iSCSI implementation requirement of
+ * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+ * DataSequenceInOrder=Yes, it's safe to subtract the R2T's
+ * entire transfer length from the command's R2T offset marker.
+ *
+ * DataSequenceInOrder=No:
+ *
+ * We subtract the difference between the struct iscsi_seq's
+ * current offset and original offset from cmd->write_data_done
+ * to account for DataOUT PDUs already received. Then reset
+ * the current offset to the original and zero out the current
+ * burst length, to make sure we re-request the entire DataOUT
+ * sequence.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder)
+ cmd->r2t_offset -= r2t->xfer_len;
+ else
+ cmd->seq_send_order--;
+
+ cmd->outstanding_r2ts--;
+ iscsit_free_r2t(r2t, cmd);
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+/*
+ * Performs sanity checks on the TMR TASK_REASSIGN's ExpDataSN for
+ * a given struct iscsi_cmd.
+ */
+int iscsit_check_task_reassign_expdatasn(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
+ return 0;
+
+ if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+ return 0;
+
+ if (ref_cmd->data_direction == DMA_NONE)
+ return 0;
+
+ /*
+ * For READs the TMR TASK_REASSIGN's ExpDataSN contains the next DataSN
+ * of DataIN the Initiator is expecting.
+ *
+ * Also check that the Initiator is not re-requesting DataIN that has
+ * already been acknowledged with a DataAck SNACK.
+ */
+ if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
+ if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
+ pr_err("Received ExpDataSN: 0x%08x for READ"
+ " in TMR TASK_REASSIGN greater than command's"
+ " DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
+ ref_cmd->data_sn);
+ return -1;
+ }
+ if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+ (tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
+ pr_err("Received ExpDataSN: 0x%08x for READ"
+ " in TMR TASK_REASSIGN for previously"
+ " acknowledged DataIN: 0x%08x,"
+ " protocol error\n", tmr_req->exp_data_sn,
+ ref_cmd->acked_data_sn);
+ return -1;
+ }
+ return iscsit_task_reassign_prepare_read(tmr_req, conn);
+ }
+
+ /*
+ * For WRITEs the TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN
+ * for R2Ts the Initiator is expecting.
+ *
+ * Do the magic in iscsit_task_reassign_prepare_write().
+ */
+ if (ref_cmd->data_direction == DMA_TO_DEVICE) {
+ if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) {
+ pr_err("Received ExpDataSN: 0x%08x for WRITE"
+ " in TMR TASK_REASSIGN greater than command's"
+ " R2TSN: 0x%08x.\n", tmr_req->exp_data_sn,
+ ref_cmd->r2t_sn);
+ return -1;
+ }
+ return iscsit_task_reassign_prepare_write(tmr_req, conn);
+ }
+
+ pr_err("Unknown iSCSI data_direction: 0x%02x\n",
+ ref_cmd->data_direction);
+
+ return -1;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
new file mode 100644
index 0000000..142e992
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -0,0 +1,14 @@
+#ifndef ISCSI_TARGET_TMR_H
+#define ISCSI_TARGET_TMR_H
+
+extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+ unsigned char *);
+extern int iscsit_tmr_task_cold_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+ unsigned char *);
+extern u8 iscsit_tmr_task_reassign(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *,
+ struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_TMR_H */
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
new file mode 100644
index 0000000..d4cf2cd
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -0,0 +1,759 @@
+/*******************************************************************************
+ * This file contains iSCSI Target Portal Group related functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+#include <target/target_core_tpg.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_nodeattrib.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
+{
+ struct iscsi_portal_group *tpg;
+
+ tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct iscsi_portal_group\n");
+ return NULL;
+ }
+
+ tpg->tpgt = tpgt;
+ tpg->tpg_state = TPG_STATE_FREE;
+ tpg->tpg_tiqn = tiqn;
+ INIT_LIST_HEAD(&tpg->tpg_gnp_list);
+ INIT_LIST_HEAD(&tpg->tpg_list);
+ mutex_init(&tpg->tpg_access_lock);
+ mutex_init(&tpg->np_login_lock);
+ spin_lock_init(&tpg->tpg_state_lock);
+ spin_lock_init(&tpg->tpg_np_lock);
+
+ return tpg;
+}
+
+static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
+
+int iscsit_load_discovery_tpg(void)
+{
+ struct iscsi_param *param;
+ struct iscsi_portal_group *tpg;
+ int ret;
+
+ tpg = iscsit_alloc_portal_group(NULL, 1);
+ if (!tpg) {
+ pr_err("Unable to allocate struct iscsi_portal_group\n");
+ return -1;
+ }
+
+ ret = core_tpg_register(
+ &lio_target_fabric_configfs->tf_ops,
+ NULL, &tpg->tpg_se_tpg, (void *)tpg,
+ TRANSPORT_TPG_TYPE_DISCOVERY);
+ if (ret < 0) {
+ kfree(tpg);
+ return -1;
+ }
+
+ tpg->sid = 1; /* First Assigned LIO Session ID */
+ iscsit_set_default_tpg_attribs(tpg);
+
+ if (iscsi_create_default_params(&tpg->param_list) < 0)
+ goto out;
+ /*
+ * By default we disable authentication for discovery sessions,
+ * this can be changed with:
+ *
+ * /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
+ */
+ param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+ if (!param)
+ goto out;
+
+ if (iscsi_update_param_value(param, "CHAP,None") < 0)
+ goto out;
+
+ tpg->tpg_attrib.authentication = 0;
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_ACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ iscsit_global->discovery_tpg = tpg;
+ pr_debug("CORE[0] - Allocated Discovery TPG\n");
+
+ return 0;
+out:
+ if (tpg->sid == 1)
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+ kfree(tpg);
+ return -1;
+}
+
+void iscsit_release_discovery_tpg(void)
+{
+ struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg;
+
+ if (!tpg)
+ return;
+
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+
+ kfree(tpg);
+ iscsit_global->discovery_tpg = NULL;
+}
+
+struct iscsi_portal_group *iscsit_get_tpg_from_np(
+ struct iscsi_tiqn *tiqn,
+ struct iscsi_np *np)
+{
+ struct iscsi_portal_group *tpg = NULL;
+ struct iscsi_tpg_np *tpg_np;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+ spin_lock(&tpg->tpg_state_lock);
+ if (tpg->tpg_state == TPG_STATE_FREE) {
+ spin_unlock(&tpg->tpg_state_lock);
+ continue;
+ }
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+ if (tpg_np->tpg_np == np) {
+ spin_unlock(&tpg->tpg_np_lock);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ return tpg;
+ }
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return NULL;
+}
+
+int iscsit_get_tpg(
+ struct iscsi_portal_group *tpg)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
+ return ((ret != 0) || signal_pending(current)) ? -1 : 0;
+}
+
+void iscsit_put_tpg(struct iscsi_portal_group *tpg)
+{
+ mutex_unlock(&tpg->tpg_access_lock);
+}
+
+static void iscsit_clear_tpg_np_login_thread(
+ struct iscsi_tpg_np *tpg_np,
+ struct iscsi_portal_group *tpg)
+{
+ if (!tpg_np->tpg_np) {
+ pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
+ return;
+ }
+
+ iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
+}
+
+void iscsit_clear_tpg_np_login_threads(
+ struct iscsi_portal_group *tpg)
+{
+ struct iscsi_tpg_np *tpg_np;
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+ if (!tpg_np->tpg_np) {
+ pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
+ continue;
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+ spin_lock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+}
+
+void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
+{
+ iscsi_print_params(tpg->param_list);
+}
+
+static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ a->authentication = TA_AUTHENTICATION;
+ a->login_timeout = TA_LOGIN_TIMEOUT;
+ a->netif_timeout = TA_NETIF_TIMEOUT;
+ a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
+ a->generate_node_acls = TA_GENERATE_NODE_ACLS;
+ a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
+ a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
+ a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
+}
+
+int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
+{
+ if (tpg->tpg_state != TPG_STATE_FREE) {
+ pr_err("Unable to add iSCSI Target Portal Group: %d"
+ " while not in TPG_STATE_FREE state.\n", tpg->tpgt);
+ return -EEXIST;
+ }
+ iscsit_set_default_tpg_attribs(tpg);
+
+ if (iscsi_create_default_params(&tpg->param_list) < 0)
+ goto err_out;
+
+ ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_INACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
+ tiqn->tiqn_ntpgs++;
+ pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
+ tiqn->tiqn, tpg->tpgt);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return 0;
+err_out:
+ if (tpg->param_list) {
+ iscsi_release_param_list(tpg->param_list);
+ tpg->param_list = NULL;
+ }
+ kfree(tpg);
+ return -ENOMEM;
+}
+
+int iscsit_tpg_del_portal_group(
+ struct iscsi_tiqn *tiqn,
+ struct iscsi_portal_group *tpg,
+ int force)
+{
+ u8 old_state = tpg->tpg_state;
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_INACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+ pr_err("Unable to delete iSCSI Target Portal Group:"
+ " %hu while active sessions exist, and force=0\n",
+ tpg->tpgt);
+ tpg->tpg_state = old_state;
+ return -EPERM;
+ }
+
+ core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
+
+ if (tpg->param_list) {
+ iscsi_release_param_list(tpg->param_list);
+ tpg->param_list = NULL;
+ }
+
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_FREE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ tiqn->tiqn_ntpgs--;
+ list_del(&tpg->tpg_list);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
+ tiqn->tiqn, tpg->tpgt);
+
+ kfree(tpg);
+ return 0;
+}
+
+int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
+{
+ struct iscsi_param *param;
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ spin_lock(&tpg->tpg_state_lock);
+ if (tpg->tpg_state == TPG_STATE_ACTIVE) {
+ pr_err("iSCSI target portal group: %hu is already"
+ " active, ignoring request.\n", tpg->tpgt);
+ spin_unlock(&tpg->tpg_state_lock);
+ return -EINVAL;
+ }
+ /*
+ * Make sure that AuthMethod does not contain None as an option
+ * unless explicitly disabled. Set the default to CHAP if authentication
+ * is enforced (as per default), and remove the NONE option.
+ */
+ param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+ if (!param) {
+ spin_unlock(&tpg->tpg_state_lock);
+ return -ENOMEM;
+ }
+
+ if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
+ if (!strcmp(param->value, NONE))
+ if (iscsi_update_param_value(param, CHAP) < 0) {
+ spin_unlock(&tpg->tpg_state_lock);
+ return -ENOMEM;
+ }
+ if (iscsit_ta_authentication(tpg, 1) < 0) {
+ spin_unlock(&tpg->tpg_state_lock);
+ return -ENOMEM;
+ }
+ }
+
+ tpg->tpg_state = TPG_STATE_ACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ tiqn->tiqn_active_tpgs++;
+ pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n",
+ tpg->tpgt);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return 0;
+}
+
+int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
+{
+ struct iscsi_tiqn *tiqn;
+ u8 old_state = tpg->tpg_state;
+
+ spin_lock(&tpg->tpg_state_lock);
+ if (tpg->tpg_state == TPG_STATE_INACTIVE) {
+ pr_err("iSCSI Target Portal Group: %hu is already"
+ " inactive, ignoring request.\n", tpg->tpgt);
+ spin_unlock(&tpg->tpg_state_lock);
+ return -EINVAL;
+ }
+ tpg->tpg_state = TPG_STATE_INACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ iscsit_clear_tpg_np_login_threads(tpg);
+
+ if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = old_state;
+ spin_unlock(&tpg->tpg_state_lock);
+ pr_err("Unable to disable iSCSI Target Portal Group:"
+ " %hu while active sessions exist, and force=0\n",
+ tpg->tpgt);
+ return -EPERM;
+ }
+
+ tiqn = tpg->tpg_tiqn;
+ if (!tiqn || (tpg == iscsit_global->discovery_tpg))
+ return 0;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ tiqn->tiqn_active_tpgs--;
+ pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n",
+ tpg->tpgt);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return 0;
+}
+
+struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
+ struct iscsi_session *sess)
+{
+ struct se_session *se_sess = sess->se_sess;
+ struct se_node_acl *se_nacl = se_sess->se_node_acl;
+ struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
+ se_node_acl);
+
+ return &acl->node_attrib;
+}
+
+struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
+ struct iscsi_tpg_np *tpg_np,
+ int network_transport)
+{
+ struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
+
+ spin_lock(&tpg_np->tpg_np_parent_lock);
+ list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
+ &tpg_np->tpg_np_parent_list, tpg_np_child_list) {
+ if (tpg_np_child->tpg_np->np_network_transport ==
+ network_transport) {
+ spin_unlock(&tpg_np->tpg_np_parent_lock);
+ return tpg_np_child;
+ }
+ }
+ spin_unlock(&tpg_np->tpg_np_parent_lock);
+
+ return NULL;
+}
+
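+/*
+ * Allocate a struct iscsi_tpg_np, bind it to the struct iscsi_np returned
+ * by iscsit_add_np() for the given sockaddr, and link it into the TPG's
+ * network portal list (and into the parent portal's child list, if any).
+ */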
+struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
+ struct iscsi_portal_group *tpg,
+ struct __kernel_sockaddr_storage *sockaddr,
+ char *ip_str,
+ struct iscsi_tpg_np *tpg_np_parent,
+ int network_transport)
+{
+ struct iscsi_np *np;
+ struct iscsi_tpg_np *tpg_np;
+
+ tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
+ if (!tpg_np) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_tpg_np.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ np = iscsit_add_np(sockaddr, ip_str, network_transport);
+ if (IS_ERR(np)) {
+ kfree(tpg_np);
+ return ERR_CAST(np);
+ }
+
+ INIT_LIST_HEAD(&tpg_np->tpg_np_list);
+ INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
+ INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
+ spin_lock_init(&tpg_np->tpg_np_parent_lock);
+ tpg_np->tpg_np = np;
+ tpg_np->tpg = tpg;
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list);
+ tpg->num_tpg_nps++;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_num_tpg_nps++;
+ spin_unlock(&tpg->tpg_np_lock);
+
+ if (tpg_np_parent) {
+ tpg_np->tpg_np_parent = tpg_np_parent;
+ spin_lock(&tpg_np_parent->tpg_np_parent_lock);
+ list_add_tail(&tpg_np->tpg_np_child_list,
+ &tpg_np_parent->tpg_np_parent_list);
+ spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
+ }
+
+ pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
+ tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+ (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+
+ return tpg_np;
+}
+
+static int iscsit_tpg_release_np(
+ struct iscsi_tpg_np *tpg_np,
+ struct iscsi_portal_group *tpg,
+ struct iscsi_np *np)
+{
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+
+ pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
+ tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+ (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+
+ tpg_np->tpg_np = NULL;
+ tpg_np->tpg = NULL;
+ kfree(tpg_np);
+ /*
+ * iscsit_del_np() will shutdown struct iscsi_np when last TPG reference is released.
+ */
+ return iscsit_del_np(np);
+}
+
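+/*
+ * Unlink a network portal from its TPG. A parent portal first recursively
+ * removes its child portals (the non ISCSI_TCP ones); a child portal only
+ * removes itself from its parent's list before the struct iscsi_np is
+ * released via iscsit_tpg_release_np().
+ */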
+int iscsit_tpg_del_network_portal(
+ struct iscsi_portal_group *tpg,
+ struct iscsi_tpg_np *tpg_np)
+{
+ struct iscsi_np *np;
+ struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
+ int ret = 0;
+
+ np = tpg_np->tpg_np;
+ if (!np) {
+ pr_err("Unable to locate struct iscsi_np from"
+ " struct iscsi_tpg_np\n");
+ return -EINVAL;
+ }
+
+ if (!tpg_np->tpg_np_parent) {
+ /*
+ * We are the parent tpg network portal. Release all of the
+ * child tpg_np's (e.g. the non-ISCSI_TCP ones) on our parent
+ * list first.
+ */
+ list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
+ &tpg_np->tpg_np_parent_list,
+ tpg_np_child_list) {
+ ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child);
+ if (ret < 0)
+ pr_err("iscsit_tpg_del_network_portal()"
+ " failed: %d\n", ret);
+ }
+ } else {
+ /*
+ * We are a child tpg network portal. Remove ourselves from
+ * the parent portal's child list.
+ */
+ spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+ list_del(&tpg_np->tpg_np_child_list);
+ spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+ }
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_del(&tpg_np->tpg_np_list);
+ tpg->num_tpg_nps--;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_num_tpg_nps--;
+ spin_unlock(&tpg->tpg_np_lock);
+
+ return iscsit_tpg_release_np(tpg_np, tpg, np);
+}
+
+int iscsit_tpg_set_initiator_node_queue_depth(
+ struct iscsi_portal_group *tpg,
+ unsigned char *initiatorname,
+ u32 queue_depth,
+ int force)
+{
+ return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
+ initiatorname, queue_depth, force);
+}
+
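+/*
+ * Enforce or relax authentication for a TPG by rewriting the AuthMethod
+ * parameter value: strip the "None" option when authentication is enabled,
+ * or append it when authentication is disabled.
+ */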
+int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
+{
+ unsigned char buf1[256], buf2[256], *none = NULL;
+ int len;
+ struct iscsi_param *param;
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((authentication != 1) && (authentication != 0)) {
+ pr_err("Illegal value for authentication parameter:"
+ " %u, ignoring request.\n", authentication);
+ return -1;
+ }
+
+ memset(buf1, 0, sizeof(buf1));
+ memset(buf2, 0, sizeof(buf2));
+
+ param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+ if (!param)
+ return -EINVAL;
+
+ if (authentication) {
+ snprintf(buf1, sizeof(buf1), "%s", param->value);
+ none = strstr(buf1, NONE);
+ if (!none)
+ goto out;
+ if (!strncmp(none + 4, ",", 1)) {
+ if (!strcmp(buf1, none))
+ sprintf(buf2, "%s", none+5);
+ else {
+ none--;
+ *none = '\0';
+ len = sprintf(buf2, "%s", buf1);
+ none += 5;
+ sprintf(buf2 + len, "%s", none);
+ }
+ } else {
+ none--;
+ *none = '\0';
+ sprintf(buf2, "%s", buf1);
+ }
+ if (iscsi_update_param_value(param, buf2) < 0)
+ return -EINVAL;
+ } else {
+ snprintf(buf1, sizeof(buf1), "%s", param->value);
+ none = strstr(buf1, NONE);
+ if ((none))
+ goto out;
+ strncat(buf1, ",", strlen(","));
+ strncat(buf1, NONE, strlen(NONE));
+ if (iscsi_update_param_value(param, buf1) < 0)
+ return -EINVAL;
+ }
+
+out:
+ a->authentication = authentication;
+ pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
+ a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);
+
+ return 0;
+}
+
+int iscsit_ta_login_timeout(
+ struct iscsi_portal_group *tpg,
+ u32 login_timeout)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
+ pr_err("Requested Login Timeout %u larger than maximum"
+ " %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
+ pr_err("Requested Logout Timeout %u smaller than"
+ " minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->login_timeout = login_timeout;
+ pr_debug("Set Logout Timeout to %u for Target Portal Group"
+ " %hu\n", a->login_timeout, tpg->tpgt);
+
+ return 0;
+}
+
+int iscsit_ta_netif_timeout(
+ struct iscsi_portal_group *tpg,
+ u32 netif_timeout)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
+ pr_err("Requested Network Interface Timeout %u larger"
+ " than maximum %u\n", netif_timeout,
+ TA_NETIF_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
+ pr_err("Requested Network Interface Timeout %u smaller"
+ " than minimum %u\n", netif_timeout,
+ TA_NETIF_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->netif_timeout = netif_timeout;
+ pr_debug("Set Network Interface Timeout to %u for"
+ " Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
+
+ return 0;
+}
+
+int iscsit_ta_generate_node_acls(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->generate_node_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+ tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+
+ return 0;
+}
+
+int iscsit_ta_default_cmdsn_depth(
+ struct iscsi_portal_group *tpg,
+ u32 tcq_depth)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
+ pr_err("Requested Default Queue Depth: %u larger"
+ " than maximum %u\n", tcq_depth,
+ TA_DEFAULT_CMDSN_DEPTH_MAX);
+ return -EINVAL;
+ } else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) {
+ pr_err("Requested Default Queue Depth: %u smaller"
+ " than minimum %u\n", tcq_depth,
+ TA_DEFAULT_CMDSN_DEPTH_MIN);
+ return -EINVAL;
+ }
+
+ a->default_cmdsn_depth = tcq_depth;
+ pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n",
+ tpg->tpgt, a->default_cmdsn_depth);
+
+ return 0;
+}
+
+int iscsit_ta_cache_dynamic_acls(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->cache_dynamic_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+ " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+ "Enabled" : "Disabled");
+
+ return 0;
+}
+
+int iscsit_ta_demo_mode_write_protect(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->demo_mode_write_protect = flag;
+ pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n",
+ tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF");
+
+ return 0;
+}
+
+int iscsit_ta_prod_mode_write_protect(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->prod_mode_write_protect = flag;
+ pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:"
+ " %s\n", tpg->tpgt, (a->prod_mode_write_protect) ?
+ "ON" : "OFF");
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
new file mode 100644
index 0000000..dda48c1
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -0,0 +1,41 @@
+#ifndef ISCSI_TARGET_TPG_H
+#define ISCSI_TARGET_TPG_H
+
+extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
+extern int iscsit_load_discovery_tpg(void);
+extern void iscsit_release_discovery_tpg(void);
+extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
+ struct iscsi_np *);
+extern int iscsit_get_tpg(struct iscsi_portal_group *);
+extern void iscsit_put_tpg(struct iscsi_portal_group *);
+extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *);
+extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
+extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
+extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
+ int);
+extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
+extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
+extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
+ struct iscsi_portal_group *, const char *, u32);
+extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
+ struct se_node_acl *);
+extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session *);
+extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
+extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
+extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
+ struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
+ int);
+extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
+ struct iscsi_tpg_np *);
+extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
+ unsigned char *, u32, int);
+extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
+
+#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
new file mode 100644
index 0000000..0baac5b
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -0,0 +1,551 @@
+/*******************************************************************************
+ * This file contains the iSCSI Login Thread and Thread Queue functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target.h"
+
+static LIST_HEAD(active_ts_list);
+static LIST_HEAD(inactive_ts_list);
+static DEFINE_SPINLOCK(active_ts_lock);
+static DEFINE_SPINLOCK(inactive_ts_lock);
+static DEFINE_SPINLOCK(ts_bitmap_lock);
+
+static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
+{
+ spin_lock(&active_ts_lock);
+ list_add_tail(&ts->ts_list, &active_ts_list);
+ iscsit_global->active_ts++;
+ spin_unlock(&active_ts_lock);
+}
+
+void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
+{
+ spin_lock(&inactive_ts_lock);
+ list_add_tail(&ts->ts_list, &inactive_ts_list);
+ iscsit_global->inactive_ts++;
+ spin_unlock(&inactive_ts_lock);
+}
+
+static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
+{
+ spin_lock(&active_ts_lock);
+ list_del(&ts->ts_list);
+ iscsit_global->active_ts--;
+ spin_unlock(&active_ts_lock);
+}
+
+static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
+{
+ struct iscsi_thread_set *ts;
+
+ spin_lock(&inactive_ts_lock);
+ if (list_empty(&inactive_ts_list)) {
+ spin_unlock(&inactive_ts_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(ts, &inactive_ts_list, ts_list)
+ break;
+
+ list_del(&ts->ts_list);
+ iscsit_global->inactive_ts--;
+ spin_unlock(&inactive_ts_lock);
+
+ return ts;
+}
+
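+/*
+ * Allocate the requested number of RX/TX thread sets, reserving a thread_id
+ * from the global thread_set_bitmap and spawning a kthread pair for each,
+ * then park them on the inactive list until a connection claims them.
+ */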
+int iscsi_allocate_thread_sets(u32 thread_pair_count)
+{
+ int allocated_thread_pair_count = 0, i, thread_id;
+ struct iscsi_thread_set *ts = NULL;
+
+ for (i = 0; i < thread_pair_count; i++) {
+ ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
+ if (!ts) {
+ pr_err("Unable to allocate memory for"
+ " thread set.\n");
+ return allocated_thread_pair_count;
+ }
+ /*
+ * Locate the next available region in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+ iscsit_global->ts_bitmap_count, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
+ if (thread_id < 0) {
+ pr_err("bitmap_find_free_region() failed for"
+ " thread_set_bitmap\n");
+ kfree(ts);
+ return allocated_thread_pair_count;
+ }
+
+ ts->thread_id = thread_id;
+ ts->status = ISCSI_THREAD_SET_FREE;
+ INIT_LIST_HEAD(&ts->ts_list);
+ spin_lock_init(&ts->ts_state_lock);
+ init_completion(&ts->rx_post_start_comp);
+ init_completion(&ts->tx_post_start_comp);
+ init_completion(&ts->rx_restart_comp);
+ init_completion(&ts->tx_restart_comp);
+ init_completion(&ts->rx_start_comp);
+ init_completion(&ts->tx_start_comp);
+
+ ts->create_threads = 1;
+ ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
+ ISCSI_TX_THREAD_NAME);
+ if (IS_ERR(ts->tx_thread)) {
+ dump_stack();
+ pr_err("Unable to start iscsi_target_tx_thread\n");
+ break;
+ }
+
+ ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s",
+ ISCSI_RX_THREAD_NAME);
+ if (IS_ERR(ts->rx_thread)) {
+ kthread_stop(ts->tx_thread);
+ pr_err("Unable to start iscsi_target_rx_thread\n");
+ break;
+ }
+ ts->create_threads = 0;
+
+ iscsi_add_ts_to_inactive_list(ts);
+ allocated_thread_pair_count++;
+ }
+
+ pr_debug("Spawned %d thread set(s) (%d total threads).\n",
+ allocated_thread_pair_count, allocated_thread_pair_count * 2);
+ return allocated_thread_pair_count;
+}
+
+void iscsi_deallocate_thread_sets(void)
+{
+ u32 released_count = 0;
+ struct iscsi_thread_set *ts = NULL;
+
+ while ((ts = iscsi_get_ts_from_inactive_list())) {
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_DIE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ if (ts->rx_thread) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ kthread_stop(ts->rx_thread);
+ }
+ if (ts->tx_thread) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ kthread_stop(ts->tx_thread);
+ }
+ /*
+ * Release this thread_id in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap,
+ ts->thread_id, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
+
+ released_count++;
+ kfree(ts);
+ }
+
+ if (released_count)
+ pr_debug("Stopped %d thread set(s) (%d total threads)."
+ "\n", released_count, released_count * 2);
+}
+
+static void iscsi_deallocate_extra_thread_sets(void)
+{
+ u32 orig_count, released_count = 0;
+ struct iscsi_thread_set *ts = NULL;
+
+ orig_count = TARGET_THREAD_SET_COUNT;
+
+ while ((iscsit_global->inactive_ts + 1) > orig_count) {
+ ts = iscsi_get_ts_from_inactive_list();
+ if (!ts)
+ break;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_DIE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ if (ts->rx_thread) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ kthread_stop(ts->rx_thread);
+ }
+ if (ts->tx_thread) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ kthread_stop(ts->tx_thread);
+ }
+ /*
+ * Release this thread_id in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap,
+ ts->thread_id, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
+
+ released_count++;
+ kfree(ts);
+ }
+
+ if (released_count) {
+ pr_debug("Stopped %d thread set(s) (%d total threads)."
+ "\n", released_count, released_count * 2);
+ }
+}
+
+void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
+{
+ iscsi_add_ts_to_active_list(ts);
+
+ spin_lock_bh(&ts->ts_state_lock);
+ conn->thread_set = ts;
+ ts->conn = conn;
+ spin_unlock_bh(&ts->ts_state_lock);
+ /*
+ * Start up the RX thread and wait on rx_post_start_comp. The RX
+ * Thread will then do the same for the TX Thread in
+ * iscsi_rx_thread_pre_handler().
+ */
+ complete(&ts->rx_start_comp);
+ wait_for_completion(&ts->rx_post_start_comp);
+}
+
+struct iscsi_thread_set *iscsi_get_thread_set(void)
+{
+ int allocate_ts = 0;
+ struct completion comp;
+ struct iscsi_thread_set *ts = NULL;
+ /*
+ * If no inactive thread set is available on the first call to
+ * iscsi_get_ts_from_inactive_list(), sleep for a second and
+ * try again. If still none are available after two attempts,
+ * allocate a set ourselves.
+ */
+get_set:
+ ts = iscsi_get_ts_from_inactive_list();
+ if (!ts) {
+ if (allocate_ts == 2)
+ iscsi_allocate_thread_sets(1);
+
+ init_completion(&comp);
+ wait_for_completion_timeout(&comp, 1 * HZ);
+
+ allocate_ts++;
+ goto get_set;
+ }
+
+ ts->delay_inactive = 1;
+ ts->signal_sent = 0;
+ ts->thread_count = 2;
+ init_completion(&ts->rx_restart_comp);
+ init_completion(&ts->tx_restart_comp);
+
+ return ts;
+}
+
+void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear)
+{
+ struct iscsi_thread_set *ts = NULL;
+
+ if (!conn->thread_set) {
+ pr_err("struct iscsi_conn->thread_set is NULL\n");
+ return;
+ }
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->thread_clear &= ~thread_clear;
+
+ if ((thread_clear & ISCSI_CLEAR_RX_THREAD) &&
+ (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD))
+ complete(&ts->rx_restart_comp);
+ else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) &&
+ (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD))
+ complete(&ts->tx_restart_comp);
+ spin_unlock_bh(&ts->ts_state_lock);
+}
+
+void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent)
+{
+ struct iscsi_thread_set *ts = NULL;
+
+ if (!conn->thread_set) {
+ pr_err("struct iscsi_conn->thread_set is NULL\n");
+ return;
+ }
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->signal_sent |= signal_sent;
+ spin_unlock_bh(&ts->ts_state_lock);
+}
+
+int iscsi_release_thread_set(struct iscsi_conn *conn)
+{
+ int thread_called = 0;
+ struct iscsi_thread_set *ts = NULL;
+
+ if (!conn || !conn->thread_set) {
+ pr_err("connection or thread set pointer is NULL\n");
+ BUG();
+ }
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_RESET;
+
+ if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME,
+ strlen(ISCSI_RX_THREAD_NAME)))
+ thread_called = ISCSI_RX_THREAD;
+ else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME,
+ strlen(ISCSI_TX_THREAD_NAME)))
+ thread_called = ISCSI_TX_THREAD;
+
+ if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) &&
+ (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) {
+
+ if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
+ }
+ ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+ wait_for_completion(&ts->rx_restart_comp);
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD;
+ }
+ if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) &&
+ (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) {
+
+ if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
+ }
+ ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+ wait_for_completion(&ts->tx_restart_comp);
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD;
+ }
+
+ ts->conn = NULL;
+ ts->status = ISCSI_THREAD_SET_FREE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return 0;
+}
+
+int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn)
+{
+ struct iscsi_thread_set *ts;
+
+ if (!conn->thread_set)
+ return -1;
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ if (ts->status != ISCSI_THREAD_SET_ACTIVE) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ return -1;
+ }
+
+ if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
+ }
+ if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
+ }
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return 0;
+}
+
+static void iscsi_check_to_add_additional_sets(void)
+{
+ int thread_sets_add;
+
+ spin_lock(&inactive_ts_lock);
+ thread_sets_add = iscsit_global->inactive_ts;
+ spin_unlock(&inactive_ts_lock);
+ if (thread_sets_add == 1)
+ iscsi_allocate_thread_sets(1);
+}
+
+static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+ spin_lock_bh(&ts->ts_state_lock);
+ if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ return -1;
+ }
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return 0;
+}
+
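+/*
+ * Run by iscsi_target_rx_thread() before handling a connection: return the
+ * thread set to the inactive list once both threads have finished with the
+ * previous connection, sleep on rx_start_comp until a new connection is
+ * attached, then start the TX thread and hand back ts->conn.
+ */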
+struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+ int ret;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ if (ts->create_threads) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ goto sleep;
+ }
+
+ flush_signals(current);
+
+ if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ iscsi_del_ts_from_active_list(ts);
+
+ if (!iscsit_global->in_shutdown)
+ iscsi_deallocate_extra_thread_sets();
+
+ iscsi_add_ts_to_inactive_list(ts);
+ spin_lock_bh(&ts->ts_state_lock);
+ }
+
+ if ((ts->status == ISCSI_THREAD_SET_RESET) &&
+ (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
+ complete(&ts->rx_restart_comp);
+
+ ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+sleep:
+ ret = wait_for_completion_interruptible(&ts->rx_start_comp);
+ if (ret != 0)
+ return NULL;
+
+ if (iscsi_signal_thread_pre_handler(ts) < 0)
+ return NULL;
+
+ if (!ts->conn) {
+ pr_err("struct iscsi_thread_set->conn is NULL for"
+ " thread_id: %d, going back to sleep\n", ts->thread_id);
+ goto sleep;
+ }
+ iscsi_check_to_add_additional_sets();
+ /*
+ * The RX Thread starts up the TX Thread and sleeps.
+ */
+ ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
+ complete(&ts->tx_start_comp);
+ wait_for_completion(&ts->tx_post_start_comp);
+
+ return ts->conn;
+}
+
+struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+ int ret;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ if (ts->create_threads) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ goto sleep;
+ }
+
+ flush_signals(current);
+
+ if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ iscsi_del_ts_from_active_list(ts);
+
+ if (!iscsit_global->in_shutdown)
+ iscsi_deallocate_extra_thread_sets();
+
+ iscsi_add_ts_to_inactive_list(ts);
+ spin_lock_bh(&ts->ts_state_lock);
+ }
+ if ((ts->status == ISCSI_THREAD_SET_RESET) &&
+ (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
+ complete(&ts->tx_restart_comp);
+
+ ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+sleep:
+ ret = wait_for_completion_interruptible(&ts->tx_start_comp);
+ if (ret != 0)
+ return NULL;
+
+ if (iscsi_signal_thread_pre_handler(ts) < 0)
+ return NULL;
+
+ if (!ts->conn) {
+ pr_err("struct iscsi_thread_set->conn is NULL for "
+ " thread_id: %d, going back to sleep\n",
+ ts->thread_id);
+ goto sleep;
+ }
+
+ iscsi_check_to_add_additional_sets();
+ /*
+ * From the TX thread, up the tx_post_start_comp that the RX Thread is
+ * sleeping on in iscsi_rx_thread_pre_handler(), then up the
+ * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
+ */
+ ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
+ complete(&ts->tx_post_start_comp);
+ complete(&ts->rx_post_start_comp);
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_ACTIVE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return ts->conn;
+}
+
+int iscsi_thread_set_init(void)
+{
+ int size;
+
+ iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS;
+
+ size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long);
+ iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL);
+ if (!iscsit_global->ts_bitmap) {
+ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&active_ts_lock);
+ spin_lock_init(&inactive_ts_lock);
+ spin_lock_init(&ts_bitmap_lock);
+ INIT_LIST_HEAD(&active_ts_list);
+ INIT_LIST_HEAD(&inactive_ts_list);
+
+ return 0;
+}
+
+void iscsi_thread_set_free(void)
+{
+ kfree(iscsit_global->ts_bitmap);
+}
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
new file mode 100644
index 0000000..26e6a95
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tq.h
@@ -0,0 +1,88 @@
+#ifndef ISCSI_THREAD_QUEUE_H
+#define ISCSI_THREAD_QUEUE_H
+
+/*
+ * Defines for thread sets.
+ */
+extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
+extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *);
+extern int iscsi_allocate_thread_sets(u32);
+extern void iscsi_deallocate_thread_sets(void);
+extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
+extern struct iscsi_thread_set *iscsi_get_thread_set(void);
+extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
+extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
+extern int iscsi_release_thread_set(struct iscsi_conn *);
+extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
+extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
+extern int iscsi_thread_set_init(void);
+extern void iscsi_thread_set_free(void);
+
+extern int iscsi_target_tx_thread(void *);
+extern int iscsi_target_rx_thread(void *);
+
+#define TARGET_THREAD_SET_COUNT 4
+
+#define ISCSI_RX_THREAD 1
+#define ISCSI_TX_THREAD 2
+#define ISCSI_RX_THREAD_NAME "iscsi_trx"
+#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
+#define ISCSI_BLOCK_RX_THREAD 0x1
+#define ISCSI_BLOCK_TX_THREAD 0x2
+#define ISCSI_CLEAR_RX_THREAD 0x1
+#define ISCSI_CLEAR_TX_THREAD 0x2
+#define ISCSI_SIGNAL_RX_THREAD 0x1
+#define ISCSI_SIGNAL_TX_THREAD 0x2
+
+/* struct iscsi_thread_set->status */
+#define ISCSI_THREAD_SET_FREE 1
+#define ISCSI_THREAD_SET_ACTIVE 2
+#define ISCSI_THREAD_SET_DIE 3
+#define ISCSI_THREAD_SET_RESET 4
+#define ISCSI_THREAD_SET_DEALLOCATE_THREADS 5
+
+/* By default allow a maximum of 32K iSCSI connections */
+#define ISCSI_TS_BITMAP_BITS 32768
+
+struct iscsi_thread_set {
+ /* flags used for blocking and restarting sets */
+ int blocked_threads;
+ /* flag for creating threads */
+ int create_threads;
+ /* flag for delaying re-adding to inactive list */
+ int delay_inactive;
+ /* status for thread set */
+ int status;
+ /* which threads have had signals sent */
+ int signal_sent;
+ /* flag for which threads exited first */
+ int thread_clear;
+ /* Active threads in the thread set */
+ int thread_count;
+ /* Unique thread ID */
+ u32 thread_id;
+ /* pointer to connection if set is active */
+ struct iscsi_conn *conn;
+ /* used for controlling ts state accesses */
+ spinlock_t ts_state_lock;
+ /* Used for rx side post startup */
+ struct completion rx_post_start_comp;
+ /* Used for tx side post startup */
+ struct completion tx_post_start_comp;
+ /* used for restarting thread queue */
+ struct completion rx_restart_comp;
+ /* used for restarting thread queue */
+ struct completion tx_restart_comp;
+ /* used for normal unused blocking */
+ struct completion rx_start_comp;
+ /* used for normal unused blocking */
+ struct completion tx_start_comp;
+ /* OS descriptor for rx thread */
+ struct task_struct *rx_thread;
+ /* OS descriptor for tx thread */
+ struct task_struct *tx_thread;
+ /* struct iscsi_thread_set list head */
+ struct list_head ts_list;
+};
+
+#endif /*** ISCSI_THREAD_QUEUE_H ***/
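
[Editorial sketch, not part of the patch] The header above only declares the thread-set API; assuming a fully initialized struct iscsi_conn, the expected call pattern from a login path would be roughly the following (the example_ wrapper is hypothetical, and the sketch assumes iscsi_get_thread_set() hands back a usable set):

static void example_attach_conn(struct iscsi_conn *conn)
{
        struct iscsi_thread_set *ts;

        /* Reserve a free RX/TX thread pair from the inactive list. */
        ts = iscsi_get_thread_set();

        /*
         * Bind the connection and wake the threads sleeping in the
         * *_thread_pre_handler() routines from iscsi_target_tq.c.
         */
        iscsi_activate_thread_set(conn, ts);
}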
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
new file mode 100644
index 0000000..a0d23bc
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -0,0 +1,1817 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target specific utility functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+#define PRINT_BUFF(buff, len) \
+{ \
+ int zzz; \
+ \
+ pr_debug("%d:\n", __LINE__); \
+ for (zzz = 0; zzz < len; zzz++) { \
+ if (zzz % 16 == 0) { \
+ if (zzz) \
+ pr_debug("\n"); \
+ pr_debug("%4i: ", zzz); \
+ } \
+ pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
+ } \
+ if ((len + 1) % 16) \
+ pr_debug("\n"); \
+}
+
+extern struct list_head g_tiqn_list;
+extern spinlock_t tiqn_lock;
+
+/*
+ * Called with cmd->r2t_lock held.
+ */
+int iscsit_add_r2t_to_list(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 xfer_len,
+ int recovery,
+ u32 r2t_sn)
+{
+ struct iscsi_r2t *r2t;
+
+ r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
+ if (!r2t) {
+ pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&r2t->r2t_list);
+
+ r2t->recovery_r2t = recovery;
+ r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
+ r2t->offset = offset;
+ r2t->xfer_len = xfer_len;
+ list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
+
+ spin_lock_bh(&cmd->r2t_lock);
+ return 0;
+}
+
+struct iscsi_r2t *iscsit_get_r2t_for_eos(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 length)
+{
+ struct iscsi_r2t *r2t;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if ((r2t->offset <= offset) &&
+ (r2t->offset + r2t->xfer_len) >= (offset + length)) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return r2t;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ pr_err("Unable to locate R2T for Offset: %u, Length:"
+ " %u\n", offset, length);
+ return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
+{
+ struct iscsi_r2t *r2t;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if (!r2t->sent_r2t) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return r2t;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ pr_err("Unable to locate next R2T to send for ITT:"
+ " 0x%08x.\n", cmd->init_task_tag);
+ return NULL;
+}
+
+/*
+ * Called with cmd->r2t_lock held.
+ */
+void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
+{
+ list_del(&r2t->r2t_list);
+ kmem_cache_free(lio_r2t_cache, r2t);
+}
+
+void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
+{
+ struct iscsi_r2t *r2t, *r2t_tmp;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
+ iscsit_free_r2t(r2t, cmd);
+ spin_unlock_bh(&cmd->r2t_lock);
+}
+
+/*
+ * May be called from software interrupt (timer) context for allocating
+ * iSCSI NopINs.
+ */
+struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+{
+ struct iscsi_cmd *cmd;
+
+ cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
+ if (!cmd) {
+ pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
+ return NULL;
+ }
+
+ cmd->conn = conn;
+ INIT_LIST_HEAD(&cmd->i_list);
+ INIT_LIST_HEAD(&cmd->datain_list);
+ INIT_LIST_HEAD(&cmd->cmd_r2t_list);
+ init_completion(&cmd->reject_comp);
+ spin_lock_init(&cmd->datain_lock);
+ spin_lock_init(&cmd->dataout_timeout_lock);
+ spin_lock_init(&cmd->istate_lock);
+ spin_lock_init(&cmd->error_lock);
+ spin_lock_init(&cmd->r2t_lock);
+
+ return cmd;
+}
+
+/*
+ * Called from iscsi_handle_scsi_cmd()
+ */
+struct iscsi_cmd *iscsit_allocate_se_cmd(
+ struct iscsi_conn *conn,
+ u32 data_length,
+ int data_direction,
+ int iscsi_task_attr)
+{
+ struct iscsi_cmd *cmd;
+ struct se_cmd *se_cmd;
+ int sam_task_attr;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->data_direction = data_direction;
+ cmd->data_length = data_length;
+ /*
+ * Figure out the SAM Task Attribute for the incoming SCSI CDB
+ */
+ if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
+ (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
+ sam_task_attr = MSG_SIMPLE_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
+ sam_task_attr = MSG_ORDERED_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
+ sam_task_attr = MSG_HEAD_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_ACA)
+ sam_task_attr = MSG_ACA_TAG;
+ else {
+ pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
+ " MSG_SIMPLE_TAG\n", iscsi_task_attr);
+ sam_task_attr = MSG_SIMPLE_TAG;
+ }
+
+ se_cmd = &cmd->se_cmd;
+ /*
+ * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+ */
+ transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
+ conn->sess->se_sess, data_length, data_direction,
+ sam_task_attr, &cmd->sense_buffer[0]);
+ return cmd;
+}
+
+struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
+ struct iscsi_conn *conn,
+ u8 function)
+{
+ struct iscsi_cmd *cmd;
+ struct se_cmd *se_cmd;
+ u8 tcm_function;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->data_direction = DMA_NONE;
+
+ cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
+ if (!cmd->tmr_req) {
+ pr_err("Unable to allocate memory for"
+ " Task Management command!\n");
+ goto out;
+ }
+ /*
+ * TASK_REASSIGN for ERL=2 / connection stays inside of
+ * LIO-Target $FABRIC_MOD
+ */
+ if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
+ return cmd;
+
+ se_cmd = &cmd->se_cmd;
+ /*
+ * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+ */
+ transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
+
+ switch (function) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ tcm_function = TMR_ABORT_TASK;
+ break;
+ case ISCSI_TM_FUNC_ABORT_TASK_SET:
+ tcm_function = TMR_ABORT_TASK_SET;
+ break;
+ case ISCSI_TM_FUNC_CLEAR_ACA:
+ tcm_function = TMR_CLEAR_ACA;
+ break;
+ case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+ tcm_function = TMR_CLEAR_TASK_SET;
+ break;
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ tcm_function = TMR_LUN_RESET;
+ break;
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ tcm_function = TMR_TARGET_WARM_RESET;
+ break;
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ tcm_function = TMR_TARGET_COLD_RESET;
+ break;
+ default:
+ pr_err("Unknown iSCSI TMR Function:"
+ " 0x%02x\n", function);
+ goto out;
+ }
+
+ se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
+ (void *)cmd->tmr_req, tcm_function);
+ if (!se_cmd->se_tmr_req)
+ goto out;
+
+ cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
+
+ return cmd;
+out:
+ iscsit_release_cmd(cmd);
+ return NULL;
+}
+
+int iscsit_decide_list_to_build(
+ struct iscsi_cmd *cmd,
+ u32 immediate_data_length)
+{
+ struct iscsi_build_list bl;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na;
+
+ if (sess->sess_ops->DataSequenceInOrder &&
+ sess->sess_ops->DataPDUInOrder)
+ return 0;
+
+ if (cmd->data_direction == DMA_NONE)
+ return 0;
+
+ na = iscsit_tpg_get_node_attrib(sess);
+ memset(&bl, 0, sizeof(struct iscsi_build_list));
+
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ bl.data_direction = ISCSI_PDU_READ;
+ bl.type = PDULIST_NORMAL;
+ if (na->random_datain_pdu_offsets)
+ bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
+ if (na->random_datain_seq_offsets)
+ bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
+ } else {
+ bl.data_direction = ISCSI_PDU_WRITE;
+ bl.immediate_data_length = immediate_data_length;
+ if (na->random_r2t_offsets)
+ bl.randomize |= RANDOM_R2T_OFFSETS;
+
+ if (!cmd->immediate_data && !cmd->unsolicited_data)
+ bl.type = PDULIST_NORMAL;
+ else if (cmd->immediate_data && !cmd->unsolicited_data)
+ bl.type = PDULIST_IMMEDIATE;
+ else if (!cmd->immediate_data && cmd->unsolicited_data)
+ bl.type = PDULIST_UNSOLICITED;
+ else if (cmd->immediate_data && cmd->unsolicited_data)
+ bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
+ }
+
+ return iscsit_do_build_list(cmd, &bl);
+}
+
+struct iscsi_seq *iscsit_get_seq_holder_for_datain(
+ struct iscsi_cmd *cmd,
+ u32 seq_send_order)
+{
+ u32 i;
+
+ for (i = 0; i < cmd->seq_count; i++)
+ if (cmd->seq_list[i].seq_send_order == seq_send_order)
+ return &cmd->seq_list[i];
+
+ return NULL;
+}
+
+struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
+{
+ u32 i;
+
+ if (!cmd->seq_list) {
+ pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ return NULL;
+ }
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+ continue;
+ if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
+ cmd->seq_send_order++;
+ return &cmd->seq_list[i];
+ }
+ }
+
+ return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
+ struct iscsi_cmd *cmd,
+ u32 r2t_sn)
+{
+ struct iscsi_r2t *r2t;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if (r2t->r2t_sn == r2t_sn) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return r2t;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return NULL;
+}
+
+static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
+{
+ int ret;
+
+ /*
+ * This is the proper method of checking received CmdSN against
+ * ExpCmdSN and MaxCmdSN values, as well as accounting for out
+ * of order CmdSNs due to multiple connection sessions and/or
+ * CRC failures.
+ */
+ if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
+ pr_err("Received CmdSN: 0x%08x is greater than"
+ " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
+ sess->max_cmd_sn);
+ ret = CMDSN_ERROR_CANNOT_RECOVER;
+
+ } else if (cmdsn == sess->exp_cmd_sn) {
+ sess->exp_cmd_sn++;
+ pr_debug("Received CmdSN matches ExpCmdSN,"
+ " incremented ExpCmdSN to: 0x%08x\n",
+ sess->exp_cmd_sn);
+ ret = CMDSN_NORMAL_OPERATION;
+
+ } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
+ pr_debug("Received CmdSN: 0x%08x is greater"
+ " than ExpCmdSN: 0x%08x, not acknowledging.\n",
+ cmdsn, sess->exp_cmd_sn);
+ ret = CMDSN_HIGHER_THAN_EXP;
+
+ } else {
+ pr_err("Received CmdSN: 0x%08x is less than"
+ " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
+ sess->exp_cmd_sn);
+ ret = CMDSN_LOWER_THAN_EXP;
+ }
+
+ return ret;
+}
+
+/*
+ * Commands may be received out of order if MC/S is in use.
+ * Ensure they are executed in CmdSN order.
+ */
+int iscsit_sequence_cmd(
+ struct iscsi_conn *conn,
+ struct iscsi_cmd *cmd,
+ u32 cmdsn)
+{
+ int ret;
+ int cmdsn_ret;
+
+ mutex_lock(&conn->sess->cmdsn_mutex);
+
+ cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
+ switch (cmdsn_ret) {
+ case CMDSN_NORMAL_OPERATION:
+ ret = iscsit_execute_cmd(cmd, 0);
+ if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
+ iscsit_execute_ooo_cmdsns(conn->sess);
+ break;
+ case CMDSN_HIGHER_THAN_EXP:
+ ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
+ break;
+ case CMDSN_LOWER_THAN_EXP:
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ ret = cmdsn_ret;
+ break;
+ default:
+ ret = cmdsn_ret;
+ break;
+ }
+ mutex_unlock(&conn->sess->cmdsn_mutex);
+
+ return ret;
+}
+
+int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if (conn->sess->sess_ops->InitialR2T) {
+ pr_err("Received unexpected unsolicited data"
+ " while InitialR2T=Yes, protocol error.\n");
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
+ return -1;
+ }
+
+ if ((cmd->first_burst_len + payload_length) >
+ conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("Total %u bytes exceeds FirstBurstLength: %u"
+ " for this Unsolicited DataOut Burst.\n",
+ (cmd->first_burst_len + payload_length),
+ conn->sess->sess_ops->FirstBurstLength);
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return -1;
+ }
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
+ return 0;
+
+ if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
+ ((cmd->first_burst_len + payload_length) !=
+ conn->sess->sess_ops->FirstBurstLength)) {
+ pr_err("Unsolicited non-immediate data received %u"
+ " does not equal FirstBurstLength: %u, and does"
+ " not equal ExpXferLen %u.\n",
+ (cmd->first_burst_len + payload_length),
+ conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return -1;
+ }
+ return 0;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_itt(
+ struct iscsi_conn *conn,
+ u32 init_task_tag)
+{
+ struct iscsi_cmd *cmd;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock_bh(&conn->cmd_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
+ init_task_tag, conn->cid);
+ return NULL;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
+ struct iscsi_conn *conn,
+ u32 init_task_tag,
+ u32 length)
+{
+ struct iscsi_cmd *cmd;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock_bh(&conn->cmd_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
+ " dumping payload\n", init_task_tag, conn->cid);
+ if (length)
+ iscsit_dump_data_payload(conn, length, 1);
+
+ return NULL;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_ttt(
+ struct iscsi_conn *conn,
+ u32 targ_xfer_tag)
+{
+ struct iscsi_cmd *cmd = NULL;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->targ_xfer_tag == targ_xfer_tag) {
+ spin_unlock_bh(&conn->cmd_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
+ targ_xfer_tag, conn->cid);
+ return NULL;
+}
+
+int iscsit_find_cmd_for_recovery(
+ struct iscsi_session *sess,
+ struct iscsi_cmd **cmd_ptr,
+ struct iscsi_conn_recovery **cr_ptr,
+ u32 init_task_tag)
+{
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_conn_recovery *cr;
+ /*
+ * Scan through the inactive connection recovery list's command list.
+ * If init_task_tag matches, the command still sits in an inactive entry.
+ */
+ spin_lock(&sess->cr_i_lock);
+ list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_unlock(&sess->cr_i_lock);
+
+ *cr_ptr = cr;
+ *cmd_ptr = cmd;
+ return -2;
+ }
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&sess->cr_i_lock);
+ /*
+ * Scan through the active connection recovery list's command list.
+ * If init_task_tag matches, the command is ready to be reassigned.
+ */
+ spin_lock(&sess->cr_a_lock);
+ list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_unlock(&sess->cr_a_lock);
+
+ *cr_ptr = cr;
+ *cmd_ptr = cmd;
+ return 0;
+ }
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&sess->cr_a_lock);
+
+ return -1;
+}
+
+void iscsit_add_cmd_to_immediate_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ u8 state)
+{
+ struct iscsi_queue_req *qr;
+
+ qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
+ if (!qr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_queue_req\n");
+ return;
+ }
+ INIT_LIST_HEAD(&qr->qr_list);
+ qr->cmd = cmd;
+ qr->state = state;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ list_add_tail(&qr->qr_list, &conn->immed_queue_list);
+ atomic_inc(&cmd->immed_queue_count);
+ atomic_set(&conn->check_immediate_queue, 1);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ wake_up_process(conn->thread_set->tx_thread);
+}
+
+struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ if (list_empty(&conn->immed_queue_list)) {
+ spin_unlock_bh(&conn->immed_queue_lock);
+ return NULL;
+ }
+ list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
+ break;
+
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->immed_queue_count);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ return qr;
+}
+
+static void iscsit_remove_cmd_from_immediate_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr, *qr_tmp;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ if (!atomic_read(&cmd->immed_queue_count)) {
+ spin_unlock_bh(&conn->immed_queue_lock);
+ return;
+ }
+
+ list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
+ if (qr->cmd != cmd)
+ continue;
+
+ atomic_dec(&qr->cmd->immed_queue_count);
+ list_del(&qr->qr_list);
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ if (atomic_read(&cmd->immed_queue_count)) {
+ pr_err("ITT: 0x%08x immed_queue_count: %d\n",
+ cmd->init_task_tag,
+ atomic_read(&cmd->immed_queue_count));
+ }
+}
+
+void iscsit_add_cmd_to_response_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ u8 state)
+{
+ struct iscsi_queue_req *qr;
+
+ qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
+ if (!qr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_queue_req\n");
+ return;
+ }
+ INIT_LIST_HEAD(&qr->qr_list);
+ qr->cmd = cmd;
+ qr->state = state;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ list_add_tail(&qr->qr_list, &conn->response_queue_list);
+ atomic_inc(&cmd->response_queue_count);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ wake_up_process(conn->thread_set->tx_thread);
+}
+
+struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ if (list_empty(&conn->response_queue_list)) {
+ spin_unlock_bh(&conn->response_queue_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(qr, &conn->response_queue_list, qr_list)
+ break;
+
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->response_queue_count);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ return qr;
+}
+
+static void iscsit_remove_cmd_from_response_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr, *qr_tmp;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ if (!atomic_read(&cmd->response_queue_count)) {
+ spin_unlock_bh(&conn->response_queue_lock);
+ return;
+ }
+
+ list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
+ qr_list) {
+ if (qr->cmd != cmd)
+ continue;
+
+ atomic_dec(&qr->cmd->response_queue_count);
+ list_del(&qr->qr_list);
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ if (atomic_read(&cmd->response_queue_count)) {
+ pr_err("ITT: 0x%08x response_queue_count: %d\n",
+ cmd->init_task_tag,
+ atomic_read(&cmd->response_queue_count));
+ }
+}
+
+void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr, *qr_tmp;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->immed_queue_count);
+
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ spin_lock_bh(&conn->response_queue_lock);
+ list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
+ qr_list) {
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->response_queue_count);
+
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->response_queue_lock);
+}
+
+void iscsit_release_cmd(struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ int i;
+
+ iscsit_free_r2ts_from_list(cmd);
+ iscsit_free_all_datain_reqs(cmd);
+
+ kfree(cmd->buf_ptr);
+ kfree(cmd->pdu_list);
+ kfree(cmd->seq_list);
+ kfree(cmd->tmr_req);
+ kfree(cmd->iov_data);
+
+ for (i = 0; i < cmd->t_mem_sg_nents; i++)
+ __free_page(sg_page(&cmd->t_mem_sg[i]));
+
+ kfree(cmd->t_mem_sg);
+
+ if (conn) {
+ iscsit_remove_cmd_from_immediate_queue(cmd, conn);
+ iscsit_remove_cmd_from_response_queue(cmd, conn);
+ }
+
+ kmem_cache_free(lio_cmd_cache, cmd);
+}
+
+int iscsit_check_session_usage_count(struct iscsi_session *sess)
+{
+ spin_lock_bh(&sess->session_usage_lock);
+ if (sess->session_usage_count != 0) {
+ sess->session_waiting_on_uc = 1;
+ spin_unlock_bh(&sess->session_usage_lock);
+ if (in_interrupt())
+ return 2;
+
+ wait_for_completion(&sess->session_waiting_on_uc_comp);
+ return 1;
+ }
+ spin_unlock_bh(&sess->session_usage_lock);
+
+ return 0;
+}
+
+void iscsit_dec_session_usage_count(struct iscsi_session *sess)
+{
+ spin_lock_bh(&sess->session_usage_lock);
+ sess->session_usage_count--;
+
+ if (!sess->session_usage_count && sess->session_waiting_on_uc)
+ complete(&sess->session_waiting_on_uc_comp);
+
+ spin_unlock_bh(&sess->session_usage_lock);
+}
+
+void iscsit_inc_session_usage_count(struct iscsi_session *sess)
+{
+ spin_lock_bh(&sess->session_usage_lock);
+ sess->session_usage_count++;
+ spin_unlock_bh(&sess->session_usage_lock);
+}
+
+/*
+ * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
+ * array counts needed for sync and steering.
+ */
+static int iscsit_determine_sync_and_steering_counts(
+ struct iscsi_conn *conn,
+ struct iscsi_data_count *count)
+{
+ u32 length = count->data_length;
+ u32 marker, markint;
+
+ count->sync_and_steering = 1;
+
+ marker = (count->type == ISCSI_RX_DATA) ?
+ conn->of_marker : conn->if_marker;
+ markint = (count->type == ISCSI_RX_DATA) ?
+ (conn->conn_ops->OFMarkInt * 4) :
+ (conn->conn_ops->IFMarkInt * 4);
+ count->ss_iov_count = count->iov_count;
+
+ while (length > 0) {
+ if (length >= marker) {
+ count->ss_iov_count += 3;
+ count->ss_marker_count += 2;
+
+ length -= marker;
+ marker = markint;
+ } else
+ length = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Setup conn->if_marker and conn->of_marker values based upon
+ * the initial marker-less interval. (see iSCSI v19 A.2)
+ */
+int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
+{
+ int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
+ /*
+ * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
+ */
+ u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
+ u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
+
+ if (conn->conn_ops->OFMarker) {
+ /*
+ * Account for the first Login Command received not
+ * via iscsi_recv_msg().
+ */
+ conn->of_marker += ISCSI_HDR_LEN;
+ if (conn->of_marker <= OFMarkInt) {
+ conn->of_marker = (OFMarkInt - conn->of_marker);
+ } else {
+ login_ofmarker_count = (conn->of_marker / OFMarkInt);
+ next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
+ (login_ofmarker_count * MARKER_SIZE);
+ conn->of_marker = (next_marker - conn->of_marker);
+ }
+ conn->of_marker_offset = 0;
+ pr_debug("Setting OFMarker value to %u based on Initial"
+ " Markerless Interval.\n", conn->of_marker);
+ }
+
+ if (conn->conn_ops->IFMarker) {
+ if (conn->if_marker <= IFMarkInt) {
+ conn->if_marker = (IFMarkInt - conn->if_marker);
+ } else {
+ login_ifmarker_count = (conn->if_marker / IFMarkInt);
+ next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
+ (login_ifmarker_count * MARKER_SIZE);
+ conn->if_marker = (next_marker - conn->if_marker);
+ }
+ pr_debug("Setting IFMarker value to %u based on Initial"
+ " Markerless Interval.\n", conn->if_marker);
+ }
+
+ return 0;
+}
+
+struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
+{
+ struct iscsi_conn *conn;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ if ((conn->cid == cid) &&
+ (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
+ iscsit_inc_conn_usage_count(conn);
+ spin_unlock_bh(&sess->conn_lock);
+ return conn;
+ }
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ return NULL;
+}
+
+struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
+{
+ struct iscsi_conn *conn;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ if (conn->cid == cid) {
+ iscsit_inc_conn_usage_count(conn);
+ spin_lock(&conn->state_lock);
+ atomic_set(&conn->connection_wait_rcfr, 1);
+ spin_unlock(&conn->state_lock);
+ spin_unlock_bh(&sess->conn_lock);
+ return conn;
+ }
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ return NULL;
+}
+
+void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->conn_usage_lock);
+ if (conn->conn_usage_count != 0) {
+ conn->conn_waiting_on_uc = 1;
+ spin_unlock_bh(&conn->conn_usage_lock);
+
+ wait_for_completion(&conn->conn_waiting_on_uc_comp);
+ return;
+ }
+ spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->conn_usage_lock);
+ conn->conn_usage_count--;
+
+ if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
+ complete(&conn->conn_waiting_on_uc_comp);
+
+ spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->conn_usage_lock);
+ conn->conn_usage_count++;
+ spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
+{
+ u8 state;
+ struct iscsi_cmd *cmd;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
+ if (!cmd)
+ return -1;
+
+ cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
+ state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
+ ISTATE_SEND_NOPIN_NO_RESPONSE;
+ cmd->init_task_tag = 0xFFFFFFFF;
+ spin_lock_bh(&conn->sess->ttt_lock);
+ cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
+ 0xFFFFFFFF;
+ if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
+ cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ spin_unlock_bh(&conn->sess->ttt_lock);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (want_response)
+ iscsit_start_nopin_response_timer(conn);
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
+
+ return 0;
+}
+
+static void iscsit_handle_nopin_response_timeout(unsigned long data)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *) data;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ iscsit_dec_conn_usage_count(conn);
+ return;
+ }
+
+ pr_debug("Did not receive response to NOPIN on CID: %hu on"
+ " SID: %u, failing connection.\n", conn->cid,
+ conn->sess->sid);
+ conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ {
+ struct iscsi_portal_group *tpg = conn->sess->tpg;
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ if (tiqn) {
+ spin_lock_bh(&tiqn->sess_err_stats.lock);
+ strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ (void *)conn->sess->sess_ops->InitiatorName);
+ tiqn->sess_err_stats.last_sess_failure_type =
+ ISCSI_SESS_ERR_CXN_TIMEOUT;
+ tiqn->sess_err_stats.cxn_timeout_errors++;
+ conn->sess->conn_timeout_errors++;
+ spin_unlock_bh(&tiqn->sess_err_stats.lock);
+ }
+ }
+
+ iscsit_cause_connection_reinstatement(conn, 0);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+
+ mod_timer(&conn->nopin_response_timer,
+ (get_jiffies_64() + na->nopin_response_timeout * HZ));
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+/*
+ * Called with conn->nopin_timer_lock held.
+ */
+void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+
+ init_timer(&conn->nopin_response_timer);
+ conn->nopin_response_timer.expires =
+ (get_jiffies_64() + na->nopin_response_timeout * HZ);
+ conn->nopin_response_timer.data = (unsigned long)conn;
+ conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
+ conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
+ conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&conn->nopin_response_timer);
+
+ pr_debug("Started NOPIN Response Timer on CID: %d to %u"
+ " seconds\n", conn->cid, na->nopin_response_timeout);
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+ conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ del_timer_sync(&conn->nopin_response_timer);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+static void iscsit_handle_nopin_timeout(unsigned long data)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *) data;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ iscsit_dec_conn_usage_count(conn);
+ return;
+ }
+ conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ iscsit_add_nopin(conn, 1);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+/*
+ * Called with conn->nopin_timer_lock held.
+ */
+void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+ /*
+ * NOPIN timeout is disabled.
+ */
+ if (!na->nopin_timeout)
+ return;
+
+ if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
+ return;
+
+ init_timer(&conn->nopin_timer);
+ conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
+ conn->nopin_timer.data = (unsigned long)conn;
+ conn->nopin_timer.function = iscsit_handle_nopin_timeout;
+ conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
+ conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&conn->nopin_timer);
+
+ pr_debug("Started NOPIN Timer on CID: %d at %u second"
+ " interval\n", conn->cid, na->nopin_timeout);
+}
+
+void iscsit_start_nopin_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+ /*
+ * NOPIN timeout is disabled.
+ */
+ if (!na->nopin_timeout)
+ return;
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+
+ init_timer(&conn->nopin_timer);
+ conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
+ conn->nopin_timer.data = (unsigned long)conn;
+ conn->nopin_timer.function = iscsit_handle_nopin_timeout;
+ conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
+ conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&conn->nopin_timer);
+
+ pr_debug("Started NOPIN Timer on CID: %d at %u second"
+ " interval\n", conn->cid, na->nopin_timeout);
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+ conn->nopin_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ del_timer_sync(&conn->nopin_timer);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+int iscsit_send_tx_data(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int use_misc)
+{
+ int tx_sent, tx_size;
+ u32 iov_count;
+ struct kvec *iov;
+
+send_data:
+ tx_size = cmd->tx_size;
+
+ if (!use_misc) {
+ iov = &cmd->iov_data[0];
+ iov_count = cmd->iov_data_count;
+ } else {
+ iov = &cmd->iov_misc[0];
+ iov_count = cmd->iov_misc_count;
+ }
+
+ tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
+ if (tx_size != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_data;
+ } else
+ return -1;
+ }
+ cmd->tx_size = 0;
+
+ return 0;
+}
+
+int iscsit_fe_sendpage_sg(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct scatterlist *sg = cmd->first_data_sg;
+ struct kvec iov;
+ u32 tx_hdr_size, data_len;
+ u32 offset = cmd->first_data_sg_off;
+ int tx_sent;
+
+send_hdr:
+ tx_hdr_size = ISCSI_HDR_LEN;
+ if (conn->conn_ops->HeaderDigest)
+ tx_hdr_size += ISCSI_CRC_LEN;
+
+ iov.iov_base = cmd->pdu;
+ iov.iov_len = tx_hdr_size;
+
+ tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
+ if (tx_hdr_size != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_hdr;
+ }
+ return -1;
+ }
+
+ data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
+ if (conn->conn_ops->DataDigest)
+ data_len -= ISCSI_CRC_LEN;
+
+ /*
+ * Perform sendpage() for each page in the scatterlist
+ */
+ while (data_len) {
+ u32 space = (sg->length - offset);
+ u32 sub_len = min_t(u32, data_len, space);
+send_pg:
+ tx_sent = conn->sock->ops->sendpage(conn->sock,
+ sg_page(sg), sg->offset + offset, sub_len, 0);
+ if (tx_sent != sub_len) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tcp_sendpage() returned"
+ " -EAGAIN\n");
+ goto send_pg;
+ }
+
+ pr_err("tcp_sendpage() failure: %d\n",
+ tx_sent);
+ return -1;
+ }
+
+ data_len -= sub_len;
+ offset = 0;
+ sg = sg_next(sg);
+ }
+
+send_padding:
+ if (cmd->padding) {
+ struct kvec *iov_p =
+ &cmd->iov_data[cmd->iov_data_count-1];
+
+ tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
+ if (cmd->padding != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_padding;
+ }
+ return -1;
+ }
+ }
+
+send_datacrc:
+ if (conn->conn_ops->DataDigest) {
+ struct kvec *iov_d =
+ &cmd->iov_data[cmd->iov_data_count];
+
+ tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
+ if (ISCSI_CRC_LEN != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_datacrc;
+ }
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function is mainly used to send an ISCSI_TARG_LOGIN_RSP PDU
+ * back to the Initiator when an exception condition occurs, with the
+ * errors set in status_class and status_detail.
+ *
+ * Parameters: iSCSI Connection, Status Class, Status Detail.
+ * Returns: 0 on success, -1 on error.
+ */
+int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
+{
+ u8 iscsi_hdr[ISCSI_HDR_LEN];
+ int err;
+ struct kvec iov;
+ struct iscsi_login_rsp *hdr;
+
+ iscsit_collect_login_stats(conn, status_class, status_detail);
+
+ memset(&iov, 0, sizeof(struct kvec));
+ memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
+
+ hdr = (struct iscsi_login_rsp *)&iscsi_hdr;
+ hdr->opcode = ISCSI_OP_LOGIN_RSP;
+ hdr->status_class = status_class;
+ hdr->status_detail = status_detail;
+ hdr->itt = cpu_to_be32(conn->login_itt);
+
+ iov.iov_base = &iscsi_hdr;
+ iov.iov_len = ISCSI_HDR_LEN;
+
+ PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
+
+ err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
+ if (err != ISCSI_HDR_LEN) {
+ pr_err("tx_data returned less than expected\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void iscsit_print_session_params(struct iscsi_session *sess)
+{
+ struct iscsi_conn *conn;
+
+ pr_debug("-----------------------------[Session Params for"
+ " SID: %u]-----------------------------\n", sess->sid);
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
+ iscsi_dump_conn_ops(conn->conn_ops);
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsi_dump_sess_ops(sess->sess_ops);
+}
+
+static int iscsit_do_rx_data(
+ struct iscsi_conn *conn,
+ struct iscsi_data_count *count)
+{
+ int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
+ u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
+ struct kvec iov[count->ss_iov_count], *iov_p;
+ struct msghdr msg;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ memset(&msg, 0, sizeof(struct msghdr));
+
+ if (count->sync_and_steering) {
+ int size = 0;
+ u32 i, orig_iov_count = 0;
+ u32 orig_iov_len = 0, orig_iov_loc = 0;
+ u32 iov_count = 0, per_iov_bytes = 0;
+ u32 *rx_marker, old_rx_marker = 0;
+ struct kvec *iov_record;
+
+ memset(&rx_marker_val, 0,
+ count->ss_marker_count * sizeof(u32));
+ memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
+
+ iov_record = count->iov;
+ orig_iov_count = count->iov_count;
+ rx_marker = &conn->of_marker;
+
+ i = 0;
+ size = data;
+ orig_iov_len = iov_record[orig_iov_loc].iov_len;
+ while (size > 0) {
+ pr_debug("rx_data: #1 orig_iov_len %u,"
+ " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
+ pr_debug("rx_data: #2 rx_marker %u, size"
+ " %u\n", *rx_marker, size);
+
+ if (orig_iov_len >= *rx_marker) {
+ iov[iov_count].iov_len = *rx_marker;
+ iov[iov_count++].iov_base =
+ (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &rx_marker_val[rx_marker_iov++];
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &rx_marker_val[rx_marker_iov++];
+ old_rx_marker = *rx_marker;
+
+ /*
+ * OFMarkInt is in 32-bit words.
+ */
+ *rx_marker = (conn->conn_ops->OFMarkInt * 4);
+ size -= old_rx_marker;
+ orig_iov_len -= old_rx_marker;
+ per_iov_bytes += old_rx_marker;
+
+ pr_debug("rx_data: #3 new_rx_marker"
+ " %u, size %u\n", *rx_marker, size);
+ } else {
+ iov[iov_count].iov_len = orig_iov_len;
+ iov[iov_count++].iov_base =
+ (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ per_iov_bytes = 0;
+ *rx_marker -= orig_iov_len;
+ size -= orig_iov_len;
+
+ if (size)
+ orig_iov_len =
+ iov_record[++orig_iov_loc].iov_len;
+
+ pr_debug("rx_data: #4 new_rx_marker"
+ " %u, size %u\n", *rx_marker, size);
+ }
+ }
+ data += (rx_marker_iov * (MARKER_SIZE / 2));
+
+ iov_p = &iov[0];
+ iov_len = iov_count;
+
+ if (iov_count > count->ss_iov_count) {
+ pr_err("iov_count: %d, count->ss_iov_count:"
+ " %d\n", iov_count, count->ss_iov_count);
+ return -1;
+ }
+ if (rx_marker_iov > count->ss_marker_count) {
+ pr_err("rx_marker_iov: %d, count->ss_marker"
+ "_count: %d\n", rx_marker_iov,
+ count->ss_marker_count);
+ return -1;
+ }
+ } else {
+ iov_p = count->iov;
+ iov_len = count->iov_count;
+ }
+
+ while (total_rx < data) {
+ rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
+ (data - total_rx), MSG_WAITALL);
+ if (rx_loop <= 0) {
+ pr_debug("rx_loop: %d total_rx: %d\n",
+ rx_loop, total_rx);
+ return rx_loop;
+ }
+ total_rx += rx_loop;
+ pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
+ rx_loop, total_rx, data);
+ }
+
+ if (count->sync_and_steering) {
+ int j;
+ for (j = 0; j < rx_marker_iov; j++) {
+ pr_debug("rx_data: #5 j: %d, offset: %d\n",
+ j, rx_marker_val[j]);
+ conn->of_marker_offset = rx_marker_val[j];
+ }
+ total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
+ }
+
+ return total_rx;
+}
+
+static int iscsit_do_tx_data(
+ struct iscsi_conn *conn,
+ struct iscsi_data_count *count)
+{
+ int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
+ u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
+ struct kvec iov[count->ss_iov_count], *iov_p;
+ struct msghdr msg;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ if (data <= 0) {
+ pr_err("Data length is: %d\n", data);
+ return -1;
+ }
+
+ memset(&msg, 0, sizeof(struct msghdr));
+
+ if (count->sync_and_steering) {
+ int size = 0;
+ u32 i, orig_iov_count = 0;
+ u32 orig_iov_len = 0, orig_iov_loc = 0;
+ u32 iov_count = 0, per_iov_bytes = 0;
+ u32 *tx_marker, old_tx_marker = 0;
+ struct kvec *iov_record;
+
+ memset(&tx_marker_val, 0,
+ count->ss_marker_count * sizeof(u32));
+ memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
+
+ iov_record = count->iov;
+ orig_iov_count = count->iov_count;
+ tx_marker = &conn->if_marker;
+
+ i = 0;
+ size = data;
+ orig_iov_len = iov_record[orig_iov_loc].iov_len;
+ while (size > 0) {
+ pr_debug("tx_data: #1 orig_iov_len %u,"
+ " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
+ pr_debug("tx_data: #2 tx_marker %u, size"
+ " %u\n", *tx_marker, size);
+
+ if (orig_iov_len >= *tx_marker) {
+ iov[iov_count].iov_len = *tx_marker;
+ iov[iov_count++].iov_base =
+ (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ tx_marker_val[tx_marker_iov] =
+ (size - *tx_marker);
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &tx_marker_val[tx_marker_iov++];
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &tx_marker_val[tx_marker_iov++];
+ old_tx_marker = *tx_marker;
+
+ /*
+ * IFMarkInt is in 32-bit words.
+ */
+ *tx_marker = (conn->conn_ops->IFMarkInt * 4);
+ size -= old_tx_marker;
+ orig_iov_len -= old_tx_marker;
+ per_iov_bytes += old_tx_marker;
+
+ pr_debug("tx_data: #3 new_tx_marker"
+ " %u, size %u\n", *tx_marker, size);
+ pr_debug("tx_data: #4 offset %u\n",
+ tx_marker_val[tx_marker_iov-1]);
+ } else {
+ iov[iov_count].iov_len = orig_iov_len;
+ iov[iov_count++].iov_base
+ = (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ per_iov_bytes = 0;
+ *tx_marker -= orig_iov_len;
+ size -= orig_iov_len;
+
+ if (size)
+ orig_iov_len =
+ iov_record[++orig_iov_loc].iov_len;
+
+ pr_debug("tx_data: #5 new_tx_marker"
+ " %u, size %u\n", *tx_marker, size);
+ }
+ }
+
+ data += (tx_marker_iov * (MARKER_SIZE / 2));
+
+ iov_p = &iov[0];
+ iov_len = iov_count;
+
+ if (iov_count > count->ss_iov_count) {
+ pr_err("iov_count: %d, count->ss_iov_count:"
+ " %d\n", iov_count, count->ss_iov_count);
+ return -1;
+ }
+ if (tx_marker_iov > count->ss_marker_count) {
+ pr_err("tx_marker_iov: %d, count->ss_marker"
+ "_count: %d\n", tx_marker_iov,
+ count->ss_marker_count);
+ return -1;
+ }
+ } else {
+ iov_p = count->iov;
+ iov_len = count->iov_count;
+ }
+
+ while (total_tx < data) {
+ tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+ (data - total_tx));
+ if (tx_loop <= 0) {
+ pr_debug("tx_loop: %d total_tx %d\n",
+ tx_loop, total_tx);
+ return tx_loop;
+ }
+ total_tx += tx_loop;
+ pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
+ tx_loop, total_tx, data);
+ }
+
+ if (count->sync_and_steering)
+ total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
+
+ return total_tx;
+}
+
+int rx_data(
+ struct iscsi_conn *conn,
+ struct kvec *iov,
+ int iov_count,
+ int data)
+{
+ struct iscsi_data_count c;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ memset(&c, 0, sizeof(struct iscsi_data_count));
+ c.iov = iov;
+ c.iov_count = iov_count;
+ c.data_length = data;
+ c.type = ISCSI_RX_DATA;
+
+ if (conn->conn_ops->OFMarker &&
+ (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
+ if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
+ return -1;
+ }
+
+ return iscsit_do_rx_data(conn, &c);
+}
+
+int tx_data(
+ struct iscsi_conn *conn,
+ struct kvec *iov,
+ int iov_count,
+ int data)
+{
+ struct iscsi_data_count c;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ memset(&c, 0, sizeof(struct iscsi_data_count));
+ c.iov = iov;
+ c.iov_count = iov_count;
+ c.data_length = data;
+ c.type = ISCSI_TX_DATA;
+
+ if (conn->conn_ops->IFMarker &&
+ (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
+ if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
+ return -1;
+ }
+
+ return iscsit_do_tx_data(conn, &c);
+}
+
+void iscsit_collect_login_stats(
+ struct iscsi_conn *conn,
+ u8 status_class,
+ u8 status_detail)
+{
+ struct iscsi_param *intrname = NULL;
+ struct iscsi_tiqn *tiqn;
+ struct iscsi_login_stats *ls;
+
+ tiqn = iscsit_snmp_get_tiqn(conn);
+ if (!tiqn)
+ return;
+
+ ls = &tiqn->login_stats;
+
+ spin_lock(&ls->lock);
+ if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
+ ((get_jiffies_64() - ls->last_fail_time) < 10)) {
+ /* We already have the failure info for this login */
+ spin_unlock(&ls->lock);
+ return;
+ }
+
+ if (status_class == ISCSI_STATUS_CLS_SUCCESS)
+ ls->accepts++;
+ else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
+ ls->redirects++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
+ } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+ (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
+ ls->authenticate_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
+ } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+ (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
+ ls->authorize_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
+ } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+ (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
+ ls->negotiate_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
+ } else {
+ ls->other_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
+ }
+
+ /* Save initiator name, ip address and time, if it is a failed login */
+ if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
+ if (conn->param_list)
+ intrname = iscsi_find_param_from_key(INITIATORNAME,
+ conn->param_list);
+ strcpy(ls->last_intr_fail_name,
+ (intrname ? intrname->value : "Unknown"));
+
+ ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
+ snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
+ "%s", conn->login_ip);
+ ls->last_fail_time = get_jiffies_64();
+ }
+
+ spin_unlock(&ls->lock);
+}
+
+struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
+{
+ struct iscsi_portal_group *tpg;
+
+ if (!conn || !conn->sess)
+ return NULL;
+
+ tpg = conn->sess->tpg;
+ if (!tpg)
+ return NULL;
+
+ if (!tpg->tpg_tiqn)
+ return NULL;
+
+ return tpg->tpg_tiqn;
+}
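
[Editorial sketch, not part of the patch] rx_data() and tx_data() above take a kvec array plus a total byte count and return the number of bytes moved, or a negative value on error; the pattern in iscsit_tx_login_rsp() boils down to something like this (the example_ name and the pdu buffer are hypothetical):

static int example_send_bare_header(struct iscsi_conn *conn, void *pdu)
{
        struct kvec iov;
        int ret;

        iov.iov_base = pdu;
        iov.iov_len  = ISCSI_HDR_LEN;

        /* tx_data() reports the byte count actually pushed to the socket. */
        ret = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
        if (ret != ISCSI_HDR_LEN)
                return -1;

        return 0;
}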
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
new file mode 100644
index 0000000..2cd49d6
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -0,0 +1,60 @@
+#ifndef ISCSI_TARGET_UTIL_H
+#define ISCSI_TARGET_UTIL_H
+
+#define MARKER_SIZE 8
+
+extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
+extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
+extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int);
+extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8);
+extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
+extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
+extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, u32);
+extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, u32);
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
+ u32, u32);
+extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
+extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
+ struct iscsi_conn_recovery **, u32);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
+extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
+extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
+extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
+extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern int iscsit_check_session_usage_count(struct iscsi_session *);
+extern void iscsit_dec_session_usage_count(struct iscsi_session *);
+extern void iscsit_inc_session_usage_count(struct iscsi_session *);
+extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *);
+extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
+extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
+extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
+extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
+extern void iscsit_start_nopin_timer(struct iscsi_conn *);
+extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
+extern int iscsit_send_tx_data(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern int iscsit_fe_sendpage_sg(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_tx_login_rsp(struct iscsi_conn *, u8, u8);
+extern void iscsit_print_session_params(struct iscsi_session *);
+extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
+extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
+extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
+extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
+extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
+extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
+extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_UTIL_H ***/
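
[Editorial sketch, not part of the patch] iscsit_check_received_cmdsn() in iscsi_target_util.c orders CmdSN values with iscsi_sna_gt(), i.e. serial-number arithmetic, so the 32-bit counters may wrap; the comparison it relies on is equivalent in spirit to the fragment below (the in-tree helper may be written differently, and the example_ name is made up):

/*
 * "a is newer than b" modulo 2^32, valid while the two values are
 * less than 2^31 apart.
 */
static inline int example_sna_gt(u32 a, u32 b)
{
        return (s32)(a - b) > 0;
}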
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 8ae09a1..89ae923 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -67,6 +67,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev;
+ struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
unsigned char *buf;
/*
@@ -81,9 +82,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
buf = transport_kmap_first_data_page(cmd);
- buf[0] = dev->transport->get_device_type(dev);
- if (buf[0] == TYPE_TAPE)
- buf[1] = 0x80;
+ if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
+ buf[0] = 0x3f; /* Not connected */
+ } else {
+ buf[0] = dev->transport->get_device_type(dev);
+ if (buf[0] == TYPE_TAPE)
+ buf[1] = 0x80;
+ }
buf[2] = dev->transport->get_device_rev(dev);
/*
@@ -915,8 +920,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
length += target_modesense_control(dev, &buf[offset+length]);
break;
default:
- pr_err("Got Unknown Mode Page: 0x%02x\n",
- cdb[2] & 0x3f);
+ pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+ cdb[2] & 0x3f, cdb[3]);
return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
}
offset += length;
@@ -1072,8 +1077,6 @@ target_emulate_unmap(struct se_task *task)
size -= 16;
}
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
err:
transport_kunmap_first_data_page(cmd);
@@ -1085,24 +1088,17 @@ err:
* Note this is not used for TCM/pSCSI passthrough
*/
static int
-target_emulate_write_same(struct se_task *task, int write_same32)
+target_emulate_write_same(struct se_task *task, u32 num_blocks)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
sector_t range;
sector_t lba = cmd->t_task_lba;
- unsigned int num_blocks;
int ret;
/*
- * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict
- * range when non zero is supplied, otherwise calculate the remaining
- * range based on ->get_blocks() - starting LBA.
+ * Use the explicit range when non-zero is supplied, otherwise calculate
+ * the remaining range based on ->get_blocks() - starting LBA.
*/
- if (write_same32)
- num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
- else
- num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
-
if (num_blocks != 0)
range = num_blocks;
else
@@ -1117,8 +1113,6 @@ target_emulate_write_same(struct se_task *task, int write_same32)
return ret;
}
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
return 0;
}
@@ -1165,13 +1159,23 @@ transport_emulate_control_cdb(struct se_task *task)
}
ret = target_emulate_unmap(task);
break;
+ case WRITE_SAME:
+ if (!dev->transport->do_discard) {
+ pr_err("WRITE_SAME emulation not supported"
+ " for: %s\n", dev->transport->name);
+ return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ }
+ ret = target_emulate_write_same(task,
+ get_unaligned_be16(&cmd->t_task_cdb[7]));
+ break;
case WRITE_SAME_16:
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME_16 emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
- ret = target_emulate_write_same(task, 0);
+ ret = target_emulate_write_same(task,
+ get_unaligned_be32(&cmd->t_task_cdb[10]));
break;
case VARIABLE_LENGTH_CMD:
service_action =
@@ -1184,7 +1188,8 @@ transport_emulate_control_cdb(struct se_task *task)
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
- ret = target_emulate_write_same(task, 1);
+ ret = target_emulate_write_same(task,
+ get_unaligned_be32(&cmd->t_task_cdb[28]));
break;
default:
pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
@@ -1219,8 +1224,14 @@ transport_emulate_control_cdb(struct se_task *task)
if (ret < 0)
return ret;
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
+ /*
+ * Handle the successful completion here unless a caller
+ * has explicitly requested an asynchronous completion.
+ */
+ if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ }
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
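
The refactor above has each caller extract the NUMBER OF LOGICAL BLOCKS field from its own CDB layout before calling target_emulate_write_same(). A minimal user-space sketch of those byte offsets, with be16()/be32() as simplified stand-ins for get_unaligned_be16/32() and made-up CDB contents:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for get_unaligned_be16()/get_unaligned_be32(). */
static uint16_t be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	uint8_t ws10[10] = {0}, ws16[16] = {0}, ws32[32] = {0};

	/* NUMBER OF LOGICAL BLOCKS fields read by the new callers. */
	ws10[7]  = 0x00; ws10[8]  = 0x10;	/* WRITE_SAME(10): bytes 7-8   -> 16   */
	ws16[12] = 0x01; ws16[13] = 0x00;	/* WRITE_SAME(16): bytes 10-13 -> 256  */
	ws32[30] = 0x04; ws32[31] = 0x00;	/* WRITE_SAME(32): bytes 28-31 -> 1024 */

	printf("WRITE_SAME(10): %u blocks\n", be16(&ws10[7]));
	printf("WRITE_SAME(16): %u blocks\n", be32(&ws16[10]));
	printf("WRITE_SAME(32): %u blocks\n", be32(&ws32[28]));
	return 0;
}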
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index b38b6c9..ca6e4a4 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
struct se_dev_entry *deve;
u32 i;
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
}
spin_unlock_irq(&nacl->device_list_lock);
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
}
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
}
static struct se_port *core_alloc_port(struct se_device *dev)
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)
return ret;
}
+u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+{
+ u32 tmp, aligned_max_sectors;
+ /*
+ * Limit max_sectors to a PAGE_SIZE aligned value for modern
+ * transport_allocate_data_tasks() operation.
+ */
+ tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
+ aligned_max_sectors = (tmp / block_size);
+ if (max_sectors != aligned_max_sectors) {
+ printk(KERN_INFO "Rounding down aligned max_sectors from %u"
+ " to %u\n", max_sectors, aligned_max_sectors);
+ return aligned_max_sectors;
+ }
+
+ return max_sectors;
+}
+
void se_dev_set_default_attribs(
struct se_device *dev,
struct se_dev_limits *dev_limits)
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs(
* max_sectors is based on subsystem plugin dependent requirements.
*/
dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+ /*
+ * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+ */
+ limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+ limits->logical_block_size);
dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
/*
* Set optimal_sectors from max_sectors, which can be lowered via
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
return -EINVAL;
}
}
+ /*
+ * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+ */
+ max_sectors = se_dev_align_max_sectors(max_sectors,
+ dev->se_sub_dev->se_dev_attrib.block_size);
dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
@@ -1344,15 +1372,17 @@ struct se_lun *core_dev_add_lun(
*/
if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl;
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (acl->dynamic_node_acl) {
- spin_unlock_bh(&tpg->acl_node_lock);
+ if (acl->dynamic_node_acl &&
+ (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
+ !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
+ spin_unlock_irq(&tpg->acl_node_lock);
core_tpg_add_node_to_devs(acl, tpg);
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
}
}
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
}
return lun_p;
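
se_dev_align_max_sectors() above only changes max_sectors when max_sectors * block_size is not a whole number of pages. A small stand-alone sketch of the same arithmetic, assuming PAGE_SIZE is 4096 and using a local ROUNDDOWN() in place of the kernel's rounddown():

#include <stdio.h>

#define PAGE_SIZE	4096u
/* Same result as the kernel's rounddown() for this use. */
#define ROUNDDOWN(x, y)	((x) - ((x) % (y)))

static unsigned int align_max_sectors(unsigned int max_sectors,
				      unsigned int block_size)
{
	unsigned int tmp = ROUNDDOWN(max_sectors * block_size, PAGE_SIZE);

	return tmp / block_size;
}

int main(void)
{
	/* 1023 x 512 bytes is not a whole number of 4 KiB pages: rounded down. */
	printf("1023 -> %u\n", align_max_sectors(1023, 512));	/* 1016 */
	/* 2048 x 512 bytes is page aligned already: left untouched. */
	printf("2048 -> %u\n", align_max_sectors(2048, 512));	/* 2048 */
	return 0;
}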
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 3ba7512..09b6f87 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -480,7 +480,7 @@ static struct config_group *target_fabric_make_nodeacl(
se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
if (IS_ERR(se_nacl))
- return ERR_PTR(PTR_ERR(se_nacl));
+ return ERR_CAST(se_nacl);
nacl_cg = &se_nacl->acl_group;
nacl_cg->default_groups = se_nacl->acl_default_groups;
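
The ERR_PTR(PTR_ERR(...)) -> ERR_CAST(...) change is purely cosmetic: both return the same encoded error pointer. A rough sketch with simplified local stand-ins for the err.h helpers (not the real kernel definitions):

#include <stdio.h>

/* Simplified local stand-ins for the err.h helpers -- not the kernel code. */
static inline void *ERR_PTR(long error)       { return (void *)error; }
static inline long  PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

int main(void)
{
	void *se_nacl = ERR_PTR(-12 /* -ENOMEM */);

	/* Old style: decode the error and re-encode it. */
	void *old_way = ERR_PTR(PTR_ERR(se_nacl));
	/* New style: just re-type the already-encoded error pointer. */
	void *new_way = ERR_CAST(se_nacl);

	printf("old=%ld new=%ld\n", PTR_ERR(old_way), PTR_ERR(new_way));
	return 0;
}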
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index c05d93b..0c4f783 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1597,14 +1597,14 @@ static int core_scsi3_decode_spec_i_port(
* from the decoded fabric module specific TransportID
* at *i_str.
*/
- spin_lock_bh(&tmp_tpg->acl_node_lock);
+ spin_lock_irq(&tmp_tpg->acl_node_lock);
dest_node_acl = __core_tpg_get_initiator_node_acl(
tmp_tpg, i_str);
if (dest_node_acl) {
atomic_inc(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_inc();
}
- spin_unlock_bh(&tmp_tpg->acl_node_lock);
+ spin_unlock_irq(&tmp_tpg->acl_node_lock);
if (!dest_node_acl) {
core_scsi3_tpg_undepend_item(tmp_tpg);
@@ -3495,14 +3495,14 @@ after_iport_check:
/*
* Locate the destination struct se_node_acl from the received Transport ID
*/
- spin_lock_bh(&dest_se_tpg->acl_node_lock);
+ spin_lock_irq(&dest_se_tpg->acl_node_lock);
dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
initiator_str);
if (dest_node_acl) {
atomic_inc(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_inc();
}
- spin_unlock_bh(&dest_se_tpg->acl_node_lock);
+ spin_unlock_irq(&dest_se_tpg->acl_node_lock);
if (!dest_node_acl) {
pr_err("Unable to locate %s dest_node_acl for"
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index b43cc405..1ab69f3 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -389,12 +389,10 @@ static int rd_MEMCPY_read(struct rd_request *req)
length = req->rd_size;
dst = sg_virt(&sg_d[i++]) + dst_offset;
- if (!dst)
- BUG();
+ BUG_ON(!dst);
src = sg_virt(&sg_s[j]) + src_offset;
- if (!src)
- BUG();
+ BUG_ON(!src);
dst_offset = 0;
src_offset = length;
@@ -414,8 +412,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
length = req->rd_size;
dst = sg_virt(&sg_d[i]) + dst_offset;
- if (!dst)
- BUG();
+ BUG_ON(!dst);
if (sg_d[i].length == length) {
i++;
@@ -424,8 +421,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
dst_offset = length;
src = sg_virt(&sg_s[j++]) + src_offset;
- if (!src)
- BUG();
+ BUG_ON(!src);
src_offset = 0;
page_end = 1;
@@ -509,12 +505,10 @@ static int rd_MEMCPY_write(struct rd_request *req)
length = req->rd_size;
src = sg_virt(&sg_s[i++]) + src_offset;
- if (!src)
- BUG();
+ BUG_ON(!src);
dst = sg_virt(&sg_d[j]) + dst_offset;
- if (!dst)
- BUG();
+ BUG_ON(!dst);
src_offset = 0;
dst_offset = length;
@@ -534,8 +528,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
length = req->rd_size;
src = sg_virt(&sg_s[i]) + src_offset;
- if (!src)
- BUG();
+ BUG_ON(!src);
if (sg_s[i].length == length) {
i++;
@@ -544,8 +537,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
src_offset = length;
dst = sg_virt(&sg_d[j++]) + dst_offset;
- if (!dst)
- BUG();
+ BUG_ON(!dst);
dst_offset = 0;
page_end = 1;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 4f1ba4c..162b736 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
{
struct se_node_acl *acl;
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (!strcmp(acl->initiatorname, initiatorname) &&
!acl->dynamic_node_acl) {
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
return acl;
}
}
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
return NULL;
}
@@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
+ /*
+ * Here we only create demo-mode MappedLUNs from the active
+ * TPG LUNs if the fabric is not explicitly asking for
+ * tpg_check_demo_mode_login_only() == 1.
+ */
+ if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
+ (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
+ do { ; } while (0);
+ else
+ core_tpg_add_node_to_devs(acl, tpg);
- core_tpg_add_node_to_devs(acl, tpg);
-
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
list_add_tail(&acl->acl_list, &tpg->acl_node_list);
tpg->num_node_acls++;
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
{
struct se_node_acl *acl = NULL;
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (acl) {
if (acl->dynamic_node_acl) {
@@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
/*
* Release the locally allocated struct se_node_acl
* because core_tpg_add_initiator_node_acl() returned
@@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
" Node %s already exists for TPG %u, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
return ERR_PTR(-EEXIST);
}
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
if (!se_nacl) {
pr_err("struct se_node_acl pointer is NULL\n");
@@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
return ERR_PTR(-EINVAL);
}
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
list_add_tail(&acl->acl_list, &tpg->acl_node_list);
tpg->num_node_acls++;
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
done:
pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
@@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl(
struct se_session *sess, *sess_tmp;
int dynamic_acl = 0;
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
dynamic_acl = 1;
}
list_del(&acl->acl_list);
tpg->num_node_acls--;
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
spin_lock_bh(&tpg->session_lock);
list_for_each_entry_safe(sess, sess_tmp,
@@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth(
struct se_node_acl *acl;
int dynamic_acl = 0;
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (!acl) {
pr_err("Access Control List entry for %s Initiator"
" Node %s does not exists for TPG %hu, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
return -ENODEV;
}
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
dynamic_acl = 1;
}
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
spin_lock_bh(&tpg->session_lock);
list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth(
tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
spin_unlock_bh(&tpg->session_lock);
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
return -EEXIST;
}
/*
@@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth(
if (init_sess)
tpg->se_tpg_tfo->close_session(init_sess);
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
return -EINVAL;
}
spin_unlock_bh(&tpg->session_lock);
@@ -590,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth(
initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_lock_bh(&tpg->acl_node_lock);
+ spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
- spin_unlock_bh(&tpg->acl_node_lock);
+ spin_unlock_irq(&tpg->acl_node_lock);
return 0;
}
@@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
* not been released because of TFO->tpg_check_demo_mode_cache() == 1
* in transport_deregister_session().
*/
- spin_lock_bh(&se_tpg->acl_node_lock);
+ spin_lock_irq(&se_tpg->acl_node_lock);
list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
acl_list) {
list_del(&nacl->acl_list);
se_tpg->num_node_acls--;
- spin_unlock_bh(&se_tpg->acl_node_lock);
+ spin_unlock_irq(&se_tpg->acl_node_lock);
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
- spin_lock_bh(&se_tpg->acl_node_lock);
+ spin_lock_irq(&se_tpg->acl_node_lock);
}
- spin_unlock_bh(&se_tpg->acl_node_lock);
+ spin_unlock_irq(&se_tpg->acl_node_lock);
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
core_tpg_release_virtual_lun0(se_tpg);
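
The spin_lock_bh() -> spin_lock_irq()/spin_lock_irqsave() conversions above all follow one pattern: plain _irq in process context, _irqsave where the caller may already run with interrupts disabled. A rough kernel-style fragment, not taken from the patch, with invented demo_* names standing in for se_portal_group->acl_node_lock and its list:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Invented demo_* names; the real lock is se_portal_group->acl_node_lock. */
static DEFINE_SPINLOCK(demo_acl_lock);
static LIST_HEAD(demo_acl_list);

struct demo_acl {
	struct list_head acl_list;
};

/* Process-context walker: spin_lock_irq() is enough, as in the converted
 * core_tpg_*() helpers above. */
static void demo_walk(void)
{
	struct demo_acl *acl;

	spin_lock_irq(&demo_acl_lock);
	list_for_each_entry(acl, &demo_acl_list, acl_list)
		;	/* inspect acl */
	spin_unlock_irq(&demo_acl_lock);
}

/* Caller that may already run with interrupts disabled (e.g. a completion
 * path): save and restore the IRQ state instead, as in
 * transport_deregister_session(). */
static void demo_walk_any_context(void)
{
	unsigned long flags;
	struct demo_acl *acl;

	spin_lock_irqsave(&demo_acl_lock, flags);
	list_for_each_entry(acl, &demo_acl_list, acl_list)
		;	/* inspect acl */
	spin_unlock_irqrestore(&demo_acl_lock, flags);
}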
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c6c49b1..0304e76 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -388,17 +388,18 @@ void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
struct se_node_acl *se_nacl;
+ unsigned long flags;
if (!se_tpg) {
transport_free_session(se_sess);
return;
}
- spin_lock_bh(&se_tpg->session_lock);
+ spin_lock_irqsave(&se_tpg->session_lock, flags);
list_del(&se_sess->sess_list);
se_sess->se_tpg = NULL;
se_sess->fabric_sess_ptr = NULL;
- spin_unlock_bh(&se_tpg->session_lock);
+ spin_unlock_irqrestore(&se_tpg->session_lock, flags);
/*
* Determine if we need to do extra work for this initiator node's
@@ -406,22 +407,22 @@ void transport_deregister_session(struct se_session *se_sess)
*/
se_nacl = se_sess->se_node_acl;
if (se_nacl) {
- spin_lock_bh(&se_tpg->acl_node_lock);
+ spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
if (se_nacl->dynamic_node_acl) {
if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
se_tpg)) {
list_del(&se_nacl->acl_list);
se_tpg->num_node_acls--;
- spin_unlock_bh(&se_tpg->acl_node_lock);
+ spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
core_tpg_wait_for_nacl_pr_ref(se_nacl);
core_free_device_list_for_node(se_nacl, se_tpg);
se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
se_nacl);
- spin_lock_bh(&se_tpg->acl_node_lock);
+ spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
}
}
- spin_unlock_bh(&se_tpg->acl_node_lock);
+ spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
}
transport_free_session(se_sess);
@@ -1746,6 +1747,8 @@ int transport_generic_handle_cdb(
}
EXPORT_SYMBOL(transport_generic_handle_cdb);
+static void transport_generic_request_failure(struct se_cmd *,
+ struct se_device *, int, int);
/*
* Used by fabric module frontends to queue tasks directly.
* May only be used from process context
@@ -1753,6 +1756,8 @@ EXPORT_SYMBOL(transport_generic_handle_cdb);
int transport_handle_cdb_direct(
struct se_cmd *cmd)
{
+ int ret;
+
if (!cmd->se_lun) {
dump_stack();
pr_err("cmd->se_lun is NULL\n");
@@ -1764,8 +1769,31 @@ int transport_handle_cdb_direct(
" from interrupt context\n");
return -EINVAL;
}
-
- return transport_generic_new_cmd(cmd);
+ /*
+ * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
+ * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
+ * in existing usage to ensure that outstanding descriptors are handled
+ * correctly during shutdown via transport_generic_wait_for_tasks()
+ *
+ * Also, we don't take cmd->t_state_lock here as we only expect
+ * this to be called for initial descriptor submission.
+ */
+ cmd->t_state = TRANSPORT_NEW_CMD;
+ atomic_set(&cmd->t_transport_active, 1);
+ /*
+ * transport_generic_new_cmd() is already handling QUEUE_FULL,
+ * so follow TRANSPORT_NEW_CMD processing thread context usage
+ * and call transport_generic_request_failure() if necessary.
+ */
+ ret = transport_generic_new_cmd(cmd);
+ if (ret == -EAGAIN)
+ return 0;
+ else if (ret < 0) {
+ cmd->transport_error_status = ret;
+ transport_generic_request_failure(cmd, NULL, 0,
+ (cmd->data_direction != DMA_TO_DEVICE));
+ }
+ return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -2025,8 +2053,14 @@ static void transport_generic_request_failure(
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
-
- if (!sc)
+ /*
+ * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
+ * make the call to transport_send_check_condition_and_sense()
+ * directly. Otherwise expect the fabric to make the call to
+ * transport_send_check_condition_and_sense() after handling
+ * possible unsolicited write data payloads.
+ */
+ if (!sc && !cmd->se_tfo->new_cmd_map)
transport_new_cmd_failure(cmd);
else {
ret = transport_send_check_condition_and_sense(cmd,
@@ -2819,12 +2853,42 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
" transport_dev_end_lba(): %llu\n",
cmd->t_task_lba, sectors,
transport_dev_end_lba(dev));
- pr_err(" We should return CHECK_CONDITION"
- " but we don't yet\n");
- return 0;
+ return -EINVAL;
}
- return sectors;
+ return 0;
+}
+
+static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
+{
+ /*
+ * Determine if the received WRITE_SAME is used for direct
+ * passthrough into Linux/SCSI with struct request via TCM/pSCSI
+ * or we are signaling the use of internal WRITE_SAME + UNMAP=1
+ * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
+ */
+ int passthrough = (dev->transport->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV);
+
+ if (!passthrough) {
+ if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+ pr_err("WRITE_SAME PBDATA and LBDATA"
+ " bits not supported for Block Discard"
+ " Emulation\n");
+ return -ENOSYS;
+ }
+ /*
+ * Currently for the emulated case we only accept
+ * tpws with the UNMAP=1 bit set.
+ */
+ if (!(flags[0] & 0x08)) {
+ pr_err("WRITE_SAME w/o UNMAP bit not"
+ " supported for Block Discard Emulation\n");
+ return -ENOSYS;
+ }
+ }
+
+ return 0;
}
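
target_check_write_same_discard() only looks at one flags byte (PBDATA 0x04, LBDATA 0x02, UNMAP 0x08); the passthrough short-circuit is omitted here. A user-space sketch of just the bit checks:

#include <stdio.h>

/*
 * WRITE SAME flags byte as passed to target_check_write_same_discard():
 * bit 3 (0x08) = UNMAP, bit 2 (0x04) = PBDATA, bit 1 (0x02) = LBDATA.
 */
static int check_write_same_discard(unsigned char flags)
{
	if ((flags & 0x04) || (flags & 0x02))
		return -1;	/* PBDATA/LBDATA not emulated */
	if (!(flags & 0x08))
		return -1;	/* only UNMAP=1 is accepted   */
	return 0;
}

int main(void)
{
	printf("UNMAP=1           -> %d\n", check_write_same_discard(0x08));	/*  0 */
	printf("UNMAP=0           -> %d\n", check_write_same_discard(0x00));	/* -1 */
	printf("UNMAP=1, LBDATA=1 -> %d\n", check_write_same_discard(0x0a));	/* -1 */
	return 0;
}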
/* transport_generic_cmd_sequencer():
@@ -3037,7 +3101,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
if (sectors)
- size = transport_get_size(sectors, cdb, cmd);
+ size = transport_get_size(1, cdb, cmd);
else {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
" supported\n");
@@ -3047,27 +3111,9 @@ static int transport_generic_cmd_sequencer(
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- /*
- * Skip the remaining assignments for TCM/PSCSI passthrough
- */
- if (passthrough)
- break;
-
- if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
- pr_err("WRITE_SAME PBDATA and LBDATA"
- " bits not supported for Block Discard"
- " Emulation\n");
- goto out_invalid_cdb_field;
- }
- /*
- * Currently for the emulated case we only accept
- * tpws with the UNMAP=1 bit set.
- */
- if (!(cdb[10] & 0x08)) {
- pr_err("WRITE_SAME w/o UNMAP bit not"
- " supported for Block Discard Emulation\n");
+ if (target_check_write_same_discard(&cdb[10], dev) < 0)
goto out_invalid_cdb_field;
- }
+
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
@@ -3302,10 +3348,12 @@ static int transport_generic_cmd_sequencer(
cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
/*
* Check to ensure that LBA + Range does not exceed past end of
- * device.
+ * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
*/
- if (!transport_cmd_get_valid_sectors(cmd))
- goto out_invalid_cdb_field;
+ if ((cmd->t_task_lba != 0) || (sectors != 0)) {
+ if (transport_cmd_get_valid_sectors(cmd) < 0)
+ goto out_invalid_cdb_field;
+ }
break;
case UNMAP:
size = get_unaligned_be16(&cdb[7]);
@@ -3317,40 +3365,38 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
if (sectors)
- size = transport_get_size(sectors, cdb, cmd);
+ size = transport_get_size(1, cdb, cmd);
else {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
}
- cmd->t_task_lba = get_unaligned_be16(&cdb[2]);
- passthrough = (dev->transport->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
- /*
- * Determine if the received WRITE_SAME_16 is used to for direct
- * passthrough into Linux/SCSI with struct request via TCM/pSCSI
- * or we are signaling the use of internal WRITE_SAME + UNMAP=1
- * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and
- * TCM/FILEIO subsystem plugin backstores.
- */
- if (!passthrough) {
- if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
- pr_err("WRITE_SAME PBDATA and LBDATA"
- " bits not supported for Block Discard"
- " Emulation\n");
- goto out_invalid_cdb_field;
- }
- /*
- * Currently for the emulated case we only accept
- * tpws with the UNMAP=1 bit set.
- */
- if (!(cdb[1] & 0x08)) {
- pr_err("WRITE_SAME w/o UNMAP bit not "
- " supported for Block Discard Emulation\n");
- goto out_invalid_cdb_field;
- }
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+
+ if (target_check_write_same_discard(&cdb[1], dev) < 0)
+ goto out_invalid_cdb_field;
+ break;
+ case WRITE_SAME:
+ sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+ if (sector_ret)
+ goto out_unsupported_cdb;
+
+ if (sectors)
+ size = transport_get_size(1, cdb, cmd);
+ else {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+ goto out_invalid_cdb_field;
}
+
+ cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ /*
+ * Follow sbcr26 with WRITE_SAME (10) and check for the existence
+ * of byte 1 bit 3 UNMAP instead of original reserved field
+ */
+ if (target_check_write_same_discard(&cdb[1], dev) < 0)
+ goto out_invalid_cdb_field;
break;
case ALLOW_MEDIUM_REMOVAL:
case GPCMD_CLOSE_TRACK:
@@ -3845,9 +3891,7 @@ EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
static int transport_new_cmd_obj(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- u32 task_cdbs;
- u32 rc;
- int set_counts = 1;
+ int set_counts = 1, rc, task_cdbs;
/*
* Setup any BIDI READ tasks and memory from
@@ -3865,7 +3909,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ return -EINVAL;
}
atomic_inc(&cmd->t_fe_count);
atomic_inc(&cmd->t_se_count);
@@ -3884,7 +3928,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ return -EINVAL;
}
if (set_counts) {
@@ -4000,8 +4044,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
if (!task->task_sg)
continue;
- BUG_ON(!task->task_padded_sg);
-
if (!sg_first) {
sg_first = task->task_sg;
chained_nents = task->task_sg_nents;
@@ -4009,9 +4051,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
sg_chain(sg_prev, sg_prev_nents, task->task_sg);
chained_nents += task->task_sg_nents;
}
+ /*
+ * For the padded tasks, use the extra SGL vector allocated
+ * in transport_allocate_data_tasks() for the sg_prev_nents
+ * offset into sg_chain() above. The last task of a
+ * multi-task list, or a single task, will not have
+ * task->task_padded_sg set.
+ */
+ if (task->task_padded_sg)
+ sg_prev_nents = (task->task_sg_nents + 1);
+ else
+ sg_prev_nents = task->task_sg_nents;
sg_prev = task->task_sg;
- sg_prev_nents = task->task_sg_nents;
}
/*
* Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4051,20 +4103,19 @@ static int transport_allocate_data_tasks(
struct se_task *task;
struct se_device *dev = cmd->se_dev;
unsigned long flags;
- sector_t sectors;
int task_count, i, ret;
- sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+ sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
struct scatterlist *sg;
struct scatterlist *cmd_sg;
WARN_ON(cmd->data_length % sector_size);
sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
- task_count = DIV_ROUND_UP(sectors, dev_max_sectors);
-
+ task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
+
cmd_sg = sgl;
for (i = 0; i < task_count; i++) {
- unsigned int task_size;
+ unsigned int task_size, task_sg_nents_padded;
int count;
task = transport_generic_get_task(cmd, data_direction);
@@ -4083,30 +4134,33 @@ static int transport_allocate_data_tasks(
/* Update new cdb with updated lba/sectors */
cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
-
+ /*
+ * This now assumes that the passed sg_ents are in PAGE_SIZE chunks
+ * in order to calculate the number of per-task SGL entries
+ */
+ task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
/*
* Check if the fabric module driver is requesting that all
* struct se_task->task_sg[] be chained together.. If so,
* then allocate an extra padding SG entry for linking and
- * marking the end of the chained SGL.
- * Possibly over-allocate task sgl size by using cmd sgl size.
- * It's so much easier and only a waste when task_count > 1.
- * That is extremely rare.
+ * marking the end of the chained SGL for every task except
+ * the last one for (task_count > 1) operation, or skipping
+ * the extra padding for the (task_count == 1) case.
*/
- task->task_sg_nents = sgl_nents;
- if (cmd->se_tfo->task_sg_chaining) {
- task->task_sg_nents++;
+ if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
+ task_sg_nents_padded = (task->task_sg_nents + 1);
task->task_padded_sg = 1;
- }
+ } else
+ task_sg_nents_padded = task->task_sg_nents;
task->task_sg = kmalloc(sizeof(struct scatterlist) *
- task->task_sg_nents, GFP_KERNEL);
+ task_sg_nents_padded, GFP_KERNEL);
if (!task->task_sg) {
cmd->se_dev->transport->free_task(task);
return -ENOMEM;
}
- sg_init_table(task->task_sg, task->task_sg_nents);
+ sg_init_table(task->task_sg, task_sg_nents_padded);
task_size = task->task_size;
@@ -4203,10 +4257,13 @@ static u32 transport_allocate_tasks(
struct scatterlist *sgl,
unsigned int sgl_nents)
{
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+ if (transport_cmd_get_valid_sectors(cmd) < 0)
+ return -EINVAL;
+
return transport_allocate_data_tasks(cmd, lba, data_direction,
sgl, sgl_nents);
- else
+ } else
return transport_allocate_control_task(cmd);
}
@@ -4699,6 +4756,13 @@ int transport_send_check_condition_and_sense(
*/
switch (reason) {
case TCM_NON_EXISTENT_LUN:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL UNIT NOT SUPPORTED */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
+ break;
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_SECTOR_COUNT_TOO_MANY:
/* CURRENT ERROR */
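
The new TCM_NON_EXISTENT_LUN case fills standard fixed-format sense data: response code 0x70, sense key ILLEGAL REQUEST (0x05) at byte 2, ASC 0x25 (LOGICAL UNIT NOT SUPPORTED) at byte 12. A stand-alone sketch of that layout; the 18-byte buffer and additional-sense-length byte follow the usual SPC fixed format rather than this hunk:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buffer[18];
	int i;

	memset(buffer, 0, sizeof(buffer));
	buffer[0]  = 0x70;	/* CURRENT ERROR, fixed format     */
	buffer[2]  = 0x05;	/* sense key: ILLEGAL REQUEST      */
	buffer[7]  = 0x0a;	/* additional sense length         */
	buffer[12] = 0x25;	/* ASC: LOGICAL UNIT NOT SUPPORTED */
	buffer[13] = 0x00;	/* ASCQ                            */

	for (i = 0; i < (int)sizeof(buffer); i++)
		printf("%02x ", buffer[i]);
	printf("\n");
	return 0;
}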
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index f7fff7e..bd4fe21 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -187,4 +187,9 @@ void ft_dump_cmd(struct ft_cmd *, const char *caller);
ssize_t ft_format_wwn(char *, size_t, u64);
+/*
+ * Underlying HW specific helper function
+ */
+void ft_invl_hw_context(struct ft_cmd *);
+
#endif /* __TCM_FC_H__ */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index e095d81..3633f69 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -319,6 +319,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
default:
pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
+ ft_invl_hw_context(cmd);
fc_frame_free(fp);
transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
break;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index d526896..ea30e3f 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -255,7 +255,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
struct se_portal_group *se_tpg = &tpg->se_tpg;
struct se_node_acl *se_acl;
- spin_lock_bh(&se_tpg->acl_node_lock);
+ spin_lock_irq(&se_tpg->acl_node_lock);
list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
pr_debug("acl %p port_name %llx\n",
@@ -269,7 +269,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
break;
}
}
- spin_unlock_bh(&se_tpg->acl_node_lock);
+ spin_unlock_irq(&se_tpg->acl_node_lock);
return found;
}
@@ -654,9 +654,7 @@ static void __exit ft_exit(void)
synchronize_rcu();
}
-#ifdef MODULE
MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
MODULE_LICENSE("GPL");
module_init(ft_init);
module_exit(ft_exit);
-#endif /* MODULE */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index a4ae12f..ea0e7af 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -212,62 +212,49 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
goto drop;
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ ep = fc_seq_exch(seq);
+ lport = ep->lp;
+ if (cmd->was_ddp_setup) {
+ BUG_ON(!ep);
+ BUG_ON(!lport);
+ }
+
/*
- * Doesn't expect even single byte of payload. Payload
+ * Doesn't expect payload if DDP is setup. Payload
* is expected to be copied directly to user buffers
- * due to DDP (Large Rx offload) feature, hence
- * BUG_ON if BUF is non-NULL
+ * due to DDP (Large Rx offload),
*/
buf = fc_frame_payload_get(fp, 1);
- if (cmd->was_ddp_setup && buf) {
- pr_debug("%s: When DDP was setup, not expected to"
- "receive frame with payload, Payload shall be"
- "copied directly to buffer instead of coming "
- "via. legacy receive queues\n", __func__);
- BUG_ON(buf);
- }
+ if (buf)
+ pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+ "cmd->sg_cnt 0x%x. DDP was setup"
+ " hence not expected to receive frame with "
+ "payload, Frame will be dropped if "
+ "'Sequence Initiative' bit in f_ctl is "
+ "not set\n", __func__, ep->xid, f_ctl,
+ cmd->sg, cmd->sg_cnt);
+ /*
+ * Invalidate the HW DDP context if it was set up for the respective
+ * command. Invalidation of the HW DDP context is required in both
+ * situations (success and error).
+ */
+ ft_invl_hw_context(cmd);
/*
- * If ft_cmd indicated 'ddp_setup', in that case only the last frame
- * should come with 'TSI bit being set'. If 'TSI bit is not set and if
- * data frame appears here, means error condition. In both the cases
- * release the DDP context (ddp_put) and in error case, as well
- * initiate error recovery mechanism.
+ * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
+ * write data frame is received successfully where payload is
+ * posted directly to user buffer and only the last frame's
+ * header is posted in receive queue.
+ *
+ * If "Sequence Initiative (TSI)" bit is not set, means error
+ * condition w.r.t. DDP, hence drop the packet and let explict
+ * ABORTS from other end of exchange timer trigger the recovery.
*/
- ep = fc_seq_exch(seq);
- if (cmd->was_ddp_setup) {
- BUG_ON(!ep);
- lport = ep->lp;
- BUG_ON(!lport);
- }
- if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
- f_ctl = ntoh24(fh->fh_f_ctl);
- /*
- * If TSI bit set in f_ctl, means last write data frame is
- * received successfully where payload is posted directly
- * to user buffer and only the last frame's header is posted
- * in legacy receive queue
- */
- if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
- cmd->write_data_len = lport->tt.ddp_done(lport,
- ep->xid);
- goto last_frame;
- } else {
- /*
- * Updating the write_data_len may be meaningless at
- * this point, but just in case if required in future
- * for debugging or any other purpose
- */
- pr_err("%s: Received frame with TSI bit not"
- " being SET, dropping the frame, "
- "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
- __func__, cmd->sg, cmd->sg_cnt);
- cmd->write_data_len = lport->tt.ddp_done(lport,
- ep->xid);
- lport->tt.seq_exch_abort(cmd->seq, 0);
- goto drop;
- }
- }
+ if (f_ctl & FC_FC_SEQ_INIT)
+ goto last_frame;
+ else
+ goto drop;
rel_off = ntohl(fh->fh_parm_offset);
frame_len = fr_len(fp);
@@ -330,3 +317,39 @@ last_frame:
drop:
fc_frame_free(fp);
}
+
+/*
+ * Handle and clean up any HW-specific resources if
+ * ABORTS, errors, or timeouts are received.
+ */
+void ft_invl_hw_context(struct ft_cmd *cmd)
+{
+ struct fc_seq *seq = cmd->seq;
+ struct fc_exch *ep = NULL;
+ struct fc_lport *lport = NULL;
+
+ BUG_ON(!cmd);
+
+ /* Cleanup the DDP context in HW if DDP was setup */
+ if (cmd->was_ddp_setup && seq) {
+ ep = fc_seq_exch(seq);
+ if (ep) {
+ lport = ep->lp;
+ if (lport && (ep->xid <= lport->lro_xid))
+ /*
+ * "ddp_done" trigger invalidation of HW
+ * specific DDP context
+ */
+ cmd->write_data_len = lport->tt.ddp_done(lport,
+ ep->xid);
+
+ /*
+ * Resetting same variable to indicate HW's
+ * DDP context has been invalidated to avoid
+ * re_invalidation of same context (context is
+ * identified using ep->xid)
+ */
+ cmd->was_ddp_setup = 0;
+ }
+ }
+}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index bf7c687..f7f71b2 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -14,11 +14,7 @@ menuconfig THERMAL
If you want this support, you should say Y or M here.
config THERMAL_HWMON
- bool "Hardware monitoring support"
+ bool
depends on THERMAL
depends on HWMON=y || HWMON=THERMAL
- help
- The generic thermal sysfs driver's hardware monitoring support
- requires a 2.10.7/3.0.2 or later lm-sensors userspace.
-
- Say Y if your user-space is new enough.
+ default y
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 0b1c82a..708f8e9 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -420,6 +420,29 @@ thermal_cooling_device_trip_point_show(struct device *dev,
/* hwmon sys I/F */
#include <linux/hwmon.h>
+
+/* thermal zone devices with the same type share one hwmon device */
+struct thermal_hwmon_device {
+ char type[THERMAL_NAME_LENGTH];
+ struct device *device;
+ int count;
+ struct list_head tz_list;
+ struct list_head node;
+};
+
+struct thermal_hwmon_attr {
+ struct device_attribute attr;
+ char name[16];
+};
+
+/* one temperature input for each thermal zone */
+struct thermal_hwmon_temp {
+ struct list_head hwmon_node;
+ struct thermal_zone_device *tz;
+ struct thermal_hwmon_attr temp_input; /* hwmon sys attr */
+ struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */
+};
+
static LIST_HEAD(thermal_hwmon_list);
static ssize_t
@@ -437,9 +460,10 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
int ret;
struct thermal_hwmon_attr *hwmon_attr
= container_of(attr, struct thermal_hwmon_attr, attr);
- struct thermal_zone_device *tz
- = container_of(hwmon_attr, struct thermal_zone_device,
+ struct thermal_hwmon_temp *temp
+ = container_of(hwmon_attr, struct thermal_hwmon_temp,
temp_input);
+ struct thermal_zone_device *tz = temp->tz;
ret = tz->ops->get_temp(tz, &temperature);
@@ -455,9 +479,10 @@ temp_crit_show(struct device *dev, struct device_attribute *attr,
{
struct thermal_hwmon_attr *hwmon_attr
= container_of(attr, struct thermal_hwmon_attr, attr);
- struct thermal_zone_device *tz
- = container_of(hwmon_attr, struct thermal_zone_device,
+ struct thermal_hwmon_temp *temp
+ = container_of(hwmon_attr, struct thermal_hwmon_temp,
temp_crit);
+ struct thermal_zone_device *tz = temp->tz;
long temperature;
int ret;
@@ -469,22 +494,54 @@ temp_crit_show(struct device *dev, struct device_attribute *attr,
}
-static int
-thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+static struct thermal_hwmon_device *
+thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
{
struct thermal_hwmon_device *hwmon;
- int new_hwmon_device = 1;
- int result;
mutex_lock(&thermal_list_lock);
list_for_each_entry(hwmon, &thermal_hwmon_list, node)
if (!strcmp(hwmon->type, tz->type)) {
- new_hwmon_device = 0;
mutex_unlock(&thermal_list_lock);
- goto register_sys_interface;
+ return hwmon;
+ }
+ mutex_unlock(&thermal_list_lock);
+
+ return NULL;
+}
+
+/* Find the temperature input matching a given thermal zone */
+static struct thermal_hwmon_temp *
+thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
+ const struct thermal_zone_device *tz)
+{
+ struct thermal_hwmon_temp *temp;
+
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(temp, &hwmon->tz_list, hwmon_node)
+ if (temp->tz == tz) {
+ mutex_unlock(&thermal_list_lock);
+ return temp;
}
mutex_unlock(&thermal_list_lock);
+ return NULL;
+}
+
+static int
+thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ struct thermal_hwmon_device *hwmon;
+ struct thermal_hwmon_temp *temp;
+ int new_hwmon_device = 1;
+ int result;
+
+ hwmon = thermal_hwmon_lookup_by_type(tz);
+ if (hwmon) {
+ new_hwmon_device = 0;
+ goto register_sys_interface;
+ }
+
hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
@@ -502,30 +559,36 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
goto free_mem;
register_sys_interface:
- tz->hwmon = hwmon;
+ temp = kzalloc(sizeof(struct thermal_hwmon_temp), GFP_KERNEL);
+ if (!temp) {
+ result = -ENOMEM;
+ goto unregister_name;
+ }
+
+ temp->tz = tz;
hwmon->count++;
- snprintf(tz->temp_input.name, THERMAL_NAME_LENGTH,
+ snprintf(temp->temp_input.name, THERMAL_NAME_LENGTH,
"temp%d_input", hwmon->count);
- tz->temp_input.attr.attr.name = tz->temp_input.name;
- tz->temp_input.attr.attr.mode = 0444;
- tz->temp_input.attr.show = temp_input_show;
- sysfs_attr_init(&tz->temp_input.attr.attr);
- result = device_create_file(hwmon->device, &tz->temp_input.attr);
+ temp->temp_input.attr.attr.name = temp->temp_input.name;
+ temp->temp_input.attr.attr.mode = 0444;
+ temp->temp_input.attr.show = temp_input_show;
+ sysfs_attr_init(&temp->temp_input.attr.attr);
+ result = device_create_file(hwmon->device, &temp->temp_input.attr);
if (result)
- goto unregister_name;
+ goto free_temp_mem;
if (tz->ops->get_crit_temp) {
unsigned long temperature;
if (!tz->ops->get_crit_temp(tz, &temperature)) {
- snprintf(tz->temp_crit.name, THERMAL_NAME_LENGTH,
+ snprintf(temp->temp_crit.name, THERMAL_NAME_LENGTH,
"temp%d_crit", hwmon->count);
- tz->temp_crit.attr.attr.name = tz->temp_crit.name;
- tz->temp_crit.attr.attr.mode = 0444;
- tz->temp_crit.attr.show = temp_crit_show;
- sysfs_attr_init(&tz->temp_crit.attr.attr);
+ temp->temp_crit.attr.attr.name = temp->temp_crit.name;
+ temp->temp_crit.attr.attr.mode = 0444;
+ temp->temp_crit.attr.show = temp_crit_show;
+ sysfs_attr_init(&temp->temp_crit.attr.attr);
result = device_create_file(hwmon->device,
- &tz->temp_crit.attr);
+ &temp->temp_crit.attr);
if (result)
goto unregister_input;
}
@@ -534,13 +597,15 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
mutex_lock(&thermal_list_lock);
if (new_hwmon_device)
list_add_tail(&hwmon->node, &thermal_hwmon_list);
- list_add_tail(&tz->hwmon_node, &hwmon->tz_list);
+ list_add_tail(&temp->hwmon_node, &hwmon->tz_list);
mutex_unlock(&thermal_list_lock);
return 0;
unregister_input:
- device_remove_file(hwmon->device, &tz->temp_input.attr);
+ device_remove_file(hwmon->device, &temp->temp_input.attr);
+ free_temp_mem:
+ kfree(temp);
unregister_name:
if (new_hwmon_device) {
device_remove_file(hwmon->device, &dev_attr_name);
@@ -556,15 +621,30 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
static void
thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
- struct thermal_hwmon_device *hwmon = tz->hwmon;
+ struct thermal_hwmon_device *hwmon;
+ struct thermal_hwmon_temp *temp;
+
+ hwmon = thermal_hwmon_lookup_by_type(tz);
+ if (unlikely(!hwmon)) {
+ /* Should never happen... */
+ dev_dbg(&tz->device, "hwmon device lookup failed!\n");
+ return;
+ }
+
+ temp = thermal_hwmon_lookup_temp(hwmon, tz);
+ if (unlikely(!temp)) {
+ /* Should never happen... */
+ dev_dbg(&tz->device, "temperature input lookup failed!\n");
+ return;
+ }
- tz->hwmon = NULL;
- device_remove_file(hwmon->device, &tz->temp_input.attr);
+ device_remove_file(hwmon->device, &temp->temp_input.attr);
if (tz->ops->get_crit_temp)
- device_remove_file(hwmon->device, &tz->temp_crit.attr);
+ device_remove_file(hwmon->device, &temp->temp_crit.attr);
mutex_lock(&thermal_list_lock);
- list_del(&tz->hwmon_node);
+ list_del(&temp->hwmon_node);
+ kfree(temp);
if (!list_empty(&hwmon->tz_list)) {
mutex_unlock(&thermal_list_lock);
return;
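
The hwmon rework above moves the sysfs attributes out of struct thermal_zone_device and into a per-zone struct thermal_hwmon_temp, so the show() callbacks now go attribute -> thermal_hwmon_temp -> temp->tz. A user-space sketch with trimmed-down stand-in structures and a local container_of():

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Trimmed-down stand-ins for the structures added in thermal_sys.c. */
struct thermal_hwmon_attr {
	int attr;			/* stands in for struct device_attribute */
	char name[16];
};

struct thermal_hwmon_temp {
	void *tz;			/* -> struct thermal_zone_device */
	struct thermal_hwmon_attr temp_input;
	struct thermal_hwmon_attr temp_crit;
};

int main(void)
{
	struct thermal_hwmon_temp temp = { .tz = (void *)0x1234 };
	struct thermal_hwmon_attr *attr = &temp.temp_input;

	/* What temp_input_show() now does: attribute -> per-zone temp entry,
	 * then temp->tz, instead of embedding the attribute in the zone. */
	struct thermal_hwmon_temp *owner =
		container_of(attr, struct thermal_hwmon_temp, temp_input);

	printf("tz = %p\n", owner->tz);
	return 0;
}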
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index 03c285b..3a99776 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -25,7 +25,7 @@
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 98b6e3b..e809e9d 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -446,8 +446,19 @@ static inline void legacy_pty_init(void) { }
int pty_limit = NR_UNIX98_PTY_DEFAULT;
static int pty_limit_min;
static int pty_limit_max = NR_UNIX98_PTY_MAX;
+static int tty_count;
static int pty_count;
+static inline void pty_inc_count(void)
+{
+ pty_count = (++tty_count) / 2;
+}
+
+static inline void pty_dec_count(void)
+{
+ pty_count = (--tty_count) / 2;
+}
+
static struct cdev ptmx_cdev;
static struct ctl_table pty_table[] = {
@@ -542,6 +553,7 @@ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
static void pty_unix98_shutdown(struct tty_struct *tty)
{
+ tty_driver_remove_tty(tty->driver, tty);
/* We have our own method as we don't use the tty index */
kfree(tty->termios);
}
@@ -588,7 +600,8 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
*/
tty_driver_kref_get(driver);
tty->count++;
- pty_count++;
+ pty_inc_count(); /* tty */
+ pty_inc_count(); /* tty->link */
return 0;
err_free_mem:
deinitialize_tty_struct(o_tty);
@@ -602,7 +615,7 @@ err_free_tty:
static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
{
- pty_count--;
+ pty_dec_count();
}
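
Since every pty open creates a master/slave pair of tty_structs, the new helpers keep the sysctl-visible pty count at half the tty count. A trivial stand-alone sketch of that bookkeeping, mirroring the two pty_inc_count() calls in pty_unix98_install():

#include <stdio.h>

/* A pty is a pair of tty_structs (master + slave), so the sysctl-visible
 * pty count is half the tty count. */
static int tty_count, pty_count;

static void pty_inc_count(void) { pty_count = (++tty_count) / 2; }
static void pty_dec_count(void) { pty_count = (--tty_count) / 2; }

int main(void)
{
	pty_inc_count();	/* tty       */
	pty_inc_count();	/* tty->link */
	printf("after one open: ptys=%d ttys=%d\n", pty_count, tty_count); /* 1, 2 */

	pty_dec_count();
	pty_dec_count();
	printf("after teardown: ptys=%d ttys=%d\n", pty_count, tty_count); /* 0, 0 */
	return 0;
}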
static const struct tty_operations ptm_unix98_ops = {
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 13043e8..6a1241c 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -83,7 +83,7 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index f2dfec8..7f50999 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -1819,6 +1819,8 @@ static void serial8250_backup_timeout(unsigned long data)
unsigned int iir, ier = 0, lsr;
unsigned long flags;
+ spin_lock_irqsave(&up->port.lock, flags);
+
/*
* Must disable interrupts or else we risk racing with the interrupt
* based handler.
@@ -1836,10 +1838,8 @@ static void serial8250_backup_timeout(unsigned long data)
* the "Diva" UART used on the management processor on many HP
* ia64 and parisc boxes.
*/
- spin_lock_irqsave(&up->port.lock, flags);
lsr = serial_in(up, UART_LSR);
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
- spin_unlock_irqrestore(&up->port.lock, flags);
if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
(!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
(lsr & UART_LSR_THRE)) {
@@ -1848,11 +1848,13 @@ static void serial8250_backup_timeout(unsigned long data)
}
if (!(iir & UART_IIR_NO_INT))
- serial8250_handle_port(up);
+ transmit_chars(up);
if (is_real_interrupt(up->port.irq))
serial_out(up, UART_IER, ier);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
/* Standard timer interval plus 0.2s to keep the port running */
mod_timer(&up->timer,
jiffies + uart_poll_timeout(&up->port) + HZ / 5);
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 6b887d9..3abeca2 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -1599,11 +1599,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.device = 0x800D,
.init = pci_eg20t_init,
},
- {
- .vendor = 0x10DB,
- .device = 0x800D,
- .init = pci_eg20t_init,
- },
/*
* Cronyx Omega PCI (PLX-chip based)
*/
@@ -4021,7 +4016,7 @@ static struct pci_device_id serial_pci_tbl[] = {
0, 0, pbn_NETMOS9900_2s_115200 },
/*
- * Best Connectivity PCI Multi I/O cards
+ * Best Connectivity and Rosewill PCI Multi I/O cards
*/
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
@@ -4029,6 +4024,10 @@ static struct pci_device_id serial_pci_tbl[] = {
0, 0, pbn_b0_1_115200 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x3002,
+ 0, 0, pbn_b0_bt_2_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
0xA000, 0x3004,
0, 0, pbn_b0_bt_4_115200 },
/* Intel CE4100 */
diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
index fc301f6..a2f2365 100644
--- a/drivers/tty/serial/8250_pnp.c
+++ b/drivers/tty/serial/8250_pnp.c
@@ -109,6 +109,9 @@ static const struct pnp_device_id pnp_dev_table[] = {
/* IBM */
/* IBM Thinkpad 701 Internal Modem Voice */
{ "IBM0033", 0 },
+ /* Intermec */
+ /* Intermec CV60 touchscreen port */
+ { "PNP4972", 0 },
/* Intertex */
/* Intertex 28k8 33k6 Voice EXT PnP */
{ "IXDC801", 0 },
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index cb40b82..4dcb37b 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -959,7 +959,7 @@ config SERIAL_IP22_ZILOG_CONSOLE
config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
- depends on HAVE_CLK && (SUPERH || H8300 || ARCH_SHMOBILE)
+ depends on HAVE_CLK && (SUPERH || ARCH_SHMOBILE)
select SERIAL_CORE
config SERIAL_SH_SCI_NR_UARTS
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index af9b781..b922f5d 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1609,9 +1609,11 @@ static struct console atmel_console = {
static int __init atmel_console_init(void)
{
if (atmel_default_console_device) {
- add_preferred_console(ATMEL_DEVICENAME,
- atmel_default_console_device->id, NULL);
- atmel_init_port(&atmel_ports[atmel_default_console_device->id],
+ struct atmel_uart_data *pdata =
+ atmel_default_console_device->dev.platform_data;
+
+ add_preferred_console(ATMEL_DEVICENAME, pdata->num, NULL);
+ atmel_init_port(&atmel_ports[pdata->num],
atmel_default_console_device);
register_console(&atmel_console);
}
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 225123b..58be715 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -4450,7 +4450,7 @@ static int __init rs_init(void)
#if defined(CONFIG_ETRAX_RS485)
#if defined(CONFIG_ETRAX_RS485_ON_PA)
- if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit,
+ if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
rs485_pa_bit)) {
printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
"RS485 pin\n");
@@ -4459,7 +4459,7 @@ static int __init rs_init(void)
}
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
- if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit,
+ if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
rs485_port_g_bit)) {
printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
"RS485 pin\n");
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 57421d7..ddc487a 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -48,7 +48,7 @@
#include <linux/sysrq.h>
#include <linux/tty.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/system.h>
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 22fe801..7e91b3d 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -45,10 +45,11 @@
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <mach/hardware.h>
#include <mach/imx-uart.h>
/* Register definitions */
@@ -66,8 +67,9 @@
#define UBIR 0xa4 /* BRM Incremental Register */
#define UBMR 0xa8 /* BRM Modulator Register */
#define UBRC 0xac /* Baud Rate Count Register */
-#define MX2_ONEMS 0xb0 /* One Millisecond register */
-#define UTS (cpu_is_mx1() ? 0xd0 : 0xb4) /* UART Test Register */
+#define IMX21_ONEMS 0xb0 /* One Millisecond register */
+#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
+#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
/* UART Control Register Bit Fields.*/
#define URXD_CHARRDY (1<<15)
@@ -87,7 +89,7 @@
#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
#define UCR1_SNDBRK (1<<4) /* Send break */
#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
-#define MX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, mx1 only */
+#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
#define UCR1_DOZE (1<<1) /* Doze */
#define UCR1_UARTEN (1<<0) /* UART enabled */
#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
@@ -113,9 +115,7 @@
#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
-#define MX1_UCR3_REF25 (1<<3) /* Ref freq 25 MHz, only on mx1 */
-#define MX1_UCR3_REF30 (1<<2) /* Ref Freq 30 MHz, only on mx1 */
-#define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */
+#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
#define UCR3_BPEN (1<<0) /* Preset registers enable */
#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
@@ -181,6 +181,18 @@
#define UART_NR 8
+/* i.mx21 type uart runs on all i.mx except i.mx1 */
+enum imx_uart_type {
+ IMX1_UART,
+ IMX21_UART,
+};
+
+/* device type dependent stuff */
+struct imx_uart_data {
+ unsigned uts_reg;
+ enum imx_uart_type devtype;
+};
+
struct imx_port {
struct uart_port port;
struct timer_list timer;
@@ -192,6 +204,7 @@ struct imx_port {
unsigned int irda_inv_tx:1;
unsigned short trcv_delay; /* transceiver delay */
struct clk *clk;
+ struct imx_uart_data *devdata;
};
#ifdef CONFIG_IRDA
@@ -200,6 +213,52 @@ struct imx_port {
#define USE_IRDA(sport) (0)
#endif
+static struct imx_uart_data imx_uart_devdata[] = {
+ [IMX1_UART] = {
+ .uts_reg = IMX1_UTS,
+ .devtype = IMX1_UART,
+ },
+ [IMX21_UART] = {
+ .uts_reg = IMX21_UTS,
+ .devtype = IMX21_UART,
+ },
+};
+
+static struct platform_device_id imx_uart_devtype[] = {
+ {
+ .name = "imx1-uart",
+ .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
+ }, {
+ .name = "imx21-uart",
+ .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
+
+static struct of_device_id imx_uart_dt_ids[] = {
+ { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
+ { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
+
+static inline unsigned uts_reg(struct imx_port *sport)
+{
+ return sport->devdata->uts_reg;
+}
+
+static inline int is_imx1_uart(struct imx_port *sport)
+{
+ return sport->devdata->devtype == IMX1_UART;
+}
+
+static inline int is_imx21_uart(struct imx_port *sport)
+{
+ return sport->devdata->devtype == IMX21_UART;
+}
+
/*
* Handle any change of modem status signal since we were last called.
*/
@@ -326,7 +385,8 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
struct circ_buf *xmit = &sport->port.state->xmit;
while (!uart_circ_empty(xmit) &&
- !(readl(sport->port.membase + UTS) & UTS_TXFULL)) {
+ !(readl(sport->port.membase + uts_reg(sport))
+ & UTS_TXFULL)) {
/* send xmit->buf[xmit->tail]
* out the port here */
writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
@@ -373,7 +433,7 @@ static void imx_start_tx(struct uart_port *port)
writel(temp, sport->port.membase + UCR4);
}
- if (readl(sport->port.membase + UTS) & UTS_TXEMPTY)
+ if (readl(sport->port.membase + uts_reg(sport)) & UTS_TXEMPTY)
imx_transmit_buffer(sport);
}
@@ -689,9 +749,9 @@ static int imx_startup(struct uart_port *port)
}
}
- if (!cpu_is_mx1()) {
+ if (is_imx21_uart(sport)) {
temp = readl(sport->port.membase + UCR3);
- temp |= MX2_UCR3_RXDMUXSEL;
+ temp |= IMX21_UCR3_RXDMUXSEL;
writel(temp, sport->port.membase + UCR3);
}
@@ -923,9 +983,9 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
writel(num, sport->port.membase + UBIR);
writel(denom, sport->port.membase + UBMR);
- if (!cpu_is_mx1())
+ if (is_imx21_uart(sport))
writel(sport->port.uartclk / div / 1000,
- sport->port.membase + MX2_ONEMS);
+ sport->port.membase + IMX21_ONEMS);
writel(old_ucr1, sport->port.membase + UCR1);
@@ -1041,7 +1101,7 @@ static void imx_console_putchar(struct uart_port *port, int ch)
{
struct imx_port *sport = (struct imx_port *)port;
- while (readl(sport->port.membase + UTS) & UTS_TXFULL)
+ while (readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)
barrier();
writel(ch, sport->port.membase + URTX0);
@@ -1062,8 +1122,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
ucr1 = old_ucr1 = readl(sport->port.membase + UCR1);
old_ucr2 = readl(sport->port.membase + UCR2);
- if (cpu_is_mx1())
- ucr1 |= MX1_UCR1_UARTCLKEN;
+ if (is_imx1_uart(sport))
+ ucr1 |= IMX1_UCR1_UARTCLKEN;
ucr1 |= UCR1_UARTEN;
ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);
@@ -1222,6 +1282,58 @@ static int serial_imx_resume(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_OF
+static int serial_imx_probe_dt(struct imx_port *sport,
+ struct platform_device *pdev)
+{
+ static int portnum = 0;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(imx_uart_dt_ids, &pdev->dev);
+
+ if (!np)
+ return -ENODEV;
+
+ sport->port.line = portnum++;
+ if (sport->port.line >= UART_NR)
+ return -EINVAL;
+
+ if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
+ sport->have_rtscts = 1;
+
+ if (of_get_property(np, "fsl,irda-mode", NULL))
+ sport->use_irda = 1;
+
+ sport->devdata = of_id->data;
+
+ return 0;
+}
+#else
+static inline int serial_imx_probe_dt(struct imx_port *sport,
+ struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
+static void serial_imx_probe_pdata(struct imx_port *sport,
+ struct platform_device *pdev)
+{
+ struct imxuart_platform_data *pdata = pdev->dev.platform_data;
+
+ sport->port.line = pdev->id;
+ sport->devdata = (struct imx_uart_data *) pdev->id_entry->driver_data;
+
+ if (!pdata)
+ return;
+
+ if (pdata->flags & IMXUART_HAVE_RTSCTS)
+ sport->have_rtscts = 1;
+
+ if (pdata->flags & IMXUART_IRDA)
+ sport->use_irda = 1;
+}
+
static int serial_imx_probe(struct platform_device *pdev)
{
struct imx_port *sport;
@@ -1234,6 +1346,10 @@ static int serial_imx_probe(struct platform_device *pdev)
if (!sport)
return -ENOMEM;
+ ret = serial_imx_probe_dt(sport, pdev);
+ if (ret == -ENODEV)
+ serial_imx_probe_pdata(sport, pdev);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
@@ -1258,7 +1374,6 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.fifosize = 32;
sport->port.ops = &imx_pops;
sport->port.flags = UPF_BOOT_AUTOCONF;
- sport->port.line = pdev->id;
init_timer(&sport->timer);
sport->timer.function = imx_timeout;
sport->timer.data = (unsigned long)sport;
@@ -1272,17 +1387,9 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.uartclk = clk_get_rate(sport->clk);
- imx_ports[pdev->id] = sport;
+ imx_ports[sport->port.line] = sport;
pdata = pdev->dev.platform_data;
- if (pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
- sport->have_rtscts = 1;
-
-#ifdef CONFIG_IRDA
- if (pdata && (pdata->flags & IMXUART_IRDA))
- sport->use_irda = 1;
-#endif
-
if (pdata && pdata->init) {
ret = pdata->init(pdev);
if (ret)
@@ -1340,9 +1447,11 @@ static struct platform_driver serial_imx_driver = {
.suspend = serial_imx_suspend,
.resume = serial_imx_resume,
+ .id_table = imx_uart_devtype,
.driver = {
.name = "imx-uart",
.owner = THIS_MODULE,
+ .of_match_table = imx_uart_dt_ids,
},
};
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index a1fe304..d73aadd 100644
--- a/drivers/tty/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
@@ -340,5 +340,5 @@ module_exit(max3107_exit);
MODULE_DESCRIPTION("MAX3107 driver");
MODULE_AUTHOR("Aavamobile");
-MODULE_ALIAS("aava-max3107-spi");
+MODULE_ALIAS("spi:aava-max3107");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 750b4f6..a816460 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -1209,5 +1209,5 @@ module_exit(max3107_exit);
MODULE_DESCRIPTION("MAX3107 driver");
MODULE_AUTHOR("Aavamobile");
-MODULE_ALIAS("max3107-spi");
+MODULE_ALIAS("spi:max3107");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index fbd9261..492c14d 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -917,4 +917,4 @@ module_init(serial_m3110_init);
module_exit(serial_m3110_exit);
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("max3110-uart");
+MODULE_ALIAS("spi:max3110-uart");
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index c37df8d..5e713d3 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -806,8 +806,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_omap_set_mctrl(&up->port, up->port.mctrl);
/* Software Flow Control Configuration */
- if (termios->c_iflag & (IXON | IXOFF))
- serial_omap_configure_xonxoff(up, termios);
+ serial_omap_configure_xonxoff(up, termios);
spin_unlock_irqrestore(&up->port.lock, flags);
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 846dfcd..b46218d 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -598,7 +598,8 @@ static void pch_request_dma(struct uart_port *port)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(0xa, 0)); /* Get DMA's dev
+ dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number,
+ PCI_DEVFN(0xa, 0)); /* Get DMA's dev
information */
/* Set Tx DMA */
param = &priv->param_tx;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index afc6294..6edafb5 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1225,15 +1225,19 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
.suspend = s3c24xx_serial_suspend,
.resume = s3c24xx_serial_resume,
};
+#define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops)
+
#else /* !CONFIG_PM_SLEEP */
-#define s3c24xx_serial_pm_ops NULL
+
+#define SERIAL_SAMSUNG_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
int s3c24xx_serial_init(struct platform_driver *drv,
struct s3c24xx_uart_info *info)
{
dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
- drv->driver.pm = &s3c24xx_serial_pm_ops;
+
+ drv->driver.pm = SERIAL_SAMSUNG_PM_OPS;
return platform_driver_register(drv);
}
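The samsung.c hunk is a build fix as much as a cleanup: with CONFIG_PM_SLEEP disabled, the old code defined s3c24xx_serial_pm_ops itself as NULL, so taking its address in s3c24xx_serial_init() expanded to &NULL and could not compile. Routing the choice through a macro that is either an address or NULL is the usual pattern; a minimal sketch with placeholder callbacks (the foo_* names are illustrative only):

    #ifdef CONFIG_PM_SLEEP
    static int foo_suspend(struct device *dev) { return 0; }   /* placeholder */
    static int foo_resume(struct device *dev)  { return 0; }   /* placeholder */

    static const struct dev_pm_ops foo_pm_ops = {
            .suspend = foo_suspend,
            .resume  = foo_resume,
    };
    #define FOO_PM_OPS (&foo_pm_ops)
    #else
    #define FOO_PM_OPS NULL             /* no object exists, so no address is taken */
    #endif

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm   = FOO_PM_OPS,
            },
    };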
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index ea2340b..6bc2e3f 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -39,7 +39,7 @@
#include <linux/tty.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/war.h>
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index db7912c..a3efbea 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -200,6 +200,11 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in
clear_bit(TTY_IO_ERROR, &tty->flags);
}
+ /*
+ * This is to allow setserial on this port. People may want to set
+ * port/irq/type and then reconfigure the port properly if it failed
+ * now.
+ */
if (retval && capable(CAP_SYS_ADMIN))
retval = 0;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ebd8629..5ea6ec3 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -47,6 +47,7 @@
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -54,10 +55,6 @@
#include <asm/sh_bios.h>
#endif
-#ifdef CONFIG_H8300
-#include <asm/gpio.h>
-#endif
-
#include "sh-sci.h"
struct sci_port {
@@ -66,12 +63,6 @@ struct sci_port {
/* Platform configuration */
struct plat_sci_port *cfg;
- /* Port enable callback */
- void (*enable)(struct uart_port *port);
-
- /* Port disable callback */
- void (*disable)(struct uart_port *port);
-
/* Break timer */
struct timer_list break_timer;
int break_flag;
@@ -81,6 +72,8 @@ struct sci_port {
/* Function clock */
struct clk *fclk;
+ char *irqstr[SCIx_NR_IRQS];
+
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
@@ -103,6 +96,12 @@ struct sci_port {
#endif
struct notifier_block freq_transition;
+
+#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+ unsigned short saved_smr;
+ unsigned short saved_fcr;
+ unsigned char saved_brr;
+#endif
};
/* Function prototypes */
@@ -121,6 +120,278 @@ to_sci_port(struct uart_port *uart)
return container_of(uart, struct sci_port, port);
}
+struct plat_sci_reg {
+ u8 offset, size;
+};
+
+/* Helper for invalidating specific entries of an inherited map. */
+#define sci_reg_invalid { .offset = 0, .size = 0 }
+
+static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
+ [SCIx_PROBE_REGTYPE] = {
+ [0 ... SCIx_NR_REGS - 1] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SCI definitions, dependent on the port's regshift
+ * value.
+ */
+ [SCIx_SCI_REGTYPE] = {
+ [SCSMR] = { 0x00, 8 },
+ [SCBRR] = { 0x01, 8 },
+ [SCSCR] = { 0x02, 8 },
+ [SCxTDR] = { 0x03, 8 },
+ [SCxSR] = { 0x04, 8 },
+ [SCxRDR] = { 0x05, 8 },
+ [SCFCR] = sci_reg_invalid,
+ [SCFDR] = sci_reg_invalid,
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common definitions for legacy IrDA ports, dependent on
+ * regshift value.
+ */
+ [SCIx_IRDA_REGTYPE] = {
+ [SCSMR] = { 0x00, 8 },
+ [SCBRR] = { 0x01, 8 },
+ [SCSCR] = { 0x02, 8 },
+ [SCxTDR] = { 0x03, 8 },
+ [SCxSR] = { 0x04, 8 },
+ [SCxRDR] = { 0x05, 8 },
+ [SCFCR] = { 0x06, 8 },
+ [SCFDR] = { 0x07, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SCIFA definitions.
+ */
+ [SCIx_SCIFA_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x20, 8 },
+ [SCxSR] = { 0x14, 16 },
+ [SCxRDR] = { 0x24, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SCIFB definitions.
+ */
+ [SCIx_SCIFB_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x40, 8 },
+ [SCxSR] = { 0x14, 16 },
+ [SCxRDR] = { 0x60, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SH-3 SCIF definitions.
+ */
+ [SCIx_SH3_SCIF_REGTYPE] = {
+ [SCSMR] = { 0x00, 8 },
+ [SCBRR] = { 0x02, 8 },
+ [SCSCR] = { 0x04, 8 },
+ [SCxTDR] = { 0x06, 8 },
+ [SCxSR] = { 0x08, 16 },
+ [SCxRDR] = { 0x0a, 8 },
+ [SCFCR] = { 0x0c, 8 },
+ [SCFDR] = { 0x0e, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SH-4(A) SCIF(B) definitions.
+ */
+ [SCIx_SH4_SCIF_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = { 0x20, 16 },
+ [SCLSR] = { 0x24, 16 },
+ },
+
+ /*
+ * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
+ * register.
+ */
+ [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = { 0x24, 16 },
+ },
+
+ /*
+ * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
+ * count registers.
+ */
+ [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */
+ [SCRFDR] = { 0x20, 16 },
+ [SCSPTR] = { 0x24, 16 },
+ [SCLSR] = { 0x28, 16 },
+ },
+
+ /*
+ * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
+ * registers.
+ */
+ [SCIx_SH7705_SCIF_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x20, 8 },
+ [SCxSR] = { 0x14, 16 },
+ [SCxRDR] = { 0x24, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+};
+
+#define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset)
+
+/*
+ * The "offset" here is rather misleading, in that it refers to an enum
+ * value relative to the port mapping rather than the fixed offset
+ * itself, which needs to be manually retrieved from the platform's
+ * register map for the given port.
+ */
+static unsigned int sci_serial_in(struct uart_port *p, int offset)
+{
+ struct plat_sci_reg *reg = sci_getreg(p, offset);
+
+ if (reg->size == 8)
+ return ioread8(p->membase + (reg->offset << p->regshift));
+ else if (reg->size == 16)
+ return ioread16(p->membase + (reg->offset << p->regshift));
+ else
+ WARN(1, "Invalid register access\n");
+
+ return 0;
+}
+
+static void sci_serial_out(struct uart_port *p, int offset, int value)
+{
+ struct plat_sci_reg *reg = sci_getreg(p, offset);
+
+ if (reg->size == 8)
+ iowrite8(value, p->membase + (reg->offset << p->regshift));
+ else if (reg->size == 16)
+ iowrite16(value, p->membase + (reg->offset << p->regshift));
+ else
+ WARN(1, "Invalid register access\n");
+}
+
+#define sci_in(up, offset) (up->serial_in(up, offset))
+#define sci_out(up, offset, value) (up->serial_out(up, offset, value))
+
+static int sci_probe_regmap(struct plat_sci_port *cfg)
+{
+ switch (cfg->type) {
+ case PORT_SCI:
+ cfg->regtype = SCIx_SCI_REGTYPE;
+ break;
+ case PORT_IRDA:
+ cfg->regtype = SCIx_IRDA_REGTYPE;
+ break;
+ case PORT_SCIFA:
+ cfg->regtype = SCIx_SCIFA_REGTYPE;
+ break;
+ case PORT_SCIFB:
+ cfg->regtype = SCIx_SCIFB_REGTYPE;
+ break;
+ case PORT_SCIF:
+ /*
+ * The SH-4 is a bit of a misnomer here, although that's
+ * where this particular port layout originated. This
+ * configuration (or some slight variation thereof)
+ * remains the dominant model for all SCIFs.
+ */
+ cfg->regtype = SCIx_SH4_SCIF_REGTYPE;
+ break;
+ default:
+ printk(KERN_ERR "Can't probe register map for given port\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void sci_port_enable(struct sci_port *sci_port)
+{
+ if (!sci_port->port.dev)
+ return;
+
+ pm_runtime_get_sync(sci_port->port.dev);
+
+ clk_enable(sci_port->iclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+ clk_enable(sci_port->fclk);
+}
+
+static void sci_port_disable(struct sci_port *sci_port)
+{
+ if (!sci_port->port.dev)
+ return;
+
+ clk_disable(sci_port->fclk);
+ clk_disable(sci_port->iclk);
+
+ pm_runtime_put_sync(sci_port->port.dev);
+}
+
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
#ifdef CONFIG_CONSOLE_POLL
@@ -164,223 +435,76 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
-#if defined(__H8300H__) || defined(__H8300S__)
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
- int ch = (port->mapbase - SMR0) >> 3;
-
- /* set DDR regs */
- H8300_GPIO_DDR(h8300_sci_pins[ch].port,
- h8300_sci_pins[ch].rx,
- H8300_GPIO_INPUT);
- H8300_GPIO_DDR(h8300_sci_pins[ch].port,
- h8300_sci_pins[ch].tx,
- H8300_GPIO_OUTPUT);
-
- /* tx mark output*/
- H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- if (port->mapbase == 0xA4400000) {
- __raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
- __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
- } else if (port->mapbase == 0xA4410000)
- __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- unsigned short data;
-
- if (cflag & CRTSCTS) {
- /* enable RTS/CTS */
- if (port->mapbase == 0xa4430000) { /* SCIF0 */
- /* Clear PTCR bit 9-2; enable all scif pins but sck */
- data = __raw_readw(PORT_PTCR);
- __raw_writew((data & 0xfc03), PORT_PTCR);
- } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
- /* Clear PVCR bit 9-2 */
- data = __raw_readw(PORT_PVCR);
- __raw_writew((data & 0xfc03), PORT_PVCR);
- }
- } else {
- if (port->mapbase == 0xa4430000) { /* SCIF0 */
- /* Clear PTCR bit 5-2; enable only tx and rx */
- data = __raw_readw(PORT_PTCR);
- __raw_writew((data & 0xffc3), PORT_PTCR);
- } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
- /* Clear PVCR bit 5-2 */
- data = __raw_readw(PORT_PVCR);
- __raw_writew((data & 0xffc3), PORT_PVCR);
- }
- }
-}
-#elif defined(CONFIG_CPU_SH3)
-/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- unsigned short data;
-
- /* We need to set SCPCR to enable RTS/CTS */
- data = __raw_readw(SCPCR);
- /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
- __raw_writew(data & 0x0fcf, SCPCR);
-
- if (!(cflag & CRTSCTS)) {
- /* We need to set SCPCR to enable RTS/CTS */
- data = __raw_readw(SCPCR);
- /* Clear out SCP7MD1,0, SCP4MD1,0,
- Set SCP6MD1,0 = {01} (output) */
- __raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
+ struct sci_port *s = to_sci_port(port);
+ struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
- data = __raw_readb(SCPDR);
- /* Set /RTS2 (bit6) = 0 */
- __raw_writeb(data & 0xbf, SCPDR);
+ /*
+ * Use port-specific handler if provided.
+ */
+ if (s->cfg->ops && s->cfg->ops->init_pins) {
+ s->cfg->ops->init_pins(port, cflag);
+ return;
}
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- unsigned short data;
- if (port->mapbase == 0xffe00000) {
- data = __raw_readw(PSCR);
- data &= ~0x03cf;
- if (!(cflag & CRTSCTS))
- data |= 0x0340;
+ /*
+ * For the generic path SCSPTR is necessary. Bail out if that's
+ * unavailable, too.
+ */
+ if (!reg->size)
+ return;
- __raw_writew(data, PSCR);
- }
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
- defined(CONFIG_CPU_SUBTYPE_SH7763) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786) || \
- defined(CONFIG_CPU_SUBTYPE_SHX3)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- if (!(cflag & CRTSCTS))
- __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */
-}
-#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
if (!(cflag & CRTSCTS))
- __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */
+ sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */
}
-#else
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- /* Nothing to do */
-}
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
-static int scif_txfill(struct uart_port *port)
-{
- return sci_in(port, SCTFDR) & 0xff;
-}
-
-static int scif_txroom(struct uart_port *port)
+static int sci_txfill(struct uart_port *port)
{
- return SCIF_TXROOM_MAX - scif_txfill(port);
-}
+ struct plat_sci_reg *reg;
-static int scif_rxfill(struct uart_port *port)
-{
- return sci_in(port, SCRFDR) & 0xff;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-static int scif_txfill(struct uart_port *port)
-{
- if (port->mapbase == 0xffe00000 ||
- port->mapbase == 0xffe08000)
- /* SCIF0/1*/
+ reg = sci_getreg(port, SCTFDR);
+ if (reg->size)
return sci_in(port, SCTFDR) & 0xff;
- else
- /* SCIF2 */
+
+ reg = sci_getreg(port, SCFDR);
+ if (reg->size)
return sci_in(port, SCFDR) >> 8;
-}
-static int scif_txroom(struct uart_port *port)
-{
- if (port->mapbase == 0xffe00000 ||
- port->mapbase == 0xffe08000)
- /* SCIF0/1*/
- return SCIF_TXROOM_MAX - scif_txfill(port);
- else
- /* SCIF2 */
- return SCIF2_TXROOM_MAX - scif_txfill(port);
+ return !(sci_in(port, SCxSR) & SCI_TDRE);
}
-static int scif_rxfill(struct uart_port *port)
-{
- if ((port->mapbase == 0xffe00000) ||
- (port->mapbase == 0xffe08000)) {
- /* SCIF0/1*/
- return sci_in(port, SCRFDR) & 0xff;
- } else {
- /* SCIF2 */
- return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
- }
-}
-#elif defined(CONFIG_ARCH_SH7372)
-static int scif_txfill(struct uart_port *port)
+static int sci_txroom(struct uart_port *port)
{
- if (port->type == PORT_SCIFA)
- return sci_in(port, SCFDR) >> 8;
- else
- return sci_in(port, SCTFDR);
+ return port->fifosize - sci_txfill(port);
}
-static int scif_txroom(struct uart_port *port)
+static int sci_rxfill(struct uart_port *port)
{
- return port->fifosize - scif_txfill(port);
-}
+ struct plat_sci_reg *reg;
-static int scif_rxfill(struct uart_port *port)
-{
- if (port->type == PORT_SCIFA)
- return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
- else
- return sci_in(port, SCRFDR);
-}
-#else
-static int scif_txfill(struct uart_port *port)
-{
- return sci_in(port, SCFDR) >> 8;
-}
+ reg = sci_getreg(port, SCRFDR);
+ if (reg->size)
+ return sci_in(port, SCRFDR) & 0xff;
-static int scif_txroom(struct uart_port *port)
-{
- return SCIF_TXROOM_MAX - scif_txfill(port);
-}
+ reg = sci_getreg(port, SCFDR);
+ if (reg->size)
+ return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1);
-static int scif_rxfill(struct uart_port *port)
-{
- return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
+ return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
-#endif
-static int sci_txfill(struct uart_port *port)
+/*
+ * SCI helper for checking the state of the muxed port/RXD pins.
+ */
+static inline int sci_rxd_in(struct uart_port *port)
{
- return !(sci_in(port, SCxSR) & SCI_TDRE);
-}
+ struct sci_port *s = to_sci_port(port);
-static int sci_txroom(struct uart_port *port)
-{
- return !sci_txfill(port);
-}
+ if (s->cfg->port_reg <= 0)
+ return 1;
-static int sci_rxfill(struct uart_port *port)
-{
- return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
+ return !!__raw_readb(s->cfg->port_reg);
}
/* ********************************************************************** *
@@ -406,10 +530,7 @@ static void sci_transmit_chars(struct uart_port *port)
return;
}
- if (port->type == PORT_SCI)
- count = sci_txroom(port);
- else
- count = scif_txroom(port);
+ count = sci_txroom(port);
do {
unsigned char c;
@@ -464,13 +585,8 @@ static void sci_receive_chars(struct uart_port *port)
return;
while (1) {
- if (port->type == PORT_SCI)
- count = sci_rxfill(port);
- else
- count = scif_rxfill(port);
-
/* Don't copy more bytes than there is room for in the buffer */
- count = tty_buffer_request_room(tty, count);
+ count = tty_buffer_request_room(tty, sci_rxfill(port));
/* If for any reason we can't copy more data, we're done! */
if (count == 0)
@@ -561,8 +677,7 @@ static void sci_break_timer(unsigned long data)
{
struct sci_port *port = (struct sci_port *)data;
- if (port->enable)
- port->enable(&port->port);
+ sci_port_enable(port);
if (sci_rxd_in(&port->port) == 0) {
port->break_flag = 1;
@@ -574,8 +689,7 @@ static void sci_break_timer(unsigned long data)
} else
port->break_flag = 0;
- if (port->disable)
- port->disable(&port->port);
+ sci_port_disable(port);
}
static int sci_handle_errors(struct uart_port *port)
@@ -583,13 +697,19 @@ static int sci_handle_errors(struct uart_port *port)
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
struct tty_struct *tty = port->state->port.tty;
+ struct sci_port *s = to_sci_port(port);
- if (status & SCxSR_ORER(port)) {
- /* overrun error */
- if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
- copied++;
+ /*
+ * Handle overruns, if supported.
+ */
+ if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
+ if (status & (1 << s->cfg->overrun_bit)) {
+ /* overrun error */
+ if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
+ copied++;
- dev_notice(port->dev, "overrun error");
+ dev_notice(port->dev, "overrun error");
+ }
}
if (status & SCxSR_FER(port)) {
@@ -637,12 +757,15 @@ static int sci_handle_errors(struct uart_port *port)
static int sci_handle_fifo_overrun(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
+ struct sci_port *s = to_sci_port(port);
+ struct plat_sci_reg *reg;
int copied = 0;
- if (port->type != PORT_SCIF)
+ reg = sci_getreg(port, SCLSR);
+ if (!reg->size)
return 0;
- if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) {
+ if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
sci_out(port, SCLSR, 0);
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
@@ -840,74 +963,102 @@ static int sci_notifier(struct notifier_block *self,
return NOTIFY_OK;
}
-static void sci_clk_enable(struct uart_port *port)
-{
- struct sci_port *sci_port = to_sci_port(port);
-
- pm_runtime_get_sync(port->dev);
+static struct sci_irq_desc {
+ const char *desc;
+ irq_handler_t handler;
+} sci_irq_desc[] = {
+ /*
+ * Split out handlers, the default case.
+ */
+ [SCIx_ERI_IRQ] = {
+ .desc = "rx err",
+ .handler = sci_er_interrupt,
+ },
- clk_enable(sci_port->iclk);
- sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
- clk_enable(sci_port->fclk);
-}
+ [SCIx_RXI_IRQ] = {
+ .desc = "rx full",
+ .handler = sci_rx_interrupt,
+ },
-static void sci_clk_disable(struct uart_port *port)
-{
- struct sci_port *sci_port = to_sci_port(port);
+ [SCIx_TXI_IRQ] = {
+ .desc = "tx empty",
+ .handler = sci_tx_interrupt,
+ },
- clk_disable(sci_port->fclk);
- clk_disable(sci_port->iclk);
+ [SCIx_BRI_IRQ] = {
+ .desc = "break",
+ .handler = sci_br_interrupt,
+ },
- pm_runtime_put_sync(port->dev);
-}
+ /*
+ * Special muxed handler.
+ */
+ [SCIx_MUX_IRQ] = {
+ .desc = "mux",
+ .handler = sci_mpxed_interrupt,
+ },
+};
static int sci_request_irq(struct sci_port *port)
{
- int i;
- irqreturn_t (*handlers[4])(int irq, void *ptr) = {
- sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt,
- sci_br_interrupt,
- };
- const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
- "SCI Transmit Data Empty", "SCI Break" };
-
- if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
- if (unlikely(!port->cfg->irqs[0]))
- return -ENODEV;
-
- if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
- IRQF_DISABLED, "sci", port)) {
- dev_err(port->port.dev, "Can't allocate IRQ\n");
- return -ENODEV;
+ struct uart_port *up = &port->port;
+ int i, j, ret = 0;
+
+ for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
+ struct sci_irq_desc *desc;
+ unsigned int irq;
+
+ if (SCIx_IRQ_IS_MUXED(port)) {
+ i = SCIx_MUX_IRQ;
+ irq = up->irq;
+ } else
+ irq = port->cfg->irqs[i];
+
+ desc = sci_irq_desc + i;
+ port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
+ dev_name(up->dev), desc->desc);
+ if (!port->irqstr[j]) {
+ dev_err(up->dev, "Failed to allocate %s IRQ string\n",
+ desc->desc);
+ goto out_nomem;
}
- } else {
- for (i = 0; i < ARRAY_SIZE(handlers); i++) {
- if (unlikely(!port->cfg->irqs[i]))
- continue;
-
- if (request_irq(port->cfg->irqs[i], handlers[i],
- IRQF_DISABLED, desc[i], port)) {
- dev_err(port->port.dev, "Can't allocate IRQ\n");
- return -ENODEV;
- }
+
+ ret = request_irq(irq, desc->handler, up->irqflags,
+ port->irqstr[j], port);
+ if (unlikely(ret)) {
+ dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
+ goto out_noirq;
}
}
return 0;
+
+out_noirq:
+ while (--i >= 0)
+ free_irq(port->cfg->irqs[i], port);
+
+out_nomem:
+ while (--j >= 0)
+ kfree(port->irqstr[j]);
+
+ return ret;
}
static void sci_free_irq(struct sci_port *port)
{
int i;
- if (port->cfg->irqs[0] == port->cfg->irqs[1])
- free_irq(port->cfg->irqs[0], port);
- else {
- for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
- if (!port->cfg->irqs[i])
- continue;
+ /*
+ * Intentionally in reverse order so we iterate over the muxed
+ * IRQ first.
+ */
+ for (i = 0; i < SCIx_NR_IRQS; i++) {
+ free_irq(port->cfg->irqs[i], port);
+ kfree(port->irqstr[i]);
- free_irq(port->cfg->irqs[i], port);
+ if (SCIx_IRQ_IS_MUXED(port)) {
+ /* If there's only one IRQ, we're done. */
+ return;
}
}
}
@@ -915,7 +1066,7 @@ static void sci_free_irq(struct sci_port *port)
static unsigned int sci_tx_empty(struct uart_port *port)
{
unsigned short status = sci_in(port, SCxSR);
- unsigned short in_tx_fifo = scif_txfill(port);
+ unsigned short in_tx_fifo = sci_txfill(port);
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
@@ -932,7 +1083,7 @@ static unsigned int sci_get_mctrl(struct uart_port *port)
/* This routine is used for getting signals of: DTR, DCD, DSR, RI,
and CTS/RTS */
- return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
+ return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR;
}
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -1438,8 +1589,7 @@ static int sci_startup(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
- if (s->enable)
- s->enable(port);
+ sci_port_enable(s);
ret = sci_request_irq(s);
if (unlikely(ret < 0))
@@ -1465,8 +1615,7 @@ static void sci_shutdown(struct uart_port *port)
sci_free_dma(port);
sci_free_irq(s);
- if (s->disable)
- s->disable(port);
+ sci_port_disable(s);
}
static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
@@ -1491,11 +1640,25 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
return ((freq + 16 * bps) / (32 * bps) - 1);
}
+static void sci_reset(struct uart_port *port)
+{
+ unsigned int status;
+
+ do {
+ status = sci_in(port, SCxSR);
+ } while (!(status & SCxSR_TEND(port)));
+
+ sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
+
+ if (port->type != PORT_SCI)
+ sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+}
+
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct sci_port *s = to_sci_port(port);
- unsigned int status, baud, smr_val, max_baud;
+ unsigned int baud, smr_val, max_baud;
int t = -1;
u16 scfcr = 0;
@@ -1513,17 +1676,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
if (likely(baud && port->uartclk))
t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);
- if (s->enable)
- s->enable(port);
-
- do {
- status = sci_in(port, SCxSR);
- } while (!(status & SCxSR_TEND(port)));
-
- sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
+ sci_port_enable(s);
- if (port->type != PORT_SCI)
- sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
+ sci_reset(port);
smr_val = sci_in(port, SCSMR) & 3;
@@ -1584,8 +1739,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
- if (s->disable)
- s->disable(port);
+ sci_port_disable(s);
}
static const char *sci_type(struct uart_port *port)
@@ -1726,6 +1880,7 @@ static int __devinit sci_init_single(struct platform_device *dev,
struct plat_sci_port *p)
{
struct uart_port *port = &sci_port->port;
+ int ret;
port->ops = &sci_uart_ops;
port->iotype = UPIO_MEM;
@@ -1746,6 +1901,12 @@ static int __devinit sci_init_single(struct platform_device *dev,
break;
}
+ if (p->regtype == SCIx_PROBE_REGTYPE) {
+ ret = sci_probe_regmap(p);
+ if (unlikely(ret))
+ return ret;
+ }
+
if (dev) {
sci_port->iclk = clk_get(&dev->dev, "sci_ick");
if (IS_ERR(sci_port->iclk)) {
@@ -1764,10 +1925,9 @@ static int __devinit sci_init_single(struct platform_device *dev,
if (IS_ERR(sci_port->fclk))
sci_port->fclk = NULL;
- sci_port->enable = sci_clk_enable;
- sci_port->disable = sci_clk_disable;
port->dev = &dev->dev;
+ pm_runtime_irq_safe(&dev->dev);
pm_runtime_enable(&dev->dev);
}
@@ -1775,20 +1935,51 @@ static int __devinit sci_init_single(struct platform_device *dev,
sci_port->break_timer.function = sci_break_timer;
init_timer(&sci_port->break_timer);
+ /*
+ * Establish some sensible defaults for the error detection.
+ */
+ if (!p->error_mask)
+ p->error_mask = (p->type == PORT_SCI) ?
+ SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;
+
+ /*
+ * Establish sensible defaults for the overrun detection, unless
+ * the part has explicitly disabled support for it.
+ */
+ if (p->overrun_bit != SCIx_NOT_SUPPORTED) {
+ if (p->type == PORT_SCI)
+ p->overrun_bit = 5;
+ else if (p->scbrr_algo_id == SCBRR_ALGO_4)
+ p->overrun_bit = 9;
+ else
+ p->overrun_bit = 0;
+
+ /*
+ * Make the error mask inclusive of overrun detection, if
+ * supported.
+ */
+ p->error_mask |= (1 << p->overrun_bit);
+ }
+
sci_port->cfg = p;
port->mapbase = p->mapbase;
port->type = p->type;
port->flags = p->flags;
+ port->regshift = p->regshift;
/*
- * The UART port needs an IRQ value, so we peg this to the TX IRQ
+ * The UART port needs an IRQ value, so we peg this to the RX IRQ
* for the multi-IRQ ports, which is where we are primarily
* concerned with the shutdown path synchronization.
*
* For the muxed case there's nothing more to do.
*/
port->irq = p->irqs[SCIx_RXI_IRQ];
+ port->irqflags = IRQF_DISABLED;
+
+ port->serial_in = sci_serial_in;
+ port->serial_out = sci_serial_out;
if (p->dma_dev)
dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
@@ -1814,8 +2005,7 @@ static void serial_console_write(struct console *co, const char *s,
struct uart_port *port = &sci_port->port;
unsigned short bits;
- if (sci_port->enable)
- sci_port->enable(port);
+ sci_port_enable(sci_port);
uart_console_write(port, s, count, serial_console_putchar);
@@ -1824,8 +2014,7 @@ static void serial_console_write(struct console *co, const char *s,
while ((sci_in(port, SCxSR) & bits) != bits)
cpu_relax();
- if (sci_port->disable)
- sci_port->disable(port);
+ sci_port_disable(sci_port);
}
static int __devinit serial_console_setup(struct console *co, char *options)
@@ -1857,20 +2046,14 @@ static int __devinit serial_console_setup(struct console *co, char *options)
if (unlikely(ret != 0))
return ret;
- if (sci_port->enable)
- sci_port->enable(port);
+ sci_port_enable(sci_port);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
- ret = uart_set_options(port, co, baud, parity, bits, flow);
-#if defined(__H8300H__) || defined(__H8300S__)
- /* disable rx interrupt */
- if (ret == 0)
- sci_stop_rx(port);
-#endif
- /* TODO: disable clock */
- return ret;
+ sci_port_disable(sci_port);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct console serial_console = {
@@ -1912,6 +2095,36 @@ static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
return 0;
}
+#define uart_console(port) ((port)->cons->index == (port)->line)
+
+static int sci_runtime_suspend(struct device *dev)
+{
+ struct sci_port *sci_port = dev_get_drvdata(dev);
+ struct uart_port *port = &sci_port->port;
+
+ if (uart_console(port)) {
+ sci_port->saved_smr = sci_in(port, SCSMR);
+ sci_port->saved_brr = sci_in(port, SCBRR);
+ sci_port->saved_fcr = sci_in(port, SCFCR);
+ }
+ return 0;
+}
+
+static int sci_runtime_resume(struct device *dev)
+{
+ struct sci_port *sci_port = dev_get_drvdata(dev);
+ struct uart_port *port = &sci_port->port;
+
+ if (uart_console(port)) {
+ sci_reset(port);
+ sci_out(port, SCSMR, sci_port->saved_smr);
+ sci_out(port, SCBRR, sci_port->saved_brr);
+ sci_out(port, SCFCR, sci_port->saved_fcr);
+ sci_out(port, SCSCR, sci_port->cfg->scscr);
+ }
+ return 0;
+}
+
#define SCI_CONSOLE (&serial_console)
#else
@@ -1921,6 +2134,8 @@ static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
}
#define SCI_CONSOLE NULL
+#define sci_runtime_suspend NULL
+#define sci_runtime_resume NULL
#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
@@ -2036,6 +2251,8 @@ static int sci_resume(struct device *dev)
}
static const struct dev_pm_ops sci_dev_pm_ops = {
+ .runtime_suspend = sci_runtime_suspend,
+ .runtime_resume = sci_runtime_resume,
.suspend = sci_suspend,
.resume = sci_resume,
};
@@ -2081,3 +2298,5 @@ module_exit(sci_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");
+MODULE_AUTHOR("Paul Mundt");
+MODULE_DESCRIPTION("SuperH SCI(F) serial driver");
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index b04d937..e9bed03 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -2,169 +2,14 @@
#include <linux/io.h>
#include <linux/gpio.h>
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
-#include <asm/regs306x.h>
-#endif
-#if defined(CONFIG_H8S2678)
-#include <asm/regs267x.h>
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
- defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7708) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
-# define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */
-# define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
-# define SCIF0 0xA4400000
-# define SCIF2 0xA4410000
-# define SCPCR 0xA4000116
-# define SCPDR 0xA4000136
-#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH73A0) || \
- defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
-# define PORT_PTCR 0xA405011EUL
-# define PORT_PVCR 0xA4050122UL
-# define SCIF_ORER 0x0200 /* overrun error bit */
-#elif defined(CONFIG_SH_RTS7751R2D)
-# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
-# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
- defined(CONFIG_CPU_SUBTYPE_SH7091) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751R)
-# define SCSPTR1 0xffe0001c /* 8 bit SCI */
-# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
-# define SCSPTR0 0xfe600024 /* 16 bit SCIF */
-# define SCSPTR1 0xfe610024 /* 16 bit SCIF */
-# define SCSPTR2 0xfe620024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-# define SCSPTR0 0xA4400000 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-# define PACR 0xa4050100
-# define PBCR 0xa4050102
-#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
-# define SCSPTR0 0xffe00010 /* 16 bit SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
-# define PADR 0xA4050120
-# define PSDR 0xA405013e
-# define PWDR 0xA4050166
-# define PSCR 0xA405011E
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
-# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
-# define SCSPTR0 SCPDR0
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
-# define SCSPTR0 0xa4050160
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
-# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
-# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
-#elif defined(CONFIG_H8S2678)
-# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
-# define SCSPTR0 0xfe4b0020
-# define SCIF_ORER 0x0001
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
-# define SCSPTR0 0xff923020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
-# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* Overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
-# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* Overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
- defined(CONFIG_CPU_SUBTYPE_SH7203) || \
- defined(CONFIG_CPU_SUBTYPE_SH7206) || \
- defined(CONFIG_CPU_SUBTYPE_SH7263)
-# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
-# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
-# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* Overrun error bit */
-#else
-# error CPU subtype not defined
-#endif
-
-/* SCxSR SCI */
-#define SCI_TDRE 0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_RDRF 0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_ORER 0x20 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_FER 0x10 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_PER 0x08 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_TEND 0x04 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/* SCI_MPB 0x02 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/* SCI_MPBT 0x01 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-
-#define SCI_ERRORS ( SCI_PER | SCI_FER | SCI_ORER)
-
-/* SCxSR SCIF */
-#define SCIF_ER 0x0080 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_TEND 0x0040 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_TDFE 0x0020 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_BRK 0x0010 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_FER 0x0008 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_PER 0x0004 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_RDF 0x0002 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_DR 0x0001 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
- defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH73A0) || \
- defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
-# define SCIF_ORER 0x0200
-# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER)
-# define SCIF_RFDC_MASK 0x007f
-# define SCIF_TXROOM_MAX 64
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK )
-# define SCIF_RFDC_MASK 0x007f
-# define SCIF_TXROOM_MAX 64
-/* SH7763 SCIF2 support */
-# define SCIF2_RFDC_MASK 0x001f
-# define SCIF2_TXROOM_MAX 16
-#else
-# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
-# define SCIF_RFDC_MASK 0x001f
-# define SCIF_TXROOM_MAX 16
-#endif
-
-#ifndef SCIF_ORER
-#define SCIF_ORER 0x0000
-#endif
-
#define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND)
-#define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS)
#define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF)
#define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE)
#define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER)
#define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER)
#define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK)
-#define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER)
+
+#define SCxSR_ERRORS(port) (to_sci_port(port)->cfg->error_mask)
#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
@@ -191,278 +36,3 @@
#define SCI_MAJOR 204
#define SCI_MINOR_START 8
-
-#define SCI_IN(size, offset) \
- if ((size) == 8) { \
- return ioread8(port->membase + (offset)); \
- } else { \
- return ioread16(port->membase + (offset)); \
- }
-#define SCI_OUT(size, offset, value) \
- if ((size) == 8) { \
- iowrite8(value, port->membase + (offset)); \
- } else if ((size) == 16) { \
- iowrite16(value, port->membase + (offset)); \
- }
-
-#define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\
- static inline unsigned int sci_##name##_in(struct uart_port *port) \
- { \
- if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
- SCI_IN(scif_size, scif_offset) \
- } else { /* PORT_SCI or PORT_SCIFA */ \
- SCI_IN(sci_size, sci_offset); \
- } \
- } \
- static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
- { \
- if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
- SCI_OUT(scif_size, scif_offset, value) \
- } else { /* PORT_SCI or PORT_SCIFA */ \
- SCI_OUT(sci_size, sci_offset, value); \
- } \
- }
-
-#ifdef CONFIG_H8300
-/* h8300 don't have SCIF */
-#define CPU_SCIF_FNS(name) \
- static inline unsigned int sci_##name##_in(struct uart_port *port) \
- { \
- return 0; \
- } \
- static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
- { \
- }
-#else
-#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
- static inline unsigned int sci_##name##_in(struct uart_port *port) \
- { \
- SCI_IN(scif_size, scif_offset); \
- } \
- static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
- { \
- SCI_OUT(scif_size, scif_offset, value); \
- }
-#endif
-
-#define CPU_SCI_FNS(name, sci_offset, sci_size) \
- static inline unsigned int sci_##name##_in(struct uart_port* port) \
- { \
- SCI_IN(sci_size, sci_offset); \
- } \
- static inline void sci_##name##_out(struct uart_port* port, unsigned int value) \
- { \
- SCI_OUT(sci_size, sci_offset, value); \
- }
-
-#if defined(CONFIG_CPU_SH3) || \
- defined(CONFIG_ARCH_SH73A0) || \
- defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
-#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
- defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH7367)
-#define SCIF_FNS(name, scif_offset, scif_size) \
- CPU_SCIF_FNS(name, scif_offset, scif_size)
-#elif defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372) || \
- defined(CONFIG_ARCH_SH73A0)
-#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \
- CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size)
-#define SCIF_FNS(name, scif_offset, scif_size) \
- CPU_SCIF_FNS(name, scif_offset, scif_size)
-#else
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh3_scif_offset, sh3_scif_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh3_scif_offset, sh3_scif_size)
-#endif
-#elif defined(__H8300H__) || defined(__H8300S__)
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
- defined(CONFIG_CPU_SUBTYPE_SH7724)
- #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size)
- #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
-#else
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
- defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH7367)
-
-SCIF_FNS(SCSMR, 0x00, 16)
-SCIF_FNS(SCBRR, 0x04, 8)
-SCIF_FNS(SCSCR, 0x08, 16)
-SCIF_FNS(SCxSR, 0x14, 16)
-SCIF_FNS(SCFCR, 0x18, 16)
-SCIF_FNS(SCFDR, 0x1c, 16)
-SCIF_FNS(SCxTDR, 0x20, 8)
-SCIF_FNS(SCxRDR, 0x24, 8)
-SCIF_FNS(SCLSR, 0x00, 0)
-#elif defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372) || \
- defined(CONFIG_ARCH_SH73A0)
-SCIF_FNS(SCSMR, 0x00, 16)
-SCIF_FNS(SCBRR, 0x04, 8)
-SCIF_FNS(SCSCR, 0x08, 16)
-SCIF_FNS(SCTDSR, 0x0c, 16)
-SCIF_FNS(SCFER, 0x10, 16)
-SCIF_FNS(SCxSR, 0x14, 16)
-SCIF_FNS(SCFCR, 0x18, 16)
-SCIF_FNS(SCFDR, 0x1c, 16)
-SCIF_FNS(SCTFDR, 0x38, 16)
-SCIF_FNS(SCRFDR, 0x3c, 16)
-SCIx_FNS(SCxTDR, 0x20, 8, 0x40, 8)
-SCIx_FNS(SCxRDR, 0x24, 8, 0x60, 8)
-SCIF_FNS(SCLSR, 0x00, 0)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
- defined(CONFIG_CPU_SUBTYPE_SH7724)
-SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16)
-SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8)
-SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
-SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8)
-SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16)
-SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8)
-SCIx_FNS(SCSPTR, 0, 0, 0, 0)
-SCIF_FNS(SCFCR, 0x18, 16)
-SCIF_FNS(SCFDR, 0x1c, 16)
-SCIF_FNS(SCLSR, 0x24, 16)
-#else
-/* reg SCI/SH3 SCI/SH4 SCIF/SH3 SCIF/SH4 SCI/H8*/
-/* name off sz off sz off sz off sz off sz*/
-SCIx_FNS(SCSMR, 0x00, 8, 0x00, 8, 0x00, 8, 0x00, 16, 0x00, 8)
-SCIx_FNS(SCBRR, 0x02, 8, 0x04, 8, 0x02, 8, 0x04, 8, 0x01, 8)
-SCIx_FNS(SCSCR, 0x04, 8, 0x08, 8, 0x04, 8, 0x08, 16, 0x02, 8)
-SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8, 0x03, 8)
-SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8)
-SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8)
-SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16)
-#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
-SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
-SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
-SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
-SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
-SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-SCIF_FNS(SCFDR, 0, 0, 0x1C, 16)
-SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
-SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
-SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
-SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
-#else
-SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
-#if defined(CONFIG_CPU_SUBTYPE_SH7722)
-SCIF_FNS(SCSPTR, 0, 0, 0, 0)
-#else
-SCIF_FNS(SCSPTR, 0, 0, 0x20, 16)
-#endif
-SCIF_FNS(SCLSR, 0, 0, 0x24, 16)
-#endif
-#endif
-#define sci_in(port, reg) sci_##reg##_in(port)
-#define sci_out(port, reg, value) sci_##reg##_out(port, value)
-
-/* H8/300 series SCI pins assignment */
-#if defined(__H8300H__) || defined(__H8300S__)
-static const struct __attribute__((packed)) {
- int port; /* GPIO port no */
- unsigned short rx,tx; /* GPIO bit no */
-} h8300_sci_pins[] = {
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
- { /* SCI0 */
- .port = H8300_GPIO_P9,
- .rx = H8300_GPIO_B2,
- .tx = H8300_GPIO_B0,
- },
- { /* SCI1 */
- .port = H8300_GPIO_P9,
- .rx = H8300_GPIO_B3,
- .tx = H8300_GPIO_B1,
- },
- { /* SCI2 */
- .port = H8300_GPIO_PB,
- .rx = H8300_GPIO_B7,
- .tx = H8300_GPIO_B6,
- }
-#elif defined(CONFIG_H8S2678)
- { /* SCI0 */
- .port = H8300_GPIO_P3,
- .rx = H8300_GPIO_B2,
- .tx = H8300_GPIO_B0,
- },
- { /* SCI1 */
- .port = H8300_GPIO_P3,
- .rx = H8300_GPIO_B3,
- .tx = H8300_GPIO_B1,
- },
- { /* SCI2 */
- .port = H8300_GPIO_P5,
- .rx = H8300_GPIO_B1,
- .tx = H8300_GPIO_B0,
- }
-#endif
-};
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
- defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7708) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
-static inline int sci_rxd_in(struct uart_port *port)
-{
- if (port->mapbase == 0xfffffe80)
- return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
- return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
- defined(CONFIG_CPU_SUBTYPE_SH7091)
-static inline int sci_rxd_in(struct uart_port *port)
-{
- if (port->mapbase == 0xffe00000)
- return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
- return 1;
-}
-#elif defined(__H8300H__) || defined(__H8300S__)
-static inline int sci_rxd_in(struct uart_port *port)
-{
- int ch = (port->mapbase - SMR0) >> 3;
- return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0;
-}
-#else /* default case for non-SCI processors */
-static inline int sci_rxd_in(struct uart_port *port)
-{
- return 1;
-}
-#endif
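The sh-sci.h hunks above delete the per-CPU #ifdef ladder entirely: register layouts now come from the regtype tables in sh-sci.c, overrun handling from cfg->overrun_bit, and SCxSR_ERRORS() from cfg->error_mask, so SoC-specific knowledge lives in the platform data a board registers. A hedged sketch of such a definition after this series (the fields are the ones the driver hunks read from cfg; the values are illustrative only):

    static struct plat_sci_port scif0_platform_data = {
            .mapbase        = 0xffe00000,
            .flags          = UPF_BOOT_AUTOCONF,
            .type           = PORT_SCIF,
            .regtype        = SCIx_PROBE_REGTYPE,   /* let sci_probe_regmap() pick the SCIF layout */
            /*
             * error_mask and overrun_bit are left zero here: sci_init_single()
             * fills in the SCIF defaults as shown in the hunk above.
             */
    };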
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index c327218..9af9f08 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -235,7 +235,7 @@ static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port)
return qe_port->bd_virt + (addr - qe_port->bd_dma_addr);
/* something nasty happened */
- printk(KERN_ERR "%s: addr=%x\n", __func__, addr);
+ printk(KERN_ERR "%s: addr=%llx\n", __func__, (u64)addr);
BUG();
return NULL;
}
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 1a7fd3e..0aebd71 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -65,7 +65,7 @@
#include <linux/tty.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/dec/interrupts.h>
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 150e4f7..4f1fc81 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1295,8 +1295,7 @@ static int tty_driver_install_tty(struct tty_driver *driver,
*
* Locking: tty_mutex for now
*/
-static void tty_driver_remove_tty(struct tty_driver *driver,
- struct tty_struct *tty)
+void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
{
if (driver->ops->remove)
driver->ops->remove(driver, tty);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 385acb8..3f94ac3 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -268,7 +268,7 @@ usbtmc_abort_bulk_in_status:
dev_err(dev, "usb_bulk_msg returned %d\n", rv);
goto exit;
}
- } while ((actual = max_size) &&
+ } while ((actual == max_size) &&
(n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
if (actual == max_size) {
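The usbtmc hunk is a classic single-character fix: with the assignment, (actual = max_size) was always true, so the retry loop only stopped at USBTMC_MAX_READS_TO_CLEAR_BULK_IN and the following actual == max_size test could then never fail. The extra parentheses are also exactly what silence gcc's -Wparentheses hint about assignments used as truth values, which is plausibly how the typo survived. A small userspace illustration of the pitfall:

    #include <stdio.h>

    int main(void)
    {
            int actual = 0, max_size = 512;

            if ((actual = max_size))        /* assignment: always true here, and the
                                             * inner parens suppress -Wparentheses */
                    printf("branch taken, actual is now %d\n", actual);

            if (actual == max_size)         /* the comparison that was intended */
                    printf("values compare equal\n");

            return 0;
    }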
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index c962608..26678ca 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -123,10 +123,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
}
if (usb_endpoint_xfer_isoc(&ep->desc))
- max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) *
- (desc->bmAttributes + 1);
+ max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+ le16_to_cpu(ep->desc.wMaxPacketSize);
else if (usb_endpoint_xfer_int(&ep->desc))
- max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1);
+ max_tx = le16_to_cpu(ep->desc.wMaxPacketSize) *
+ (desc->bMaxBurst + 1);
else
max_tx = 999999;
if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
@@ -134,10 +135,10 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
"config %d interface %d altsetting %d ep %d: "
"setting to %d\n",
usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
- desc->wBytesPerInterval,
+ le16_to_cpu(desc->wBytesPerInterval),
cfgno, inum, asnum, ep->desc.bEndpointAddress,
max_tx);
- ep->ss_ep_comp.wBytesPerInterval = max_tx;
+ ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
}
}
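The config.c hunk enforces the rule that multi-byte USB descriptor fields are little-endian on the wire: wMaxPacketSize and wBytesPerInterval are __le16, so they must pass through le16_to_cpu() before arithmetic and cpu_to_le16() before being written back, otherwise the clamping of wBytesPerInterval mis-computes max_tx on big-endian hosts. A userspace model of the corrected calculation (values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a little-endian 16-bit descriptor field regardless of host order. */
    static uint16_t get_le16(const uint8_t *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
            const uint8_t wMaxPacketSize[2] = { 0x00, 0x04 };   /* 1024, little-endian */
            unsigned int bMaxBurst = 2, bmAttributes = 1;       /* companion descriptor fields */

            unsigned int max_tx = (bMaxBurst + 1) * (bmAttributes + 1) *
                                  get_le16(wMaxPacketSize);
            printf("max_tx = %u bytes\n", max_tx);              /* 3 * 2 * 1024 = 6144 */
            return 0;
    }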
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8669ba3..73cbbd8 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1775,6 +1775,8 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
struct usb_interface *iface = usb_ifnum_to_if(udev,
cur_alt->desc.bInterfaceNumber);
+ if (!iface)
+ return -EINVAL;
if (iface->resetting_device) {
/*
* The USB core just reset the device, so the xHCI host
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 44b6b40..5a084b9 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -310,7 +310,7 @@ config USB_PXA_U2O
# musb builds in ../musb along with host support
config USB_GADGET_MUSB_HDRC
tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)"
- depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+ depends on USB_MUSB_HDRC
select USB_GADGET_DUALSPEED
help
This OTG-capable silicon IP is used in dual designs including
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 98cbc06..ddb118a 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -35,6 +35,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5ef8779..aef4741 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1079,10 +1079,12 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
cdev->desc.bMaxPacketSize0 =
cdev->gadget->ep0->maxpacket;
if (gadget_is_superspeed(gadget)) {
- if (gadget->speed >= USB_SPEED_SUPER)
+ if (gadget->speed >= USB_SPEED_SUPER) {
cdev->desc.bcdUSB = cpu_to_le16(0x0300);
- else
+ cdev->desc.bMaxPacketSize0 = 9;
+ } else {
cdev->desc.bcdUSB = cpu_to_le16(0x0210);
+ }
}
value = min(w_length, (u16) sizeof cdev->desc);
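The composite.c change above also corrects the reported ep0 size for SuperSpeed: in a USB 3.0 device descriptor, bMaxPacketSize0 is an exponent, so the value 9 means 2^9 = 512 bytes, whereas at full/high speed the field carries the byte count directly. A small decode helper illustrating the convention (not part of the patch):

    static unsigned int ep0_maxpacket(unsigned char bMaxPacketSize0, int superspeed)
    {
            /* SuperSpeed encodes a power of two; FS/HS store the size directly. */
            return superspeed ? (1u << bMaxPacketSize0) : bMaxPacketSize0;
    }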
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 02a0270..a9a4ead 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "u_audio.h"
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
index 403a48b..83a266b 100644
--- a/drivers/usb/gadget/f_hid.c
+++ b/drivers/usb/gadget/f_hid.c
@@ -367,6 +367,13 @@ static int hidg_setup(struct usb_function *f,
case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
| USB_REQ_GET_DESCRIPTOR):
switch (value >> 8) {
+ case HID_DT_HID:
+ VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
+ length = min_t(unsigned short, length,
+ hidg_desc.bLength);
+ memcpy(req->buf, &hidg_desc, length);
+ goto respond;
+ break;
case HID_DT_REPORT:
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
length = min_t(unsigned short, length,
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 8f8d3f6..8f3eab1 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -434,6 +434,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
config_ep_by_speed(gadget, f, fp->out_ep)) {
fp->in_ep->desc = NULL;
fp->out_ep->desc = NULL;
+ spin_unlock(&port->lock);
return -EINVAL;
}
usb_ep_enable(fp->out_ep);
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 8f3eae9..3ea4666 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -29,7 +29,7 @@
#include <linux/device.h>
#include <linux/etherdevice.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "u_ether.h"
#include "rndis.h"
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index 24a9243..4ec888f 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -609,107 +609,6 @@ void fusb300_rdcxf(struct fusb300 *fusb300,
}
}
-#if 0
-static void fusb300_dbg_fifo(struct fusb300_ep *ep,
- u8 entry, u16 length)
-{
- u32 reg;
- u32 i = 0;
- u32 j = 0;
-
- reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM);
- reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) |
- FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG);
- reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) |
- FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG);
- iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM);
-
- for (i = 0; i < (length >> 2); i++) {
- if (i * 4 == 1024)
- break;
- reg = ioread32(ep->fusb300->reg +
- FUSB300_OFFSET_BUFDBG_START + i * 4);
- printk(KERN_DEBUG" 0x%-8x", reg);
- j++;
- if ((j % 4) == 0)
- printk(KERN_DEBUG "\n");
- }
-
- if (length % 4) {
- reg = ioread32(ep->fusb300->reg +
- FUSB300_OFFSET_BUFDBG_START + i * 4);
- printk(KERN_DEBUG " 0x%x\n", reg);
- }
-
- if ((j % 4) != 0)
- printk(KERN_DEBUG "\n");
-
- fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM,
- FUSB300_GTM_TST_FIFO_DEG);
-}
-
-static void fusb300_cmp_dbg_fifo(struct fusb300_ep *ep,
- u8 entry, u16 length, u8 *golden)
-{
- u32 reg;
- u32 i = 0;
- u32 golden_value;
- u8 *tmp;
-
- tmp = golden;
-
- printk(KERN_DEBUG "fusb300_cmp_dbg_fifo (entry %d) : start\n", entry);
-
- reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM);
- reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) |
- FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG);
- reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) |
- FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG);
- iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM);
-
- for (i = 0; i < (length >> 2); i++) {
- if (i * 4 == 1024)
- break;
- golden_value = *tmp | *(tmp + 1) << 8 |
- *(tmp + 2) << 16 | *(tmp + 3) << 24;
-
- reg = ioread32(ep->fusb300->reg +
- FUSB300_OFFSET_BUFDBG_START + i*4);
-
- if (reg != golden_value) {
- printk(KERN_DEBUG "0x%x : ", (u32)(ep->fusb300->reg +
- FUSB300_OFFSET_BUFDBG_START + i*4));
- printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n",
- golden_value, reg);
- }
- tmp += 4;
- }
-
- switch (length % 4) {
- case 1:
- golden_value = *tmp;
- case 2:
- golden_value = *tmp | *(tmp + 1) << 8;
- case 3:
- golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
- default:
- break;
-
- reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4);
- if (reg != golden_value) {
- printk(KERN_DEBUG "0x%x:", (u32)(ep->fusb300->reg +
- FUSB300_OFFSET_BUFDBG_START + i*4));
- printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n",
- golden_value, reg);
- }
- }
-
- printk(KERN_DEBUG "fusb300_cmp_dbg_fifo : end\n");
- fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM,
- FUSB300_GTM_TST_FIFO_DEG);
-}
-#endif
-
static void fusb300_rdfifo(struct fusb300_ep *ep,
struct fusb300_request *req,
u32 length)
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index 7c7b0e1..ab98ea9 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -27,13 +27,13 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
-#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 85c1b0d..8d31848 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -2060,6 +2060,7 @@ static int s3c2410_udc_resume(struct platform_device *pdev)
static const struct platform_device_id s3c_udc_ids[] = {
{ "s3c2410-usbgadget", },
{ "s3c2440-usbgadget", },
+ { }
};
MODULE_DEVICE_TABLE(platform, s3c_udc_ids);
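
Side note on the s3c2410_udc.c hunk above (not part of the patch): id tables like s3c_udc_ids[] end with an all-zero entry because the matching code walks the array until it finds an entry with an empty name; without the sentinel the walk runs off the end of the table. A standalone sketch of that convention, using a faked-up id type:

/* Sketch: sentinel-terminated id table lookup. */
#include <stdio.h>
#include <string.h>

struct fake_device_id {
	char name[24];
};

static const struct fake_device_id ids[] = {
	{ "s3c2410-usbgadget" },
	{ "s3c2440-usbgadget" },
	{ "" }				/* terminator: empty name stops the walk */
};

static const struct fake_device_id *match(const char *name)
{
	const struct fake_device_id *id;

	for (id = ids; id->name[0]; id++)
		if (!strcmp(id->name, name))
			return id;
	return NULL;
}

int main(void)
{
	printf("s3c2440-usbgadget: %s\n", match("s3c2440-usbgadget") ? "matched" : "no match");
	printf("unknown-device:    %s\n", match("unknown-device") ? "matched" : "no match");
	return 0;
}
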
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index f7395ac..aa0ad34 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -19,7 +19,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "uvc.h"
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index a715805..cfb5838 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -123,24 +123,12 @@ uvc_v4l2_open(struct file *file)
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle;
- int ret;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (handle == NULL)
return -ENOMEM;
- ret = v4l2_fh_init(&handle->vfh, vdev);
- if (ret < 0)
- goto error;
-
- ret = v4l2_event_init(&handle->vfh);
- if (ret < 0)
- goto error;
-
- ret = v4l2_event_alloc(&handle->vfh, 8);
- if (ret < 0)
- goto error;
-
+ v4l2_fh_init(&handle->vfh, vdev);
v4l2_fh_add(&handle->vfh);
handle->device = &uvc->video;
@@ -148,10 +136,6 @@ uvc_v4l2_open(struct file *file)
uvc_function_connect(uvc);
return 0;
-
-error:
- v4l2_fh_exit(&handle->vfh);
- return ret;
}
static int
@@ -313,7 +297,7 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
return -EINVAL;
- return v4l2_event_subscribe(&handle->vfh, arg);
+ return v4l2_event_subscribe(&handle->vfh, arg, 2);
}
case VIDIOC_UNSUBSCRIBE_EVENT:
@@ -353,7 +337,7 @@ uvc_v4l2_poll(struct file *file, poll_table *wait)
struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
unsigned int mask = 0;
- poll_wait(file, &handle->vfh.events->wait, wait);
+ poll_wait(file, &handle->vfh.wait, wait);
if (v4l2_event_pending(&handle->vfh))
mask |= POLLPRI;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index bf2c8f6..4c32cb1 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -343,7 +343,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
u32 temp;
u32 power_okay;
int i;
- u8 resume_needed = 0;
+ unsigned long resume_needed = 0;
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
@@ -416,7 +416,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
if (test_bit(i, &ehci->bus_suspended) &&
(temp & PORT_SUSPEND)) {
temp |= PORT_RESUME;
- resume_needed = 1;
+ set_bit(i, &resume_needed);
}
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
}
@@ -431,8 +431,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
- if (test_bit(i, &ehci->bus_suspended) &&
- (temp & PORT_SUSPEND)) {
+ if (test_bit(i, &resume_needed)) {
temp &= ~(PORT_RWC_BITS | PORT_RESUME);
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
ehci_vdbg (ehci, "resumed port %d\n", i + 1);
@@ -1046,7 +1045,19 @@ static int ehci_hub_control (
if (!selector || selector > 5)
goto error;
ehci_quiesce(ehci);
+
+ /* Put all enabled ports into suspend */
+ while (ports--) {
+ u32 __iomem *sreg =
+ &ehci->regs->port_status[ports];
+
+ temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS;
+ if (temp & PORT_PE)
+ ehci_writel(ehci, temp | PORT_SUSPEND,
+ sreg);
+ }
ehci_halt(ehci);
+ temp = ehci_readl(ehci, status_reg);
temp |= selector << 16;
ehci_writel(ehci, temp, status_reg);
break;
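
Side note on the ehci-hub.c hunks above (not part of the patch): turning resume_needed into an unsigned long bitmap means the second pass only touches the ports the first pass actually put into resume, instead of re-deriving that from registers that may have changed in the meantime. A plain-C sketch of the same per-port bookkeeping, with ordinary bit operations standing in for set_bit()/test_bit():

/* Sketch: remembering per-port state in a bitmask between two passes. */
#include <stdio.h>

#define N_PORTS 8

int main(void)
{
	unsigned long resume_needed = 0;
	int port_suspended[N_PORTS] = { 0, 1, 0, 1, 0, 0, 1, 0 };	/* example state */
	int i;

	/* first pass: mark the ports we told to resume */
	for (i = 0; i < N_PORTS; i++)
		if (port_suspended[i])
			resume_needed |= 1UL << i;		/* ~ set_bit(i, &resume_needed) */

	/* second pass: finish resume only on the marked ports */
	for (i = 0; i < N_PORTS; i++)
		if (resume_needed & (1UL << i))			/* ~ test_bit(i, &resume_needed) */
			printf("resumed port %d\n", i + 1);

	return 0;
}
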
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 0c058be..555a73c 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -24,6 +24,7 @@
#include <linux/usb/ulpi.h>
#include <linux/slab.h>
+#include <mach/hardware.h>
#include <mach/mxc_ehci.h>
#include <asm/mach-types.h>
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 55a57c2..4524032 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -98,6 +98,18 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
}
}
+static void disable_put_regulator(
+ struct ehci_hcd_omap_platform_data *pdata)
+{
+ int i;
+
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (pdata->regulator[i]) {
+ regulator_disable(pdata->regulator[i]);
+ regulator_put(pdata->regulator[i]);
+ }
+ }
+}
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
@@ -231,9 +243,11 @@ err_add_hcd:
omap_usbhs_disable(dev);
err_enable:
+ disable_put_regulator(pdata);
usb_put_hcd(hcd);
err_io:
+ iounmap(regs);
return ret;
}
@@ -253,6 +267,8 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
omap_usbhs_disable(dev);
+ disable_put_regulator(dev->platform_data);
+ iounmap(hcd->regs);
usb_put_hcd(hcd);
return 0;
}
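
Side note on the ehci-omap.c hunks above (not part of the patch): disable_put_regulator() releases only the array slots that were actually populated, so the probe error path and the remove path can share one teardown helper. A standalone sketch of that pattern, with malloc/free standing in for regulator_get()/regulator_put():

/* Sketch: one teardown helper that skips unpopulated slots. */
#include <stdio.h>
#include <stdlib.h>

#define NUM_PORTS 3

static void release_all(char *res[NUM_PORTS])
{
	int i;

	for (i = 0; i < NUM_PORTS; i++) {
		if (res[i]) {			/* skip ports with no resource */
			free(res[i]);
			res[i] = NULL;
			printf("released port %d\n", i);
		}
	}
}

int main(void)
{
	char *res[NUM_PORTS] = { malloc(8), NULL, malloc(8) };

	release_all(res);			/* only ports 0 and 2 are released */
	return 0;
}
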
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index b3958b3..9e77f1c 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -86,6 +86,7 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev)
goto fail_hcd;
}
+ s5p_ehci->hcd = hcd;
s5p_ehci->clk = clk_get(&pdev->dev, "usbhost");
if (IS_ERR(s5p_ehci->clk)) {
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 55d3d58..840beda 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1583,6 +1583,9 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int retval = 0;
spin_lock_irqsave(&priv->lock, spinflags);
+ retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (retval)
+ goto out;
qh = urb->ep->hcpriv;
if (!qh) {
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a9d3159..629a968 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -535,7 +535,7 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
iounmap(base);
}
-static const struct dmi_system_id __initconst ehci_dmi_nohandoff_table[] = {
+static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
{
/* Pegatron Lucid (ExoPC) */
.matches = {
@@ -817,7 +817,7 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
/* If the BIOS owns the HC, signal that the OS wants it, and wait */
if (val & XHCI_HC_BIOS_OWNED) {
- writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset);
+ writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
/* Wait for 5 seconds with 10 microsecond polling interval */
timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
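
Side note on the pci-quirks.c fix above (not part of the patch): "val & XHCI_HC_OS_OWNED" can only keep a bit that is already set, so while the BIOS still owns the controller the write requests nothing; OR-ing the flag in sets the OS-ownership request bit and preserves the rest of the register. A tiny sketch with made-up bit positions:

/* Sketch: requesting an ownership bit with | rather than &. */
#include <stdio.h>

#define HC_BIOS_OWNED	(1u << 16)	/* illustrative bit positions */
#define HC_OS_OWNED	(1u << 24)

int main(void)
{
	unsigned int val = HC_BIOS_OWNED;	/* BIOS currently owns the controller */

	printf("val & HC_OS_OWNED = 0x%08x\n", val & HC_OS_OWNED);	/* 0: nothing requested */
	printf("val | HC_OS_OWNED = 0x%08x\n", val | HC_OS_OWNED);	/* bit set, rest preserved */
	return 0;
}
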
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0be788c..1e96d1f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -463,11 +463,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
&& (temp & PORT_POWER))
status |= USB_PORT_STAT_SUSPEND;
}
- if ((temp & PORT_PLS_MASK) == XDEV_RESUME) {
+ if ((temp & PORT_PLS_MASK) == XDEV_RESUME &&
+ !DEV_SUPERSPEED(temp)) {
if ((temp & PORT_RESET) || !(temp & PORT_PE))
goto error;
- if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies,
- bus_state->resume_done[wIndex])) {
+ if (time_after_eq(jiffies,
+ bus_state->resume_done[wIndex])) {
xhci_dbg(xhci, "Resume USB2 port %d\n",
wIndex + 1);
bus_state->resume_done[wIndex] = 0;
@@ -487,6 +488,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_ring_device(xhci, slot_id);
bus_state->port_c_suspend |= 1 << wIndex;
bus_state->suspended_ports &= ~(1 << wIndex);
+ } else {
+ /*
+ * The resume has been signaling for less than
+ * 20ms. Report the port status as SUSPEND,
+ * let the usbcore check port status again
+ * and clear resume signaling later.
+ */
+ status |= USB_PORT_STAT_SUSPEND;
}
}
if ((temp & PORT_PLS_MASK) == XDEV_U0
@@ -664,7 +673,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_dbg(xhci, "PORTSC %04x\n", temp);
if (temp & PORT_RESET)
goto error;
- if (temp & XDEV_U3) {
+ if ((temp & PORT_PLS_MASK) == XDEV_U3) {
if ((temp & PORT_PE) == 0)
goto error;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7113d16..54139a2 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -514,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
(unsigned long long) addr);
}
+/* flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
+ */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
- struct xhci_td *cur_td)
+ struct xhci_td *cur_td, bool flip_cycle)
{
struct xhci_segment *cur_seg;
union xhci_trb *cur_trb;
@@ -528,6 +532,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
* leave the pointers intact.
*/
cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
+ /* Flip the cycle bit (link TRBs can't be the first
+ * or last TRB).
+ */
+ if (flip_cycle)
+ cur_trb->generic.field[3] ^=
+ cpu_to_le32(TRB_CYCLE);
xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
xhci_dbg(xhci, "Address = %p (0x%llx dma); "
"in seg %p (0x%llx dma)\n",
@@ -541,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
cur_trb->generic.field[2] = 0;
/* Preserve only the cycle bit of this TRB */
cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+ /* Flip the cycle bit except on the first or last TRB */
+ if (flip_cycle && cur_trb != cur_td->first_trb &&
+ cur_trb != cur_td->last_trb)
+ cur_trb->generic.field[3] ^=
+ cpu_to_le32(TRB_CYCLE);
cur_trb->generic.field[3] |= cpu_to_le32(
TRB_TYPE(TRB_TR_NOOP));
xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
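
Side note on the td_to_noop() change above (not part of the patch): XOR-ing TRB_CYCLE toggles exactly the cycle bit and leaves every other control bit alone, which is how partially queued TRBs are handed back to software ownership. A minimal sketch; the bit position and field value are only illustrative:

/* Sketch: toggling a cycle bit in place with XOR. */
#include <stdio.h>

#define TRB_CYCLE	(1u << 0)

int main(void)
{
	unsigned int field3 = 0x00000401;	/* some control bits, cycle = 1 */

	field3 ^= TRB_CYCLE;			/* cycle 1 -> 0, other bits untouched */
	printf("after first flip:  0x%08x\n", field3);

	field3 ^= TRB_CYCLE;			/* cycle 0 -> 1 again */
	printf("after second flip: 0x%08x\n", field3);
	return 0;
}
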
@@ -719,14 +734,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
cur_td->urb->stream_id,
cur_td, &deq_state);
else
- td_to_noop(xhci, ep_ring, cur_td);
+ td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
/*
* The event handler won't see a completion for this TD anymore,
* so remove it from the endpoint ring's TD list. Keep it in
* the cancelled TD list for URB completion later.
*/
- list_del(&cur_td->td_list);
+ list_del_init(&cur_td->td_list);
}
last_unlinked_td = cur_td;
xhci_stop_watchdog_timer_in_irq(xhci, ep);
@@ -754,7 +769,7 @@ remove_finished_td:
do {
cur_td = list_entry(ep->cancelled_td_list.next,
struct xhci_td, cancelled_td_list);
- list_del(&cur_td->cancelled_td_list);
+ list_del_init(&cur_td->cancelled_td_list);
/* Clean up the cancelled URB */
/* Doesn't matter what we pass for status, since the core will
@@ -862,9 +877,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
cur_td = list_first_entry(&ring->td_list,
struct xhci_td,
td_list);
- list_del(&cur_td->td_list);
+ list_del_init(&cur_td->td_list);
if (!list_empty(&cur_td->cancelled_td_list))
- list_del(&cur_td->cancelled_td_list);
+ list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td,
-ESHUTDOWN, "killed");
}
@@ -873,7 +888,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
&temp_ep->cancelled_td_list,
struct xhci_td,
cancelled_td_list);
- list_del(&cur_td->cancelled_td_list);
+ list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td,
-ESHUTDOWN, "killed");
}
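
Side note on the list_del() -> list_del_init() conversions above (not part of the patch): later code decides whether a TD is still queued with list_empty() on the node itself, and that test is only meaningful if deletion re-initialises the node to point at itself. In the real kernel plain list_del() poisons the node's pointers; in the stand-in below they merely go stale, but either way the check gives the wrong answer. A self-contained sketch with a minimal version of the list helpers:

/* Sketch: why a later list_empty() check needs list_del_init(). */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

static void list_add(struct list_head *node, struct list_head *head)
{
	node->next = head->next;
	node->prev = head;
	head->next->prev = node;
	head->next = node;
}

static void __list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static void list_del(struct list_head *entry)
{
	__list_del(entry);		/* entry's own pointers left stale */
}

static void list_del_init(struct list_head *entry)
{
	__list_del(entry);
	INIT_LIST_HEAD(entry);		/* node now points at itself */
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head queue, td;

	INIT_LIST_HEAD(&queue);
	INIT_LIST_HEAD(&td);

	list_add(&td, &queue);
	list_del(&td);
	printf("after list_del:      queued? %s\n", list_empty(&td) ? "no" : "yes");	/* wrongly "yes" */

	list_add(&td, &queue);
	list_del_init(&td);
	printf("after list_del_init: queued? %s\n", list_empty(&td) ? "no" : "yes");	/* correctly "no" */
	return 0;
}
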
@@ -1565,10 +1580,10 @@ td_cleanup:
else
*status = 0;
}
- list_del(&td->td_list);
+ list_del_init(&td->td_list);
/* Was this TD slated to be cancelled but completed anyway? */
if (!list_empty(&td->cancelled_td_list))
- list_del(&td->cancelled_td_list);
+ list_del_init(&td->cancelled_td_list);
urb_priv->td_cnt++;
/* Giveback the urb when all the tds are completed */
@@ -2500,11 +2515,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
if (td_index == 0) {
ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
- if (unlikely(ret)) {
- xhci_urb_free_priv(xhci, urb_priv);
- urb->hcpriv = NULL;
+ if (unlikely(ret))
return ret;
- }
}
td->urb = urb;
@@ -2672,6 +2684,10 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
{
int packets_transferred;
+ /* One TRB with a zero-length data packet. */
+ if (running_total == 0 && trb_buff_len == 0)
+ return 0;
+
/* All the TRB queueing functions don't count the current TRB in
* running_total.
*/
@@ -3113,20 +3129,15 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
struct urb *urb, int i)
{
int num_trbs = 0;
- u64 addr, td_len, running_total;
+ u64 addr, td_len;
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
- running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
- running_total &= TRB_MAX_BUFF_SIZE - 1;
- if (running_total != 0)
- num_trbs++;
-
- while (running_total < td_len) {
+ num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+ TRB_MAX_BUFF_SIZE);
+ if (num_trbs == 0)
num_trbs++;
- running_total += TRB_MAX_BUFF_SIZE;
- }
return num_trbs;
}
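
Side note on the count_isoc_trbs_needed() rewrite above (not part of the patch): DIV_ROUND_UP over the buffer length plus the offset into the first segment gives the same TRB count as the old running_total loop in a single expression, with a bump to 1 for zero-length frames. A standalone sketch; TRB_MAX_BUFF_SIZE just needs to be the driver's power-of-two segment size (64K here):

/* Sketch: TRB count via DIV_ROUND_UP instead of a loop. */
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE	(1u << 16)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned count_trbs(unsigned long long addr, unsigned len)
{
	unsigned n = DIV_ROUND_UP(len + (unsigned)(addr & (TRB_MAX_BUFF_SIZE - 1)),
				  TRB_MAX_BUFF_SIZE);
	return n ? n : 1;		/* a zero-length TD still needs one TRB */
}

int main(void)
{
	printf("%u\n", count_trbs(0x00000, 1000));	/* 1: fits in one segment */
	printf("%u\n", count_trbs(0x0FF00, 1000));	/* 2: crosses a 64K boundary */
	printf("%u\n", count_trbs(0x20000, 0));		/* 1: zero-length frame */
	return 0;
}
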
@@ -3226,6 +3237,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
+ urb_priv = urb->hcpriv;
/* Queue the first TRB, even if it's zero-length */
for (i = 0; i < num_tds; i++) {
unsigned int total_packet_count;
@@ -3237,9 +3249,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
addr = start_addr + urb->iso_frame_desc[i].offset;
td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len;
- /* FIXME: Ignoring zero-length packets, can those happen? */
total_packet_count = roundup(td_len,
le16_to_cpu(urb->ep->desc.wMaxPacketSize));
+ /* A zero-length transfer still involves at least one packet. */
+ if (total_packet_count == 0)
+ total_packet_count++;
burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
total_packet_count);
residue = xhci_get_last_burst_packet_count(xhci,
@@ -3249,12 +3263,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
urb->stream_id, trbs_per_td, urb, i, mem_flags);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ if (i == 0)
+ return ret;
+ goto cleanup;
+ }
- urb_priv = urb->hcpriv;
td = urb_priv->td[i];
-
for (j = 0; j < trbs_per_td; j++) {
u32 remainder = 0;
field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
@@ -3344,6 +3359,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
+cleanup:
+ /* Clean up a partially enqueued isoc transfer. */
+
+ for (i--; i >= 0; i--)
+ list_del_init(&urb_priv->td[i]->td_list);
+
+ /* Use the first TD as a temporary variable to turn the TDs we've queued
+ * into No-ops with a software-owned cycle bit. That way the hardware
+ * won't accidentally start executing bogus TDs when we partially
+ * overwrite them. td->first_trb and td->start_seg are already set.
+ */
+ urb_priv->td[0]->last_trb = ep_ring->enqueue;
+ /* Every TRB except the first & last will have its cycle bit flipped. */
+ td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
+
+ /* Reset the ring enqueue back to the first TRB and its cycle bit. */
+ ep_ring->enqueue = urb_priv->td[0]->first_trb;
+ ep_ring->enq_seg = urb_priv->td[0]->start_seg;
+ ep_ring->cycle_state = start_cycle;
+ usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+ return ret;
}
/*
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 763f484..3a0f695 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -345,7 +345,8 @@ static void xhci_event_ring_work(unsigned long arg)
spin_lock_irqsave(&xhci->lock, flags);
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
- if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
+ if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "HW died, polling stopped.\n");
spin_unlock_irqrestore(&xhci->lock, flags);
return;
@@ -939,8 +940,11 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
return 0;
}
+ xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_HALTED)
+ return -ENODEV;
+
if (check_virt_dev) {
- xhci = hcd_to_xhci(hcd);
if (!udev->slot_id || !xhci->devs
|| !xhci->devs[udev->slot_id]) {
printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1081,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
if (urb->dev->speed == USB_SPEED_FULL) {
ret = xhci_check_maxpacket(xhci, slot_id,
ep_index, urb);
- if (ret < 0)
+ if (ret < 0) {
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
return ret;
+ }
}
/* We have a spinlock and interrupts disabled, so we must pass
@@ -1093,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
goto dying;
ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
@@ -1113,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
}
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
@@ -1120,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
goto dying;
ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
} else {
spin_lock_irqsave(&xhci->lock, flags);
@@ -1127,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
goto dying;
ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
}
exit:
return ret;
dying:
- xhci_urb_free_priv(xhci, urb_priv);
- urb->hcpriv = NULL;
xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
"non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
+ ret = -ESHUTDOWN;
+free_priv:
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
spin_unlock_irqrestore(&xhci->lock, flags);
- return -ESHUTDOWN;
+ return ret;
}
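
Side note on the xhci_urb_enqueue() rework above (not part of the patch): every failure path now sets ret and converges on one free_priv label, so the per-URB allocation is released exactly once regardless of which queueing step failed, and the dying path simply sets -ESHUTDOWN before falling into the same label. A standalone sketch of that single-exit cleanup shape; names, error numbers and the failing steps are made up:

/* Sketch: single cleanup label shared by all failure paths. */
#include <stdio.h>
#include <stdlib.h>

static int queue_transfer(int fail_step, char **out_priv)
{
	int ret = 0;
	char *priv = malloc(64);	/* stands in for urb_priv */

	if (!priv)
		return -12;		/* ~ -ENOMEM */

	if (fail_step == 1) {		/* e.g. the controller is dying */
		ret = -108;		/* ~ -ESHUTDOWN */
		goto free_priv;
	}
	if (fail_step == 2) {		/* e.g. the ring rejected the transfer */
		ret = -12;
		goto free_priv;
	}

	*out_priv = priv;		/* success: the caller now owns the state */
	return 0;

free_priv:
	free(priv);			/* the one place that undoes the setup */
	return ret;
}

int main(void)
{
	char *priv = NULL;

	printf("dying: %d\n", queue_transfer(1, &priv));
	printf("full:  %d\n", queue_transfer(2, &priv));
	printf("ok:    %d\n", queue_transfer(0, &priv));
	free(priv);
	return 0;
}
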
/* Get the right ring for the given URB.
@@ -1235,6 +1252,13 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "HW died, freeing TD.\n");
urb_priv = urb->hcpriv;
+ for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ td = urb_priv->td[i];
+ if (!list_empty(&td->td_list))
+ list_del_init(&td->td_list);
+ if (!list_empty(&td->cancelled_td_list))
+ list_del_init(&td->cancelled_td_list);
+ }
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1242,7 +1266,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_urb_free_priv(xhci, urb_priv);
return ret;
}
- if (xhci->xhc_state & XHCI_STATE_DYING) {
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
"non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
@@ -2665,7 +2690,10 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
int i, ret;
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
- if (ret <= 0)
+ /* If the host is halted due to driver unload, we still need to free the
+ * device.
+ */
+ if (ret <= 0 && ret != -ENODEV)
return;
virt_dev = xhci->devs[udev->slot_id];
@@ -2679,7 +2707,8 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
state = xhci_readl(xhci, &xhci->op_regs->status);
- if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
+ if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_free_virt_device(xhci, udev->slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
return;
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index a003796..27e209a 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -131,7 +131,7 @@
#include <linux/usb.h>
#include <linux/proc_fs.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/blkdev.h>
#include "../../scsi/scsi.h"
#include <scsi/scsi_host.h>
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 68ab460..ac0d75a 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -29,7 +29,7 @@
#include <linux/backlight.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define APPLE_VENDOR_ID 0x05AC
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 6192b45..fc34b8b 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -3,9 +3,6 @@
# for silicon based on Mentor Graphics INVENTRA designs
#
-comment "Enable Host or Gadget support to see Inventra options"
- depends on !USB && USB_GADGET=n
-
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
depends on USB && USB_GADGET
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ae8c396..5e7cfba 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
#include <asm/cacheflush.h>
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 149f3f3..318fb4e 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -226,8 +226,10 @@ static int cppi_controller_stop(struct dma_controller *c)
struct cppi *controller;
void __iomem *tibase;
int i;
+ struct musb *musb;
controller = container_of(c, struct cppi, controller);
+ musb = controller->musb;
tibase = controller->tibase;
/* DISABLE INDIVIDUAL CHANNEL Interrupts */
@@ -289,9 +291,11 @@ cppi_channel_allocate(struct dma_controller *c,
u8 index;
struct cppi_channel *cppi_ch;
void __iomem *tibase;
+ struct musb *musb;
controller = container_of(c, struct cppi, controller);
tibase = controller->tibase;
+ musb = controller->musb;
/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
index = ep->epnum - 1;
@@ -339,7 +343,8 @@ static void cppi_channel_release(struct dma_channel *channel)
c = container_of(channel, struct cppi_channel, channel);
tibase = c->controller->tibase;
if (!c->hw_ep)
- dev_dbg(musb->controller, "releasing idle DMA channel %p\n", c);
+ dev_dbg(c->controller->musb->controller,
+ "releasing idle DMA channel %p\n", c);
else if (!c->transmit)
core_rxirq_enable(tibase, c->index + 1);
@@ -357,10 +362,11 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
musb_ep_select(base, c->index + 1);
- DBG(level, "RX DMA%d%s: %d left, csr %04x, "
- "%08x H%08x S%08x C%08x, "
- "B%08x L%08x %08x .. %08x"
- "\n",
+ dev_dbg(c->controller->musb->controller,
+ "RX DMA%d%s: %d left, csr %04x, "
+ "%08x H%08x S%08x C%08x, "
+ "B%08x L%08x %08x .. %08x"
+ "\n",
c->index, tag,
musb_readl(c->controller->tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
@@ -387,10 +393,11 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
musb_ep_select(base, c->index + 1);
- DBG(level, "TX DMA%d%s: csr %04x, "
- "H%08x S%08x C%08x %08x, "
- "F%08x L%08x .. %08x"
- "\n",
+ dev_dbg(c->controller->musb->controller,
+ "TX DMA%d%s: csr %04x, "
+ "H%08x S%08x C%08x %08x, "
+ "F%08x L%08x .. %08x"
+ "\n",
c->index, tag,
musb_readw(c->hw_ep->regs, MUSB_TXCSR),
@@ -1022,6 +1029,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
int i;
dma_addr_t safe2ack;
void __iomem *regs = rx->hw_ep->regs;
+ struct musb *musb = cppi->musb;
cppi_dump_rx(6, rx, "/K");
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 668eeef..b3c065a 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -172,7 +172,8 @@ enum musb_g_ep0_state {
#endif
/* TUSB mapping: "flat" plus ep0 special cases */
-#if defined(CONFIG_USB_MUSB_TUSB6010)
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
#define musb_ep_select(_mbase, _epnum) \
musb_writeb((_mbase), MUSB_INDEX, (_epnum))
#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
@@ -241,7 +242,8 @@ struct musb_hw_ep {
void __iomem *fifo;
void __iomem *regs;
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
void __iomem *conf;
#endif
@@ -258,7 +260,8 @@ struct musb_hw_ep {
struct dma_channel *tx_channel;
struct dma_channel *rx_channel;
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
/* TUSB has "asynchronous" and "synchronous" dma modes */
dma_addr_t fifo_async;
dma_addr_t fifo_sync;
@@ -356,7 +359,8 @@ struct musb {
void __iomem *ctrl_base;
void __iomem *mregs;
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
dma_addr_t async;
dma_addr_t sync;
void __iomem *sync_va;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 95a67fe..fe8d14c 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1698,6 +1698,8 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
is_on = !!is_on;
+ pm_runtime_get_sync(musb->controller);
+
/* NOTE: this assumes we are sensing vbus; we'd rather
* not pullup unless the B-session is active.
*/
@@ -1707,6 +1709,9 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
musb_pullup(musb, is_on);
}
spin_unlock_irqrestore(&musb->lock, flags);
+
+ pm_runtime_put(musb->controller);
+
return 0;
}
@@ -1851,6 +1856,7 @@ int __init musb_gadget_setup(struct musb *musb)
return 0;
err:
+ musb->g.dev.parent = NULL;
device_unregister(&musb->g.dev);
return status;
}
@@ -1858,7 +1864,8 @@ err:
void musb_gadget_cleanup(struct musb *musb)
{
usb_del_gadget_udc(&musb->g);
- device_unregister(&musb->g.dev);
+ if (musb->g.dev.parent)
+ device_unregister(&musb->g.dev);
}
/*
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 8241070..03f2655 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -234,7 +234,8 @@
#define MUSB_TESTMODE 0x0F /* 8 bit */
/* Get offset for a given FIFO from musb->mregs */
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
#else
#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
@@ -295,7 +296,8 @@
#define MUSB_FLAT_OFFSET(_epnum, _offset) \
(0x100 + (0x10*(_epnum)) + (_offset))
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || \
+ defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
/* TUSB6010 EP0 configuration register is special */
#define MUSB_TUSB_OFFSET(_epnum, _offset) \
(0x10 + _offset)
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 9eec41f..ec14801 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/prefetch.h>
#include <linux/usb.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index c784e6c..b67b4bc 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -20,6 +20,7 @@
#include <plat/mux.h>
#include "musb_core.h"
+#include "tusb6010.h"
#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
@@ -89,7 +90,7 @@ static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
if (reg != 0) {
- dev_dbg(musb->controller, "ep%i dmareq0 is busy for ep%i\n",
+ dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
chdat->epnum, reg & 0xf);
return -EAGAIN;
}
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index cecace4..ef4333f 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -65,7 +65,8 @@ static void ux500_tx_work(struct work_struct *data)
struct musb *musb = hw_ep->musb;
unsigned long flags;
- DBG(4, "DMA tx transfer done on hw_ep=%d\n", hw_ep->epnum);
+ dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n",
+ hw_ep->epnum);
spin_lock_irqsave(&musb->lock, flags);
ux500_channel->channel.actual_len = ux500_channel->cur_len;
@@ -84,7 +85,8 @@ static void ux500_rx_work(struct work_struct *data)
struct musb *musb = hw_ep->musb;
unsigned long flags;
- DBG(4, "DMA rx transfer done on hw_ep=%d\n", hw_ep->epnum);
+ dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n",
+ hw_ep->epnum);
spin_lock_irqsave(&musb->lock, flags);
ux500_channel->channel.actual_len = ux500_channel->cur_len;
@@ -116,9 +118,11 @@ static bool ux500_configure_channel(struct dma_channel *channel,
enum dma_slave_buswidth addr_width;
dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) +
ux500_channel->controller->phy_base);
+ struct musb *musb = ux500_channel->controller->private_data;
- DBG(4, "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n",
- packet_sz, mode, dma_addr, len, ux500_channel->is_tx);
+ dev_dbg(musb->controller,
+ "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n",
+ packet_sz, mode, dma_addr, len, ux500_channel->is_tx);
ux500_channel->cur_len = len;
@@ -133,15 +137,13 @@ static bool ux500_configure_channel(struct dma_channel *channel,
DMA_SLAVE_BUSWIDTH_4_BYTES;
slave_conf.direction = direction;
- if (direction == DMA_FROM_DEVICE) {
- slave_conf.src_addr = usb_fifo_addr;
- slave_conf.src_addr_width = addr_width;
- slave_conf.src_maxburst = 16;
- } else {
- slave_conf.dst_addr = usb_fifo_addr;
- slave_conf.dst_addr_width = addr_width;
- slave_conf.dst_maxburst = 16;
- }
+ slave_conf.src_addr = usb_fifo_addr;
+ slave_conf.src_addr_width = addr_width;
+ slave_conf.src_maxburst = 16;
+ slave_conf.dst_addr = usb_fifo_addr;
+ slave_conf.dst_addr_width = addr_width;
+ slave_conf.dst_maxburst = 16;
+
dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
(unsigned long) &slave_conf);
@@ -166,6 +168,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
struct ux500_dma_controller *controller = container_of(c,
struct ux500_dma_controller, controller);
struct ux500_dma_channel *ux500_channel = NULL;
+ struct musb *musb = controller->private_data;
u8 ch_num = hw_ep->epnum - 1;
u32 max_ch;
@@ -192,7 +195,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
ux500_channel->hw_ep = hw_ep;
ux500_channel->is_allocated = 1;
- DBG(7, "hw_ep=%d, is_tx=0x%x, channel=%d\n",
+ dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n",
hw_ep->epnum, is_tx, ch_num);
return &(ux500_channel->channel);
@@ -201,8 +204,9 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
static void ux500_dma_channel_release(struct dma_channel *channel)
{
struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct musb *musb = ux500_channel->controller->private_data;
- DBG(7, "channel=%d\n", ux500_channel->ch_num);
+ dev_dbg(musb->controller, "channel=%d\n", ux500_channel->ch_num);
if (ux500_channel->is_allocated) {
ux500_channel->is_allocated = 0;
@@ -252,8 +256,8 @@ static int ux500_dma_channel_abort(struct dma_channel *channel)
void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;
u16 csr;
- DBG(4, "channel=%d, is_tx=%d\n", ux500_channel->ch_num,
- ux500_channel->is_tx);
+ dev_dbg(musb->controller, "channel=%d, is_tx=%d\n",
+ ux500_channel->ch_num, ux500_channel->is_tx);
if (channel->status == MUSB_DMA_STATUS_BUSY) {
if (ux500_channel->is_tx) {
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 406893e..a34430f 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -16,6 +16,7 @@
*/
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/scatterlist.h>
#include "./common.h"
#include "./pipe.h"
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index ba79dbf..cb2d451 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -14,6 +14,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -76,7 +77,7 @@ struct usbhsg_recip_handle {
struct usbhsg_gpriv, mod)
#define __usbhsg_for_each_uep(start, pos, g, i) \
- for (i = start, pos = (g)->uep; \
+ for (i = start, pos = (g)->uep + i; \
i < (g)->uep_size; \
i++, pos = (g)->uep + i)
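
Side note on the __usbhsg_for_each_uep() fix above (not part of the patch): with the old initialiser the cursor always started at element 0 even when the loop was asked to begin at a later index, so the first iteration touched the wrong endpoint; adding "+ i" keeps the cursor and the index in step from the first pass on. A standalone sketch of the same macro shape over a plain array:

/* Sketch: iteration macro whose cursor starts at the requested index. */
#include <stdio.h>

#define for_each_from(start, pos, arr, n, i) \
	for (i = (start), pos = (arr) + i; i < (n); i++, pos = (arr) + i)

int main(void)
{
	int ep[4] = { 100, 101, 102, 103 };
	int *pos;
	int i;

	/* starting at index 1 visits 101, 102, 103 and never element 0 */
	for_each_from(1, pos, ep, 4, i)
		printf("ep[%d] = %d\n", i, *pos);

	return 0;
}
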
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 2e06b90..5fc13e7 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -101,6 +101,7 @@ static int ftdi_jtag_probe(struct usb_serial *serial);
static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
static int ftdi_NDI_device_setup(struct usb_serial *serial);
static int ftdi_stmclite_probe(struct usb_serial *serial);
+static int ftdi_8u2232c_probe(struct usb_serial *serial);
static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
@@ -128,6 +129,10 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
.probe = ftdi_stmclite_probe,
};
+static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
+ .probe = ftdi_8u2232c_probe,
+};
+
/*
* The 8U232AM has the same API as the sio except for:
* - it can support MUCH higher baudrates; up to:
@@ -151,6 +156,7 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
* /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
*/
static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
@@ -177,7 +183,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
- { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) ,
+ .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
@@ -1171,7 +1178,7 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
case FT2232H: /* FT2232H chip */
case FT4232H: /* FT4232H chip */
case FT232H: /* FT232H chip */
- if ((baud <= 12000000) & (baud >= 1200)) {
+ if ((baud <= 12000000) && (baud >= 1200)) {
div_value = ftdi_2232h_baud_to_divisor(baud);
} else if (baud < 1200) {
div_value = ftdi_232bm_baud_to_divisor(baud);
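
Side note on the "&" -> "&&" change above (not part of the patch): because both operands are comparison results (always 0 or 1), the bitwise form happened to compute the same answer here, so the fix is about stating the logical intent and getting short-circuit evaluation; with arbitrary non-zero values the two operators genuinely disagree. A tiny sketch:

/* Sketch: where bitwise & and logical && agree and where they do not. */
#include <stdio.h>

int main(void)
{
	int baud = 9600;

	/* both print 1: comparisons only ever yield 0 or 1 */
	printf("%d\n", (baud <= 12000000) &  (baud >= 1200));
	printf("%d\n", (baud <= 12000000) && (baud >= 1200));

	/* the trap: 2 and 4 are both "true" but share no bits */
	printf("%d\n", 2 & 4);		/* 0 */
	printf("%d\n", 2 && 4);		/* 1 */
	return 0;
}
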
@@ -1205,7 +1212,10 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
urb_index_value = get_ftdi_divisor(tty, port);
urb_value = (__u16)urb_index_value;
urb_index = (__u16)(urb_index_value >> 16);
- if (priv->interface) { /* FT2232C */
+ if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
+ (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
+ /* Probably the BM type needs the MSB of the encoded fractional
+ * divider also moved like for the chips above. Any infos? */
urb_index = (__u16)((urb_index << 8) | priv->interface);
}
@@ -1733,6 +1743,18 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
return 0;
}
+static int ftdi_8u2232c_probe(struct usb_serial *serial)
+{
+ struct usb_device *udev = serial->dev;
+
+ dbg("%s", __func__);
+
+ if (strcmp(udev->manufacturer, "CALAO Systems") == 0)
+ return ftdi_jtag_probe(serial);
+
+ return 0;
+}
+
/*
* First and second port on STMCLiteadaptors is reserved for JTAG interface
* and the forth port for pio
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 19156d1..bf5227a 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1159,4 +1159,8 @@
/* USB-Nano-485*/
#define FTDI_CTI_NANO_PID 0xF60B
-
+/*
+ * ZeitControl cardsystems GmbH rfid-readers http://zeitconrol.de
+ */
+/* TagTracer MIFARE*/
+#define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index b0a7a9e..1a49ca9 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -34,7 +34,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 60b25d8..fe22e90 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -148,6 +148,12 @@ static void option_instat_callback(struct urb *urb);
#define HUAWEI_PRODUCT_K4505 0x1464
#define HUAWEI_PRODUCT_K3765 0x1465
#define HUAWEI_PRODUCT_E14AC 0x14AC
+#define HUAWEI_PRODUCT_K3806 0x14AE
+#define HUAWEI_PRODUCT_K4605 0x14C6
+#define HUAWEI_PRODUCT_K3770 0x14C9
+#define HUAWEI_PRODUCT_K3771 0x14CA
+#define HUAWEI_PRODUCT_K4510 0x14CB
+#define HUAWEI_PRODUCT_K4511 0x14CC
#define HUAWEI_PRODUCT_ETS1220 0x1803
#define HUAWEI_PRODUCT_E353 0x1506
@@ -412,6 +418,56 @@ static void option_instat_callback(struct urb *urb);
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889
+/* YUGA products www.yuga-info.com*/
+#define YUGA_VENDOR_ID 0x257A
+#define YUGA_PRODUCT_CEM600 0x1601
+#define YUGA_PRODUCT_CEM610 0x1602
+#define YUGA_PRODUCT_CEM500 0x1603
+#define YUGA_PRODUCT_CEM510 0x1604
+#define YUGA_PRODUCT_CEM800 0x1605
+#define YUGA_PRODUCT_CEM900 0x1606
+
+#define YUGA_PRODUCT_CEU818 0x1607
+#define YUGA_PRODUCT_CEU816 0x1608
+#define YUGA_PRODUCT_CEU828 0x1609
+#define YUGA_PRODUCT_CEU826 0x160A
+#define YUGA_PRODUCT_CEU518 0x160B
+#define YUGA_PRODUCT_CEU516 0x160C
+#define YUGA_PRODUCT_CEU528 0x160D
+#define YUGA_PRODUCT_CEU526 0x160F
+
+#define YUGA_PRODUCT_CWM600 0x2601
+#define YUGA_PRODUCT_CWM610 0x2602
+#define YUGA_PRODUCT_CWM500 0x2603
+#define YUGA_PRODUCT_CWM510 0x2604
+#define YUGA_PRODUCT_CWM800 0x2605
+#define YUGA_PRODUCT_CWM900 0x2606
+
+#define YUGA_PRODUCT_CWU718 0x2607
+#define YUGA_PRODUCT_CWU716 0x2608
+#define YUGA_PRODUCT_CWU728 0x2609
+#define YUGA_PRODUCT_CWU726 0x260A
+#define YUGA_PRODUCT_CWU518 0x260B
+#define YUGA_PRODUCT_CWU516 0x260C
+#define YUGA_PRODUCT_CWU528 0x260D
+#define YUGA_PRODUCT_CWU526 0x260F
+
+#define YUGA_PRODUCT_CLM600 0x2601
+#define YUGA_PRODUCT_CLM610 0x2602
+#define YUGA_PRODUCT_CLM500 0x2603
+#define YUGA_PRODUCT_CLM510 0x2604
+#define YUGA_PRODUCT_CLM800 0x2605
+#define YUGA_PRODUCT_CLM900 0x2606
+
+#define YUGA_PRODUCT_CLU718 0x2607
+#define YUGA_PRODUCT_CLU716 0x2608
+#define YUGA_PRODUCT_CLU728 0x2609
+#define YUGA_PRODUCT_CLU726 0x260A
+#define YUGA_PRODUCT_CLU518 0x260B
+#define YUGA_PRODUCT_CLU516 0x260C
+#define YUGA_PRODUCT_CLU528 0x260D
+#define YUGA_PRODUCT_CLU526 0x260F
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -547,6 +603,16 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -993,6 +1059,48 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU826) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1122,11 +1230,13 @@ static int option_probe(struct usb_serial *serial,
serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
return -ENODEV;
- /* Don't bind network interfaces on Huawei K3765 & K4505 */
+ /* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */
if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
(serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
- serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) &&
- serial->interface->cur_altsetting->desc.bInterfaceNumber == 1)
+ serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 ||
+ serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) &&
+ (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 ||
+ serial->interface->cur_altsetting->desc.bInterfaceNumber == 2))
return -ENODEV;
/* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 54a9dab..aeccc7f 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -45,6 +45,7 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
+ {USB_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
@@ -78,6 +79,7 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
{USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
{USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ccff348..3041a97 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1988,6 +1988,16 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100,
"Micro Mini 1GB",
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+/*
+ * Nick Bowler <nbowler@elliptictech.com>
+ * SCSI stack spams (otherwise harmless) error messages.
+ */
+UNUSUAL_DEV( 0xc251, 0x4003, 0x0100, 0x0100,
+ "Keil Software, Inc.",
+ "V2M MotherBoard",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NOT_LOCKABLE),
+
/* Reported by Andrew Simmons <andrew.simmons@gmail.com> */
UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001,
"DataStor",
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index ca80171..2acc7f5 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -58,7 +58,7 @@
* destination address.
*/
#include <linux/init.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 14c9abf..a801e28 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -11,7 +11,7 @@
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/* This is for zerocopy, used buffer len is set to 1 when lower device DMA
* done */
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 1e54b8b..278aeaa 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -335,6 +335,13 @@ config BACKLIGHT_PCF50633
If you have a backlight driven by a NXP PCF50633 MFD, say Y here to
enable its driver.
+config BACKLIGHT_AAT2870
+ tristate "AnalogicTech AAT2870 Backlight"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_AAT2870_CORE
+ help
+ If you have an AnalogicTech AAT2870, say Y here to enable the
+ backlight driver.
+
endif # BACKLIGHT_CLASS_DEVICE
endif # BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index bf1dd92..fdd1fc4 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -38,4 +38,5 @@ obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
+obj-$(CONFIG_BACKLIGHT_AAT2870) += aat2870_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
new file mode 100644
index 0000000..331f1ef
--- /dev/null
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -0,0 +1,246 @@
+/*
+ * linux/drivers/video/backlight/aat2870_bl.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/mfd/aat2870.h>
+
+struct aat2870_bl_driver_data {
+ struct platform_device *pdev;
+ struct backlight_device *bd;
+
+ int channels;
+ int max_current;
+ int brightness; /* current brightness */
+};
+
+static inline int aat2870_brightness(struct aat2870_bl_driver_data *aat2870_bl,
+ int brightness)
+{
+ struct backlight_device *bd = aat2870_bl->bd;
+ int val;
+
+ val = brightness * (aat2870_bl->max_current - 1);
+ val /= bd->props.max_brightness;
+
+ return val;
+}
+
+static inline int aat2870_bl_enable(struct aat2870_bl_driver_data *aat2870_bl)
+{
+ struct aat2870_data *aat2870
+ = dev_get_drvdata(aat2870_bl->pdev->dev.parent);
+
+ return aat2870->write(aat2870, AAT2870_BL_CH_EN,
+ (u8)aat2870_bl->channels);
+}
+
+static inline int aat2870_bl_disable(struct aat2870_bl_driver_data *aat2870_bl)
+{
+ struct aat2870_data *aat2870
+ = dev_get_drvdata(aat2870_bl->pdev->dev.parent);
+
+ return aat2870->write(aat2870, AAT2870_BL_CH_EN, 0x0);
+}
+
+static int aat2870_bl_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static int aat2870_bl_update_status(struct backlight_device *bd)
+{
+ struct aat2870_bl_driver_data *aat2870_bl = dev_get_drvdata(&bd->dev);
+ struct aat2870_data *aat2870 =
+ dev_get_drvdata(aat2870_bl->pdev->dev.parent);
+ int brightness = bd->props.brightness;
+ int ret;
+
+ if ((brightness < 0) || (bd->props.max_brightness < brightness)) {
+ dev_err(&bd->dev, "invalid brightness, %d\n", brightness);
+ return -EINVAL;
+ }
+
+ dev_dbg(&bd->dev, "brightness=%d, power=%d, state=%d\n",
+ bd->props.brightness, bd->props.power, bd->props.state);
+
+ if ((bd->props.power != FB_BLANK_UNBLANK) ||
+ (bd->props.state & BL_CORE_FBBLANK) ||
+ (bd->props.state & BL_CORE_SUSPENDED))
+ brightness = 0;
+
+ ret = aat2870->write(aat2870, AAT2870_BLM,
+ (u8)aat2870_brightness(aat2870_bl, brightness));
+ if (ret < 0)
+ return ret;
+
+ if (brightness == 0) {
+ ret = aat2870_bl_disable(aat2870_bl);
+ if (ret < 0)
+ return ret;
+ } else if (aat2870_bl->brightness == 0) {
+ ret = aat2870_bl_enable(aat2870_bl);
+ if (ret < 0)
+ return ret;
+ }
+
+ aat2870_bl->brightness = brightness;
+
+ return 0;
+}
+
+static int aat2870_bl_check_fb(struct backlight_device *bd, struct fb_info *fi)
+{
+ return 1;
+}
+
+static const struct backlight_ops aat2870_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = aat2870_bl_get_brightness,
+ .update_status = aat2870_bl_update_status,
+ .check_fb = aat2870_bl_check_fb,
+};
+
+static int aat2870_bl_probe(struct platform_device *pdev)
+{
+ struct aat2870_bl_platform_data *pdata = pdev->dev.platform_data;
+ struct aat2870_bl_driver_data *aat2870_bl;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ int ret = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (pdev->id != AAT2870_ID_BL) {
+ dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ aat2870_bl = kzalloc(sizeof(struct aat2870_bl_driver_data), GFP_KERNEL);
+ if (!aat2870_bl) {
+ dev_err(&pdev->dev,
+ "Failed to allocate memory for aat2870 backlight\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+
+ props.type = BACKLIGHT_RAW;
+ bd = backlight_device_register("aat2870-backlight", &pdev->dev,
+ aat2870_bl, &aat2870_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ dev_err(&pdev->dev,
+ "Failed allocate memory for backlight device\n");
+ ret = PTR_ERR(bd);
+ goto out_kfree;
+ }
+
+ aat2870_bl->pdev = pdev;
+ platform_set_drvdata(pdev, aat2870_bl);
+
+ aat2870_bl->bd = bd;
+
+ if (pdata->channels > 0)
+ aat2870_bl->channels = pdata->channels;
+ else
+ aat2870_bl->channels = AAT2870_BL_CH_ALL;
+
+ if (pdata->max_current > 0)
+ aat2870_bl->max_current = pdata->max_current;
+ else
+ aat2870_bl->max_current = AAT2870_CURRENT_27_9;
+
+ if (pdata->max_brightness > 0)
+ bd->props.max_brightness = pdata->max_brightness;
+ else
+ bd->props.max_brightness = 255;
+
+ aat2870_bl->brightness = 0;
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.brightness = bd->props.max_brightness;
+
+ ret = aat2870_bl_update_status(bd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize\n");
+ goto out_bl_dev_unregister;
+ }
+
+ return 0;
+
+out_bl_dev_unregister:
+ backlight_device_unregister(bd);
+out_kfree:
+ kfree(aat2870_bl);
+out:
+ return ret;
+}
+
+static int aat2870_bl_remove(struct platform_device *pdev)
+{
+ struct aat2870_bl_driver_data *aat2870_bl = platform_get_drvdata(pdev);
+ struct backlight_device *bd = aat2870_bl->bd;
+
+ bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.brightness = 0;
+ backlight_update_status(bd);
+
+ backlight_device_unregister(bd);
+ kfree(aat2870_bl);
+
+ return 0;
+}
+
+static struct platform_driver aat2870_bl_driver = {
+ .driver = {
+ .name = "aat2870-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = aat2870_bl_probe,
+ .remove = aat2870_bl_remove,
+};
+
+static int __init aat2870_bl_init(void)
+{
+ return platform_driver_register(&aat2870_bl_driver);
+}
+subsys_initcall(aat2870_bl_init);
+
+static void __exit aat2870_bl_exit(void)
+{
+ platform_driver_unregister(&aat2870_bl_driver);
+}
+module_exit(aat2870_bl_exit);
+
+MODULE_DESCRIPTION("AnalogicTech AAT2870 Backlight");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 05a8832..d06886a 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -1009,4 +1009,4 @@ module_exit(adp8870_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("ADP8870 Backlight driver");
-MODULE_ALIAS("platform:adp8870-backlight");
+MODULE_ALIAS("i2c:adp8870-backlight");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 80d292f..7363c1b 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -19,7 +19,7 @@
#include <asm/backlight.h>
#endif
-static const char const *backlight_types[] = {
+static const char *const backlight_types[] = {
[BACKLIGHT_RAW] = "raw",
[BACKLIGHT_PLATFORM] = "platform",
[BACKLIGHT_FIRMWARE] = "firmware",
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index 9f1e389..b058291 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -11,7 +11,7 @@
* BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors.
*/
-
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/fb.h>
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index b8f38ec..8b5b2a4 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,6 +28,8 @@ struct pwm_bl_data {
unsigned int lth_brightness;
int (*notify)(struct device *,
int brightness);
+ void (*notify_after)(struct device *,
+ int brightness);
int (*check_fb)(struct device *, struct fb_info *);
};
@@ -55,6 +57,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
pwm_config(pb->pwm, brightness, pb->period);
pwm_enable(pb->pwm);
}
+
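+ /* Let the board's notify_after hook run once the PWM has been reconfigured */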
+ if (pb->notify_after)
+ pb->notify_after(pb->dev, brightness);
+
return 0;
}
@@ -105,6 +111,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->period = data->pwm_period_ns;
pb->notify = data->notify;
+ pb->notify_after = data->notify_after;
pb->check_fb = data->check_fb;
pb->lth_brightness = data->lth_brightness *
(data->pwm_period_ns / data->max_brightness);
@@ -172,6 +179,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
pb->notify(pb->dev, 0);
pwm_config(pb->pwm, 0, pb->period);
pwm_disable(pb->pwm);
+ if (pb->notify_after)
+ pb->notify_after(pb->dev, 0);
return 0;
}
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index fdd5d4ae..4e888ac 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -504,14 +504,18 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev)
return 0;
r = omapdss_dsi_display_enable(dssdev);
- if (r)
- goto err;
+ if (r) {
+ dev_err(&dssdev->dev, "failed to enable DSI\n");
+ goto err1;
+ }
omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
r = _taal_enable_te(dssdev, true);
- if (r)
- goto err;
+ if (r) {
+ dev_err(&dssdev->dev, "failed to re-enable TE");
+ goto err2;
+ }
enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
@@ -521,13 +525,15 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev)
return 0;
-err:
- dev_err(&dssdev->dev, "exit ULPS failed");
- r = taal_panel_reset(dssdev);
-
- enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
- td->ulps_enabled = false;
+err2:
+ dev_err(&dssdev->dev, "failed to exit ULPS");
+ r = taal_panel_reset(dssdev);
+ if (!r) {
+ enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
+ td->ulps_enabled = false;
+ }
+err1:
taal_queue_ulps_work(dssdev);
return r;
@@ -1241,11 +1247,8 @@ static void taal_power_off(struct omap_dss_device *dssdev)
int r;
r = taal_dcs_write_0(td, DCS_DISPLAY_OFF);
- if (!r) {
+ if (!r)
r = taal_sleep_in(td);
- /* HACK: wait a bit so that the message goes through */
- msleep(10);
- }
if (r) {
dev_err(&dssdev->dev,
@@ -1317,8 +1320,11 @@ static void taal_disable(struct omap_dss_device *dssdev)
dsi_bus_lock(dssdev);
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- taal_wake_up(dssdev);
- taal_power_off(dssdev);
+ int r;
+
+ r = taal_wake_up(dssdev);
+ if (!r)
+ taal_power_off(dssdev);
}
dsi_bus_unlock(dssdev);
@@ -1897,20 +1903,6 @@ err:
mutex_unlock(&td->lock);
}
-static int taal_set_update_mode(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode mode)
-{
- if (mode != OMAP_DSS_UPDATE_MANUAL)
- return -EINVAL;
- return 0;
-}
-
-static enum omap_dss_update_mode taal_get_update_mode(
- struct omap_dss_device *dssdev)
-{
- return OMAP_DSS_UPDATE_MANUAL;
-}
-
static struct omap_dss_driver taal_driver = {
.probe = taal_probe,
.remove = __exit_p(taal_remove),
@@ -1920,9 +1912,6 @@ static struct omap_dss_driver taal_driver = {
.suspend = taal_suspend,
.resume = taal_resume,
- .set_update_mode = taal_set_update_mode,
- .get_update_mode = taal_get_update_mode,
-
.update = taal_update,
.sync = taal_sync,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 6b3e2da..0d12524 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -117,18 +117,6 @@ config OMAP2_DSS_MIN_FCK_PER_PCK
Max FCK is 173MHz, so this doesn't work if your PCK
is very high.
-config OMAP2_DSS_SLEEP_BEFORE_RESET
- bool "Sleep 50ms before DSS reset"
- default y
- help
- For some unknown reason we may get SYNC_LOST errors from the display
- subsystem at initialization time if we don't sleep before resetting
- the DSS. See the source (dss.c) for more comments.
-
- However, 50ms is quite long time to sleep, and with some
- configurations the SYNC_LOST may never happen, so the sleep can
- be disabled here.
-
config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
bool "Sleep 20ms after VENC reset"
default y
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 3da4267..76821fe 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -183,8 +183,11 @@ static int omap_dss_probe(struct platform_device *pdev)
goto err_dss;
}
- /* keep clocks enabled to prevent context saves/restores during init */
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dispc_init_platform_driver();
+ if (r) {
+ DSSERR("Failed to initialize dispc platform driver\n");
+ goto err_dispc;
+ }
r = rfbi_init_platform_driver();
if (r) {
@@ -192,12 +195,6 @@ static int omap_dss_probe(struct platform_device *pdev)
goto err_rfbi;
}
- r = dispc_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize dispc platform driver\n");
- goto err_dispc;
- }
-
r = venc_init_platform_driver();
if (r) {
DSSERR("Failed to initialize venc platform driver\n");
@@ -238,8 +235,6 @@ static int omap_dss_probe(struct platform_device *pdev)
pdata->default_device = dssdev;
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
-
return 0;
err_register:
@@ -268,11 +263,11 @@ static int omap_dss_remove(struct platform_device *pdev)
dss_uninitialize_debugfs();
+ hdmi_uninit_platform_driver();
+ dsi_uninit_platform_driver();
venc_uninit_platform_driver();
- dispc_uninit_platform_driver();
rfbi_uninit_platform_driver();
- dsi_uninit_platform_driver();
- hdmi_uninit_platform_driver();
+ dispc_uninit_platform_driver();
dss_uninit_platform_driver();
dss_uninit_overlays(pdev);
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 7a9a2e7..0f3961a 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -33,6 +33,8 @@
#include <linux/workqueue.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <plat/sram.h>
#include <plat/clock.h>
@@ -77,6 +79,12 @@ struct dispc_v_coef {
s8 vc00;
};
+enum omap_burst_size {
+ BURST_SIZE_X2 = 0,
+ BURST_SIZE_X4 = 1,
+ BURST_SIZE_X8 = 2,
+};
+
#define REG_GET(idx, start, end) \
FLD_GET(dispc_read_reg(idx), start, end)
@@ -92,7 +100,11 @@ struct dispc_irq_stats {
static struct {
struct platform_device *pdev;
void __iomem *base;
+
+ int ctx_loss_cnt;
+
int irq;
+ struct clk *dss_clk;
u32 fifo_size[3];
@@ -102,6 +114,7 @@ static struct {
u32 error_irqs;
struct work_struct error_work;
+ bool ctx_valid;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -134,18 +147,34 @@ static inline u32 dispc_read_reg(const u16 idx)
return __raw_readl(dispc.base + idx);
}
+static int dispc_get_ctx_loss_count(void)
+{
+ struct device *dev = &dispc.pdev->dev;
+ struct omap_display_platform_data *pdata = dev->platform_data;
+ struct omap_dss_board_info *board_data = pdata->board_data;
+ int cnt;
+
+ if (!board_data->get_context_loss_count)
+ return -ENOENT;
+
+ cnt = board_data->get_context_loss_count(dev);
+
+ WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+ return cnt;
+}
+
#define SR(reg) \
dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)])
-void dispc_save_context(void)
+static void dispc_save_context(void)
{
int i;
- if (cpu_is_omap24xx())
- return;
- SR(SYSCONFIG);
+ DSSDBG("dispc_save_context\n");
+
SR(IRQENABLE);
SR(CONTROL);
SR(CONFIG);
@@ -158,7 +187,8 @@ void dispc_save_context(void)
SR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
SR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
- SR(GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ SR(GLOBAL_ALPHA);
SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -188,20 +218,25 @@ void dispc_save_context(void)
SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
- SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
- SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ }
SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
}
- SR(OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(OMAP_DSS_GFX));
/* VID1 */
SR(OVL_BA0(OMAP_DSS_VIDEO1));
@@ -226,8 +261,10 @@ void dispc_save_context(void)
for (i = 0; i < 5; i++)
SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
SR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -248,7 +285,8 @@ void dispc_save_context(void)
if (dss_has_feature(FEAT_ATTR2))
SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
- SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
/* VID2 */
SR(OVL_BA0(OMAP_DSS_VIDEO2));
@@ -273,8 +311,10 @@ void dispc_save_context(void)
for (i = 0; i < 5; i++)
SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
SR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -295,16 +335,35 @@ void dispc_save_context(void)
if (dss_has_feature(FEAT_ATTR2))
SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
- SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
if (dss_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
+
+ dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
+ dispc.ctx_valid = true;
+
+ DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
}
-void dispc_restore_context(void)
+static void dispc_restore_context(void)
{
- int i;
- RR(SYSCONFIG);
+ int i, ctx;
+
+ DSSDBG("dispc_restore_context\n");
+
+ if (!dispc.ctx_valid)
+ return;
+
+ ctx = dispc_get_ctx_loss_count();
+
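+ /* Nothing to restore if the context loss count has not changed since the save */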
+ if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
+ return;
+
+ DSSDBG("ctx_loss_count: saved %d, current %d\n",
+ dispc.ctx_loss_cnt, ctx);
+
/*RR(IRQENABLE);*/
/*RR(CONTROL);*/
RR(CONFIG);
@@ -317,7 +376,8 @@ void dispc_restore_context(void)
RR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
RR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
- RR(GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ RR(GLOBAL_ALPHA);
RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -347,20 +407,25 @@ void dispc_restore_context(void)
RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ }
}
- RR(OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(OMAP_DSS_GFX));
/* VID1 */
RR(OVL_BA0(OMAP_DSS_VIDEO1));
@@ -385,8 +450,10 @@ void dispc_restore_context(void)
for (i = 0; i < 5; i++)
RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
RR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -407,7 +474,8 @@ void dispc_restore_context(void)
if (dss_has_feature(FEAT_ATTR2))
RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
- RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
/* VID2 */
RR(OVL_BA0(OMAP_DSS_VIDEO2));
@@ -432,8 +500,10 @@ void dispc_restore_context(void)
for (i = 0; i < 5; i++)
RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
RR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -454,7 +524,8 @@ void dispc_restore_context(void)
if (dss_has_feature(FEAT_ATTR2))
RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
- RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
if (dss_has_feature(FEAT_CORE_CLK_DIV))
RR(DIVISOR);
@@ -471,19 +542,35 @@ void dispc_restore_context(void)
* the context is fully restored
*/
RR(IRQENABLE);
+
+ DSSDBG("context restored\n");
}
#undef SR
#undef RR
-static inline void enable_clocks(bool enable)
+int dispc_runtime_get(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+
+ DSSDBG("dispc_runtime_get\n");
+
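+ /* pm_runtime_get_sync() may return 1 if the device was already active; treat that as success */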
+ r = pm_runtime_get_sync(&dispc.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
}
+void dispc_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("dispc_runtime_put\n");
+
+ r = pm_runtime_put(&dispc.pdev->dev);
+ WARN_ON(r < 0);
+}
+
+
bool dispc_go_busy(enum omap_channel channel)
{
int bit;
@@ -505,8 +592,6 @@ void dispc_go(enum omap_channel channel)
int bit;
bool enable_bit, go_bit;
- enable_clocks(1);
-
if (channel == OMAP_DSS_CHANNEL_LCD ||
channel == OMAP_DSS_CHANNEL_LCD2)
bit = 0; /* LCDENABLE */
@@ -520,7 +605,7 @@ void dispc_go(enum omap_channel channel)
enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
if (!enable_bit)
- goto end;
+ return;
if (channel == OMAP_DSS_CHANNEL_LCD ||
channel == OMAP_DSS_CHANNEL_LCD2)
@@ -535,7 +620,7 @@ void dispc_go(enum omap_channel channel)
if (go_bit) {
DSSERR("GO bit not down for channel %d\n", channel);
- goto end;
+ return;
}
DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" :
@@ -545,8 +630,6 @@ void dispc_go(enum omap_channel channel)
REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
else
REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
-end:
- enable_clocks(0);
}
static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
@@ -920,7 +1003,7 @@ static void _dispc_set_color_mode(enum omap_plane plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
-static void _dispc_set_channel_out(enum omap_plane plane,
+void dispc_set_channel_out(enum omap_plane plane,
enum omap_channel channel)
{
int shift;
@@ -967,13 +1050,10 @@ static void _dispc_set_channel_out(enum omap_plane plane,
dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
}
-void dispc_set_burst_size(enum omap_plane plane,
+static void dispc_set_burst_size(enum omap_plane plane,
enum omap_burst_size burst_size)
{
int shift;
- u32 val;
-
- enable_clocks(1);
switch (plane) {
case OMAP_DSS_GFX:
@@ -988,11 +1068,24 @@ void dispc_set_burst_size(enum omap_plane plane,
return;
}
- val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
- val = FLD_MOD(val, burst_size, shift+1, shift);
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift);
+}
- enable_clocks(0);
+static void dispc_configure_burst_sizes(void)
+{
+ int i;
+ const int burst_size = BURST_SIZE_X8;
+
+ /* Always configure the burst size to the maximum */
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i)
+ dispc_set_burst_size(i, burst_size);
+}
+
+u32 dispc_get_burst_size(enum omap_plane plane)
+{
+ unsigned unit = dss_feat_get_burst_size_unit();
+ /* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
+ return unit * 8;
}
void dispc_enable_gamma_table(bool enable)
@@ -1009,6 +1102,40 @@ void dispc_enable_gamma_table(bool enable)
REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
}
+void dispc_enable_cpr(enum omap_channel channel, bool enable)
+{
+ u16 reg;
+
+ if (channel == OMAP_DSS_CHANNEL_LCD)
+ reg = DISPC_CONFIG;
+ else if (channel == OMAP_DSS_CHANNEL_LCD2)
+ reg = DISPC_CONFIG2;
+ else
+ return;
+
+ REG_FLD_MOD(reg, enable, 15, 15);
+}
+
+void dispc_set_cpr_coef(enum omap_channel channel,
+ struct omap_dss_cpr_coefs *coefs)
+{
+ u32 coef_r, coef_g, coef_b;
+
+ if (channel != OMAP_DSS_CHANNEL_LCD && channel != OMAP_DSS_CHANNEL_LCD2)
+ return;
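+ /* Each CPR register packs the three 10-bit coefficients of one output color row */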
+
+ coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
+ FLD_VAL(coefs->rb, 9, 0);
+ coef_g = FLD_VAL(coefs->gr, 31, 22) | FLD_VAL(coefs->gg, 20, 11) |
+ FLD_VAL(coefs->gb, 9, 0);
+ coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) |
+ FLD_VAL(coefs->bb, 9, 0);
+
+ dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r);
+ dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g);
+ dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b);
+}
+
static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
{
u32 val;
@@ -1029,9 +1156,7 @@ void dispc_enable_replication(enum omap_plane plane, bool enable)
else
bit = 10;
- enable_clocks(1);
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
- enable_clocks(0);
}
void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
@@ -1039,9 +1164,7 @@ void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_SIZE_MGR(channel), val);
- enable_clocks(0);
}
void dispc_set_digit_size(u16 width, u16 height)
@@ -1049,9 +1172,7 @@ void dispc_set_digit_size(u16 width, u16 height)
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
- enable_clocks(0);
}
static void dispc_read_plane_fifo_sizes(void)
@@ -1059,18 +1180,17 @@ static void dispc_read_plane_fifo_sizes(void)
u32 size;
int plane;
u8 start, end;
+ u32 unit;
- enable_clocks(1);
+ unit = dss_feat_get_buffer_size_unit();
dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
- size = FLD_GET(dispc_read_reg(DISPC_OVL_FIFO_SIZE_STATUS(plane)),
- start, end);
+ size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(plane), start, end);
+ size *= unit;
dispc.fifo_size[plane] = size;
}
-
- enable_clocks(0);
}
u32 dispc_get_plane_fifo_size(enum omap_plane plane)
@@ -1078,15 +1198,22 @@ u32 dispc_get_plane_fifo_size(enum omap_plane plane)
return dispc.fifo_size[plane];
}
-void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
+void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
{
u8 hi_start, hi_end, lo_start, lo_end;
+ u32 unit;
+
+ unit = dss_feat_get_buffer_size_unit();
+
+ WARN_ON(low % unit != 0);
+ WARN_ON(high % unit != 0);
+
+ low /= unit;
+ high /= unit;
dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
- enable_clocks(1);
-
DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
plane,
REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
@@ -1098,18 +1225,12 @@ void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
FLD_VAL(high, hi_start, hi_end) |
FLD_VAL(low, lo_start, lo_end));
-
- enable_clocks(0);
}
void dispc_enable_fifomerge(bool enable)
{
- enable_clocks(1);
-
DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
-
- enable_clocks(0);
}
static void _dispc_set_fir(enum omap_plane plane,
@@ -1729,14 +1850,7 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
return dispc_pclk_rate(channel) * vf * hf;
}
-void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
-{
- enable_clocks(1);
- _dispc_set_channel_out(plane, channel_out);
- enable_clocks(0);
-}
-
-static int _dispc_setup_plane(enum omap_plane plane,
+int dispc_setup_plane(enum omap_plane plane,
u32 paddr, u16 screen_width,
u16 pos_x, u16 pos_y,
u16 width, u16 height,
@@ -1744,7 +1858,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
enum omap_color_mode color_mode,
bool ilace,
enum omap_dss_rotation_type rotation_type,
- u8 rotation, int mirror,
+ u8 rotation, bool mirror,
u8 global_alpha, u8 pre_mult_alpha,
enum omap_channel channel, u32 puv_addr)
{
@@ -1758,6 +1872,14 @@ static int _dispc_setup_plane(enum omap_plane plane,
u16 frame_height = height;
unsigned int field_offset = 0;
+ DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
+ "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
+ plane, paddr, screen_width, pos_x, pos_y,
+ width, height,
+ out_width, out_height,
+ ilace, color_mode,
+ rotation, mirror, channel);
+
if (paddr == 0)
return -EINVAL;
@@ -1903,9 +2025,13 @@ static int _dispc_setup_plane(enum omap_plane plane,
return 0;
}
-static void _dispc_enable_plane(enum omap_plane plane, bool enable)
+int dispc_enable_plane(enum omap_plane plane, bool enable)
{
+ DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
+
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
+
+ return 0;
}
static void dispc_disable_isr(void *data, u32 mask)
@@ -1929,8 +2055,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
int r;
u32 irq;
- enable_clocks(1);
-
/* When we disable LCD output, we need to wait until frame is done.
* Otherwise the DSS is still working, and turning off the clocks
* prevents DSS from going to OFF mode */
@@ -1964,8 +2088,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
if (r)
DSSERR("failed to unregister FRAMEDONE isr\n");
}
-
- enable_clocks(0);
}
static void _enable_digit_out(bool enable)
@@ -1978,12 +2100,8 @@ static void dispc_enable_digit_out(bool enable)
struct completion frame_done_completion;
int r;
- enable_clocks(1);
-
- if (REG_GET(DISPC_CONTROL, 1, 1) == enable) {
- enable_clocks(0);
+ if (REG_GET(DISPC_CONTROL, 1, 1) == enable)
return;
- }
if (enable) {
unsigned long flags;
@@ -2035,8 +2153,6 @@ static void dispc_enable_digit_out(bool enable)
_omap_dispc_set_irqs();
spin_unlock_irqrestore(&dispc.irq_lock, flags);
}
-
- enable_clocks(0);
}
bool dispc_is_channel_enabled(enum omap_channel channel)
@@ -2067,9 +2183,7 @@ void dispc_lcd_enable_signal_polarity(bool act_high)
if (!dss_has_feature(FEAT_LCDENABLEPOL))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
- enable_clocks(0);
}
void dispc_lcd_enable_signal(bool enable)
@@ -2077,9 +2191,7 @@ void dispc_lcd_enable_signal(bool enable)
if (!dss_has_feature(FEAT_LCDENABLESIGNAL))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
- enable_clocks(0);
}
void dispc_pck_free_enable(bool enable)
@@ -2087,19 +2199,15 @@ void dispc_pck_free_enable(bool enable)
if (!dss_has_feature(FEAT_PCKFREEENABLE))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
- enable_clocks(0);
}
void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16);
else
REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
- enable_clocks(0);
}
@@ -2122,27 +2230,21 @@ void dispc_set_lcd_display_type(enum omap_channel channel,
return;
}
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
else
REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
- enable_clocks(0);
}
void dispc_set_loadmode(enum omap_dss_load_mode mode)
{
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
- enable_clocks(0);
}
void dispc_set_default_color(enum omap_channel channel, u32 color)
{
- enable_clocks(1);
dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
- enable_clocks(0);
}
u32 dispc_get_default_color(enum omap_channel channel)
@@ -2153,9 +2255,7 @@ u32 dispc_get_default_color(enum omap_channel channel)
channel != OMAP_DSS_CHANNEL_LCD &&
channel != OMAP_DSS_CHANNEL_LCD2);
- enable_clocks(1);
l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel));
- enable_clocks(0);
return l;
}
@@ -2164,7 +2264,6 @@ void dispc_set_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type type,
u32 trans_key)
{
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2173,14 +2272,12 @@ void dispc_set_trans_key(enum omap_channel ch,
REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);
dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
- enable_clocks(0);
}
void dispc_get_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type *type,
u32 *trans_key)
{
- enable_clocks(1);
if (type) {
if (ch == OMAP_DSS_CHANNEL_LCD)
*type = REG_GET(DISPC_CONFIG, 11, 11);
@@ -2194,33 +2291,28 @@ void dispc_get_trans_key(enum omap_channel ch,
if (trans_key)
*trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
- enable_clocks(0);
}
void dispc_enable_trans_key(enum omap_channel ch, bool enable)
{
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
else /* OMAP_DSS_CHANNEL_LCD2 */
REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
- enable_clocks(0);
}
void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
{
if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
else /* OMAP_DSS_CHANNEL_LCD2 */
REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18);
- enable_clocks(0);
}
bool dispc_alpha_blending_enabled(enum omap_channel ch)
{
@@ -2229,7 +2321,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return false;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2238,7 +2329,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
enabled = REG_GET(DISPC_CONFIG2, 18, 18);
else
BUG();
- enable_clocks(0);
return enabled;
}
@@ -2248,7 +2338,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
{
bool enabled;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 10, 10);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2257,7 +2346,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
enabled = REG_GET(DISPC_CONFIG2, 10, 10);
else
BUG();
- enable_clocks(0);
return enabled;
}
@@ -2285,12 +2373,10 @@ void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
return;
}
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
else
REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
- enable_clocks(0);
}
void dispc_set_parallel_interface_mode(enum omap_channel channel,
@@ -2322,8 +2408,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel,
return;
}
- enable_clocks(1);
-
if (channel == OMAP_DSS_CHANNEL_LCD2) {
l = dispc_read_reg(DISPC_CONTROL2);
l = FLD_MOD(l, stallmode, 11, 11);
@@ -2335,8 +2419,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel,
l = FLD_MOD(l, gpout1, 16, 16);
dispc_write_reg(DISPC_CONTROL, l);
}
-
- enable_clocks(0);
}
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
@@ -2389,10 +2471,8 @@ static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw,
FLD_VAL(vbp, 31, 20);
}
- enable_clocks(1);
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
- enable_clocks(0);
}
/* change name to mode? */
@@ -2435,10 +2515,8 @@ static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
BUG_ON(lck_div < 1);
BUG_ON(pck_div < 2);
- enable_clocks(1);
dispc_write_reg(DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
- enable_clocks(0);
}
static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
@@ -2457,7 +2535,7 @@ unsigned long dispc_fclk_rate(void)
switch (dss_get_dispc_clk_source()) {
case OMAP_DSS_CLK_SRC_FCK:
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dispc.dss_clk);
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
dsidev = dsi_get_dsidev_from_id(0);
@@ -2487,7 +2565,7 @@ unsigned long dispc_lclk_rate(enum omap_channel channel)
switch (dss_get_lcd_clk_source(channel)) {
case OMAP_DSS_CLK_SRC_FCK:
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dispc.dss_clk);
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
dsidev = dsi_get_dsidev_from_id(0);
@@ -2526,7 +2604,8 @@ void dispc_dump_clocks(struct seq_file *s)
enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
enum omap_dss_clk_source lcd_clk_src;
- enable_clocks(1);
+ if (dispc_runtime_get())
+ return;
seq_printf(s, "- DISPC -\n");
@@ -2574,7 +2653,8 @@ void dispc_dump_clocks(struct seq_file *s)
seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
}
- enable_clocks(0);
+
+ dispc_runtime_put();
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -2629,7 +2709,8 @@ void dispc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dispc_runtime_get())
+ return;
DUMPREG(DISPC_REVISION);
DUMPREG(DISPC_SYSCONFIG);
@@ -2649,7 +2730,8 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ DUMPREG(DISPC_GLOBAL_ALPHA);
DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -2680,20 +2762,25 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ }
}
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1));
DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1));
@@ -2744,14 +2831,16 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -2812,14 +2901,17 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
+
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -2858,10 +2950,12 @@ void dispc_dump_regs(struct seq_file *s)
if (dss_has_feature(FEAT_ATTR2))
DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_PRELOAD)) {
+ DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
+ DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ }
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
#undef DUMPREG
}
@@ -2882,9 +2976,7 @@ static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf,
l |= FLD_VAL(acbi, 11, 8);
l |= FLD_VAL(acb, 7, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_POL_FREQ(channel), l);
- enable_clocks(0);
}
void dispc_set_pol_freq(enum omap_channel channel,
@@ -3005,15 +3097,11 @@ static void _omap_dispc_set_irqs(void)
mask |= isr_data->mask;
}
- enable_clocks(1);
-
old_mask = dispc_read_reg(DISPC_IRQENABLE);
/* clear the irqstatus for newly enabled irqs */
dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
dispc_write_reg(DISPC_IRQENABLE, mask);
-
- enable_clocks(0);
}
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
@@ -3522,13 +3610,6 @@ static void _omap_dispc_initial_config(void)
{
u32 l;
- l = dispc_read_reg(DISPC_SYSCONFIG);
- l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */
- l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */
- l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */
- l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
- dispc_write_reg(DISPC_SYSCONFIG, l);
-
/* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
l = dispc_read_reg(DISPC_DIVISOR);
@@ -3552,58 +3633,8 @@ static void _omap_dispc_initial_config(void)
dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
dispc_read_plane_fifo_sizes();
-}
-int dispc_enable_plane(enum omap_plane plane, bool enable)
-{
- DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
-
- enable_clocks(1);
- _dispc_enable_plane(plane, enable);
- enable_clocks(0);
-
- return 0;
-}
-
-int dispc_setup_plane(enum omap_plane plane,
- u32 paddr, u16 screen_width,
- u16 pos_x, u16 pos_y,
- u16 width, u16 height,
- u16 out_width, u16 out_height,
- enum omap_color_mode color_mode,
- bool ilace,
- enum omap_dss_rotation_type rotation_type,
- u8 rotation, bool mirror, u8 global_alpha,
- u8 pre_mult_alpha, enum omap_channel channel,
- u32 puv_addr)
-{
- int r = 0;
-
- DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d, %d, %dx%d -> "
- "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
- plane, paddr, screen_width, pos_x, pos_y,
- width, height,
- out_width, out_height,
- ilace, color_mode,
- rotation, mirror, channel);
-
- enable_clocks(1);
-
- r = _dispc_setup_plane(plane,
- paddr, screen_width,
- pos_x, pos_y,
- width, height,
- out_width, out_height,
- color_mode, ilace,
- rotation_type,
- rotation, mirror,
- global_alpha,
- pre_mult_alpha,
- channel, puv_addr);
-
- enable_clocks(0);
-
- return r;
+ dispc_configure_burst_sizes();
}
/* DISPC HW IP initialisation */
@@ -3612,9 +3643,19 @@ static int omap_dispchw_probe(struct platform_device *pdev)
u32 rev;
int r = 0;
struct resource *dispc_mem;
+ struct clk *clk;
dispc.pdev = pdev;
+ clk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get fck\n");
+ r = PTR_ERR(clk);
+ goto err_get_clk;
+ }
+
+ dispc.dss_clk = clk;
+
spin_lock_init(&dispc.irq_lock);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -3628,62 +3669,103 @@ static int omap_dispchw_probe(struct platform_device *pdev)
if (!dispc_mem) {
DSSERR("can't get IORESOURCE_MEM DISPC\n");
r = -EINVAL;
- goto fail0;
+ goto err_ioremap;
}
dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem));
if (!dispc.base) {
DSSERR("can't ioremap DISPC\n");
r = -ENOMEM;
- goto fail0;
+ goto err_ioremap;
}
dispc.irq = platform_get_irq(dispc.pdev, 0);
if (dispc.irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto fail1;
+ goto err_irq;
}
r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED,
"OMAP DISPC", dispc.pdev);
if (r < 0) {
DSSERR("request_irq failed\n");
- goto fail1;
+ goto err_irq;
}
- enable_clocks(1);
+ pm_runtime_enable(&pdev->dev);
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_runtime_get;
_omap_dispc_initial_config();
_omap_dispc_initialize_irq();
- dispc_save_context();
-
rev = dispc_read_reg(DISPC_REVISION);
dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- enable_clocks(0);
+ dispc_runtime_put();
return 0;
-fail1:
+
+err_runtime_get:
+ pm_runtime_disable(&pdev->dev);
+ free_irq(dispc.irq, dispc.pdev);
+err_irq:
iounmap(dispc.base);
-fail0:
+err_ioremap:
+ clk_put(dispc.dss_clk);
+err_get_clk:
return r;
}
static int omap_dispchw_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
+
+ clk_put(dispc.dss_clk);
+
free_irq(dispc.irq, dispc.pdev);
iounmap(dispc.base);
return 0;
}
+static int dispc_runtime_suspend(struct device *dev)
+{
+ dispc_save_context();
+ clk_disable(dispc.dss_clk);
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int dispc_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ return r;
+
+ clk_enable(dispc.dss_clk);
+ dispc_restore_context();
+
+ return 0;
+}
+
+static const struct dev_pm_ops dispc_pm_ops = {
+ .runtime_suspend = dispc_runtime_suspend,
+ .runtime_resume = dispc_runtime_resume,
+};
+
static struct platform_driver omap_dispchw_driver = {
.probe = omap_dispchw_probe,
.remove = omap_dispchw_remove,
.driver = {
.name = "omapdss_dispc",
.owner = THIS_MODULE,
+ .pm = &dispc_pm_ops,
},
};
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index c2dfc8c..94495e4 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -29,6 +29,7 @@
#include <video/omapdss.h>
#include "dss.h"
+#include "dss_features.h"
static ssize_t display_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -65,48 +66,6 @@ static ssize_t display_enabled_store(struct device *dev,
return size;
}
-static ssize_t display_upd_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO;
- if (dssdev->driver->get_update_mode)
- mode = dssdev->driver->get_update_mode(dssdev);
- return snprintf(buf, PAGE_SIZE, "%d\n", mode);
-}
-
-static ssize_t display_upd_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int val, r;
- enum omap_dss_update_mode mode;
-
- if (!dssdev->driver->set_update_mode)
- return -EINVAL;
-
- r = kstrtoint(buf, 0, &val);
- if (r)
- return r;
-
- switch (val) {
- case OMAP_DSS_UPDATE_DISABLED:
- case OMAP_DSS_UPDATE_AUTO:
- case OMAP_DSS_UPDATE_MANUAL:
- mode = (enum omap_dss_update_mode)val;
- break;
- default:
- return -EINVAL;
- }
-
- r = dssdev->driver->set_update_mode(dssdev, mode);
- if (r)
- return r;
-
- return size;
-}
-
static ssize_t display_tear_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -294,8 +253,6 @@ static ssize_t display_wss_store(struct device *dev,
static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
display_enabled_show, display_enabled_store);
-static DEVICE_ATTR(update_mode, S_IRUGO|S_IWUSR,
- display_upd_mode_show, display_upd_mode_store);
static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
display_tear_show, display_tear_store);
static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
@@ -309,7 +266,6 @@ static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
static struct device_attribute *display_sysfs_attrs[] = {
&dev_attr_enabled,
- &dev_attr_update_mode,
&dev_attr_tear_elim,
&dev_attr_timings,
&dev_attr_rotate,
@@ -327,16 +283,13 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
EXPORT_SYMBOL(omapdss_default_get_resolution);
void default_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high)
{
- unsigned burst_size_bytes;
-
- *burst_size = OMAP_DSS_BURST_16x32;
- burst_size_bytes = 16 * 32 / 8;
+ unsigned buf_unit = dss_feat_get_buffer_size_unit();
- *fifo_high = fifo_size - 1;
- *fifo_low = fifo_size - burst_size_bytes;
+ *fifo_high = fifo_size - buf_unit;
+ *fifo_low = fifo_size - burst_size;
}
int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index ff6bd30..f053b18 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -23,7 +23,6 @@
#define DSS_SUBSYS_NAME "DPI"
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -130,8 +129,6 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
bool is_tft;
int r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
-
dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
@@ -144,7 +141,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000,
&fck, &lck_div, &pck_div);
if (r)
- goto err0;
+ return r;
pck = fck / lck_div / pck_div / 1000;
@@ -158,12 +155,10 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
dispc_set_lcd_timings(dssdev->manager->id, t);
-err0:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
- return r;
+ return 0;
}
-static int dpi_basic_init(struct omap_dss_device *dssdev)
+static void dpi_basic_init(struct omap_dss_device *dssdev)
{
bool is_tft;
@@ -175,8 +170,6 @@ static int dpi_basic_init(struct omap_dss_device *dssdev)
OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
dispc_set_tft_data_lines(dssdev->manager->id,
dssdev->phy.dpi.data_lines);
-
- return 0;
}
int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
@@ -186,31 +179,38 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
if (cpu_is_omap34xx()) {
r = regulator_enable(dpi.vdds_dsi_reg);
if (r)
- goto err1;
+ goto err_reg_enable;
}
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
- r = dpi_basic_init(dssdev);
+ r = dispc_runtime_get();
if (r)
- goto err2;
+ goto err_get_dispc;
+
+ dpi_basic_init(dssdev);
if (dpi_use_dsi_pll(dssdev)) {
- dss_clk_enable(DSS_CLK_SYSCK);
+ r = dsi_runtime_get(dpi.dsidev);
+ if (r)
+ goto err_get_dsi;
+
r = dsi_pll_init(dpi.dsidev, 0, 1);
if (r)
- goto err3;
+ goto err_dsi_pll_init;
}
r = dpi_set_mode(dssdev);
if (r)
- goto err4;
+ goto err_set_mode;
mdelay(2);
@@ -218,19 +218,22 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
return 0;
-err4:
+err_set_mode:
if (dpi_use_dsi_pll(dssdev))
dsi_pll_uninit(dpi.dsidev, true);
-err3:
+err_dsi_pll_init:
if (dpi_use_dsi_pll(dssdev))
- dss_clk_disable(DSS_CLK_SYSCK);
-err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dsi_runtime_put(dpi.dsidev);
+err_get_dsi:
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
-err1:
+err_reg_enable:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
return r;
}
EXPORT_SYMBOL(omapdss_dpi_display_enable);
@@ -242,10 +245,11 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
if (dpi_use_dsi_pll(dssdev)) {
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
dsi_pll_uninit(dpi.dsidev, true);
- dss_clk_disable(DSS_CLK_SYSCK);
+ dsi_runtime_put(dpi.dsidev);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
+ dss_runtime_put();
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
@@ -257,11 +261,26 @@ EXPORT_SYMBOL(omapdss_dpi_display_disable);
void dpi_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
+ int r;
+
DSSDBG("dpi_set_timings\n");
dssdev->panel.timings = *timings;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ r = dss_runtime_get();
+ if (r)
+ return;
+
+ r = dispc_runtime_get();
+ if (r) {
+ dss_runtime_put();
+ return;
+ }
+
dpi_set_mode(dssdev);
dispc_go(dssdev->manager->id);
+
+ dispc_runtime_put();
+ dss_runtime_put();
}
}
EXPORT_SYMBOL(dpi_set_timings);
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 345757c..7adbbeb 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -36,6 +36,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/clock.h>
@@ -267,8 +268,12 @@ struct dsi_isr_tables {
struct dsi_data {
struct platform_device *pdev;
void __iomem *base;
+
int irq;
+ struct clk *dss_clk;
+ struct clk *sys_clk;
+
void (*dsi_mux_pads)(bool enable);
struct dsi_clock_info current_cinfo;
@@ -389,15 +394,6 @@ static inline u32 dsi_read_reg(struct platform_device *dsidev,
return __raw_readl(dsi->base + idx.idx);
}
-
-void dsi_save_context(void)
-{
-}
-
-void dsi_restore_context(void)
-{
-}
-
void dsi_bus_lock(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -493,9 +489,18 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
total_bytes * 1000 / total_us);
}
#else
-#define dsi_perf_mark_setup(x)
-#define dsi_perf_mark_start(x)
-#define dsi_perf_show(x, y)
+static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
+{
+}
+
+static inline void dsi_perf_mark_start(struct platform_device *dsidev)
+{
+}
+
+static inline void dsi_perf_show(struct platform_device *dsidev,
+ const char *name)
+{
+}
#endif
static void print_irq_status(u32 status)
@@ -1039,13 +1044,27 @@ static u32 dsi_get_errors(struct platform_device *dsidev)
return e;
}
-/* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */
-static inline void enable_clocks(bool enable)
+int dsi_runtime_get(struct platform_device *dsidev)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+
+ DSSDBG("dsi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&dsi->pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+void dsi_runtime_put(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ int r;
+
+ DSSDBG("dsi_runtime_put\n");
+
+ r = pm_runtime_put(&dsi->pdev->dev);
+ WARN_ON(r < 0);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
@@ -1055,9 +1074,9 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (enable)
- dss_clk_enable(DSS_CLK_SYSCK);
+ clk_enable(dsi->sys_clk);
else
- dss_clk_disable(DSS_CLK_SYSCK);
+ clk_disable(dsi->sys_clk);
if (enable && dsi->pll_locked) {
if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
@@ -1150,10 +1169,11 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
{
unsigned long r;
int dsi_module = dsi_get_dsidev_id(dsidev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dsi->dss_clk);
} else {
/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
@@ -1262,7 +1282,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
return -EINVAL;
if (cinfo->use_sys_clk) {
- cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK);
+ cinfo->clkin = clk_get_rate(dsi->sys_clk);
/* XXX it is unclear if highfreq should be used
* with DSS_SYS_CLK source also */
cinfo->highfreq = 0;
@@ -1311,7 +1331,7 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
int match = 0;
unsigned long dss_sys_clk, max_dss_fck;
- dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK);
+ dss_sys_clk = clk_get_rate(dsi->sys_clk);
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
@@ -1601,7 +1621,6 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
dsi->vdds_dsi_reg = vdds_dsi;
}
- enable_clocks(1);
dsi_enable_pll_clock(dsidev, 1);
/*
* Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
@@ -1653,7 +1672,6 @@ err1:
}
err0:
dsi_disable_scp_clk(dsidev);
- enable_clocks(0);
dsi_enable_pll_clock(dsidev, 0);
return r;
}
@@ -1671,7 +1689,6 @@ void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
}
dsi_disable_scp_clk(dsidev);
- enable_clocks(0);
dsi_enable_pll_clock(dsidev, 0);
DSSDBG("PLL uninit done\n");
@@ -1688,7 +1705,8 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
dispc_clk_src = dss_get_dispc_clk_source();
dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
- enable_clocks(1);
+ if (dsi_runtime_get(dsidev))
+ return;
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
@@ -1731,7 +1749,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
}
void dsi_dump_clocks(struct seq_file *s)
@@ -1873,7 +1891,8 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dsi_runtime_get(dsidev))
+ return;
dsi_enable_scp_clk(dsidev);
DUMPREG(DSI_REVISION);
@@ -1947,7 +1966,7 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
DUMPREG(DSI_PLL_CONFIGURATION2);
dsi_disable_scp_clk(dsidev);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dsi_runtime_put(dsidev);
#undef DUMPREG
}
@@ -2463,28 +2482,6 @@ static void dsi_cio_uninit(struct platform_device *dsidev)
dsi->dsi_mux_pads(false);
}
-static int _dsi_wait_reset(struct platform_device *dsidev)
-{
- int t = 0;
-
- while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) {
- if (++t > 5) {
- DSSERR("soft reset failed\n");
- return -ENODEV;
- }
- udelay(1);
- }
-
- return 0;
-}
-
-static int _dsi_reset(struct platform_device *dsidev)
-{
- /* Soft reset */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1);
- return _dsi_wait_reset(dsidev);
-}
-
static void dsi_config_tx_fifo(struct platform_device *dsidev,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
@@ -3386,6 +3383,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
+ /* Reset LANEx_ULPS_SIG2 */
+ REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2),
+ 7, 5);
+
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
dsi_if_enable(dsidev, false);
@@ -4198,22 +4199,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
dsi_pll_uninit(dsidev, disconnect_lanes);
}
-static int dsi_core_init(struct platform_device *dsidev)
-{
- /* Autoidle */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0);
-
- /* ENWAKEUP */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2);
-
- /* SIDLEMODE smart-idle */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3);
-
- _dsi_initialize_irq(dsidev);
-
- return 0;
-}
-
int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -4229,37 +4214,37 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
- enable_clocks(1);
- dsi_enable_pll_clock(dsidev, 1);
-
- r = _dsi_reset(dsidev);
+ r = dsi_runtime_get(dsidev);
if (r)
- goto err1;
+ goto err_get_dsi;
- dsi_core_init(dsidev);
+ dsi_enable_pll_clock(dsidev, 1);
+
+ _dsi_initialize_irq(dsidev);
r = dsi_display_init_dispc(dssdev);
if (r)
- goto err1;
+ goto err_init_dispc;
r = dsi_display_init_dsi(dssdev);
if (r)
- goto err2;
+ goto err_init_dsi;
mutex_unlock(&dsi->lock);
return 0;
-err2:
+err_init_dsi:
dsi_display_uninit_dispc(dssdev);
-err1:
- enable_clocks(0);
+err_init_dispc:
dsi_enable_pll_clock(dsidev, 0);
+ dsi_runtime_put(dsidev);
+err_get_dsi:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
return r;
@@ -4278,11 +4263,16 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
mutex_lock(&dsi->lock);
+ dsi_sync_vc(dsidev, 0);
+ dsi_sync_vc(dsidev, 1);
+ dsi_sync_vc(dsidev, 2);
+ dsi_sync_vc(dsidev, 3);
+
dsi_display_uninit_dispc(dssdev);
dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
dsi_enable_pll_clock(dsidev, 0);
omap_dss_stop_device(dssdev);
@@ -4302,16 +4292,11 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
EXPORT_SYMBOL(omapdss_dsi_enable_te);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high)
{
- unsigned burst_size_bytes;
-
- *burst_size = OMAP_DSS_BURST_16x32;
- burst_size_bytes = 16 * 32 / 8;
-
- *fifo_high = fifo_size - burst_size_bytes;
- *fifo_low = fifo_size - burst_size_bytes * 2;
+ *fifo_high = fifo_size - burst_size;
+ *fifo_low = fifo_size - burst_size * 2;
}
int dsi_init_display(struct omap_dss_device *dssdev)
@@ -4437,7 +4422,47 @@ static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
}
-static int dsi_init(struct platform_device *dsidev)
+static int dsi_get_clocks(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct clk *clk;
+
+ clk = clk_get(&dsidev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get fck\n");
+ return PTR_ERR(clk);
+ }
+
+ dsi->dss_clk = clk;
+
+ if (cpu_is_omap34xx() || cpu_is_omap3630())
+ clk = clk_get(&dsidev->dev, "dss2_alwon_fck");
+ else
+ clk = clk_get(&dsidev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ clk_put(dsi->dss_clk);
+ dsi->dss_clk = NULL;
+ return PTR_ERR(clk);
+ }
+
+ dsi->sys_clk = clk;
+
+ return 0;
+}
+
+static void dsi_put_clocks(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+
+ if (dsi->dss_clk)
+ clk_put(dsi->dss_clk);
+ if (dsi->sys_clk)
+ clk_put(dsi->sys_clk);
+}
+
+/* DSI1 HW IP initialisation */
+static int omap_dsi1hw_probe(struct platform_device *dsidev)
{
struct omap_display_platform_data *dss_plat_data;
struct omap_dss_board_info *board_info;
@@ -4449,7 +4474,7 @@ static int dsi_init(struct platform_device *dsidev)
dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi) {
r = -ENOMEM;
- goto err0;
+ goto err_alloc;
}
dsi->pdev = dsidev;
@@ -4472,6 +4497,12 @@ static int dsi_init(struct platform_device *dsidev)
mutex_init(&dsi->lock);
sema_init(&dsi->bus_lock, 1);
+ r = dsi_get_clocks(dsidev);
+ if (r)
+ goto err_get_clk;
+
+ pm_runtime_enable(&dsidev->dev);
+
INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
dsi_framedone_timeout_work_callback);
@@ -4484,26 +4515,26 @@ static int dsi_init(struct platform_device *dsidev)
if (!dsi_mem) {
DSSERR("can't get IORESOURCE_MEM DSI\n");
r = -EINVAL;
- goto err1;
+ goto err_ioremap;
}
dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
if (!dsi->base) {
DSSERR("can't ioremap DSI\n");
r = -ENOMEM;
- goto err1;
+ goto err_ioremap;
}
dsi->irq = platform_get_irq(dsi->pdev, 0);
if (dsi->irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err2;
+ goto err_get_irq;
}
r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
dev_name(&dsidev->dev), dsi->pdev);
if (r < 0) {
DSSERR("request_irq failed\n");
- goto err2;
+ goto err_get_irq;
}
/* DSI VCs initialization */
@@ -4515,7 +4546,9 @@ static int dsi_init(struct platform_device *dsidev)
dsi_calc_clock_param_ranges(dsidev);
- enable_clocks(1);
+ r = dsi_runtime_get(dsidev);
+ if (r)
+ goto err_get_dsi;
rev = dsi_read_reg(dsidev, DSI_REVISION);
dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
@@ -4523,21 +4556,32 @@ static int dsi_init(struct platform_device *dsidev)
dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
return 0;
-err2:
+
+err_get_dsi:
+ free_irq(dsi->irq, dsi->pdev);
+err_get_irq:
iounmap(dsi->base);
-err1:
+err_ioremap:
+ pm_runtime_disable(&dsidev->dev);
+err_get_clk:
kfree(dsi);
-err0:
+err_alloc:
return r;
}
-static void dsi_exit(struct platform_device *dsidev)
+static int omap_dsi1hw_remove(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ WARN_ON(dsi->scp_clk_refcount > 0);
+
+ pm_runtime_disable(&dsidev->dev);
+
+ dsi_put_clocks(dsidev);
+
if (dsi->vdds_dsi_reg != NULL) {
if (dsi->vdds_dsi_enabled) {
regulator_disable(dsi->vdds_dsi_reg);
@@ -4553,38 +4597,56 @@ static void dsi_exit(struct platform_device *dsidev)
kfree(dsi);
- DSSDBG("omap_dsi_exit\n");
+ return 0;
}
-/* DSI1 HW IP initialisation */
-static int omap_dsi1hw_probe(struct platform_device *dsidev)
+static int dsi_runtime_suspend(struct device *dev)
{
- int r;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev));
- r = dsi_init(dsidev);
- if (r) {
- DSSERR("Failed to initialize DSI\n");
- goto err_dsi;
- }
-err_dsi:
- return r;
+ clk_disable(dsi->dss_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
}
-static int omap_dsi1hw_remove(struct platform_device *dsidev)
+static int dsi_runtime_resume(struct device *dev)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev));
+ int r;
+
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
+
+ clk_enable(dsi->dss_clk);
- dsi_exit(dsidev);
- WARN_ON(dsi->scp_clk_refcount > 0);
return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
}
+static const struct dev_pm_ops dsi_pm_ops = {
+ .runtime_suspend = dsi_runtime_suspend,
+ .runtime_resume = dsi_runtime_resume,
+};
+
static struct platform_driver omap_dsi1hw_driver = {
.probe = omap_dsi1hw_probe,
.remove = omap_dsi1hw_remove,
.driver = {
.name = "omapdss_dsi1",
.owner = THIS_MODULE,
+ .pm = &dsi_pm_ops,
},
};
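The dsi.c hunks above swap the old enable_clocks()/dss_clk_* calls for runtime PM. Every converted DSS module ends up with the same small get/put wrapper pair; the sketch below is illustrative only — the actual dsi_runtime_get()/dsi_runtime_put() bodies fall outside the hunks shown, so the shape here is taken from the dss.c and hdmi.c versions later in this patch, not quoted from dsi.c itself.

	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	/* Sketch of the runtime PM wrapper pattern used by the converted
	 * DSS modules; not the literal dsi.c implementation. */
	static int example_runtime_get(struct platform_device *pdev)
	{
		int r;

		/* Resume the device if it is suspended; this ends up in the
		 * driver's .runtime_resume callback, which enables the clocks. */
		r = pm_runtime_get_sync(&pdev->dev);
		WARN_ON(r < 0);
		return r < 0 ? r : 0;
	}

	static void example_runtime_put(struct platform_device *pdev)
	{
		int r;

		/* Drop the usage count; the core may runtime-suspend the device
		 * (and so gate its clocks) once the count reaches zero. */
		r = pm_runtime_put(&pdev->dev);
		WARN_ON(r < 0);
	}

Callers then bracket register access with the get/put pair, exactly as dsi_dump_dsidev_clocks() and dsi_dump_dsidev_regs() do above, bailing out if the get fails.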
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index d9489d5..0f9c3a6 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -28,6 +28,8 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/clock.h>
@@ -59,15 +61,9 @@ struct dss_reg {
static struct {
struct platform_device *pdev;
void __iomem *base;
- int ctx_id;
struct clk *dpll4_m4_ck;
- struct clk *dss_ick;
- struct clk *dss_fck;
- struct clk *dss_sys_clk;
- struct clk *dss_tv_fck;
- struct clk *dss_video_fck;
- unsigned num_clks_enabled;
+ struct clk *dss_clk;
unsigned long cache_req_pck;
unsigned long cache_prate;
@@ -78,6 +74,7 @@ static struct {
enum omap_dss_clk_source dispc_clk_source;
enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
+ bool ctx_valid;
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;
@@ -87,13 +84,6 @@ static const char * const dss_generic_clk_source_names[] = {
[OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
};
-static void dss_clk_enable_all_no_ctx(void);
-static void dss_clk_disable_all_no_ctx(void);
-static void dss_clk_enable_no_ctx(enum dss_clock clks);
-static void dss_clk_disable_no_ctx(enum dss_clock clks);
-
-static int _omap_dss_wait_reset(void);
-
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
{
__raw_writel(val, dss.base + idx.idx);
@@ -109,12 +99,10 @@ static inline u32 dss_read_reg(const struct dss_reg idx)
#define RR(reg) \
dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
-void dss_save_context(void)
+static void dss_save_context(void)
{
- if (cpu_is_omap24xx())
- return;
+ DSSDBG("dss_save_context\n");
- SR(SYSCONFIG);
SR(CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -122,14 +110,19 @@ void dss_save_context(void)
SR(SDI_CONTROL);
SR(PLL_CONTROL);
}
+
+ dss.ctx_valid = true;
+
+ DSSDBG("context saved\n");
}
-void dss_restore_context(void)
+static void dss_restore_context(void)
{
- if (_omap_dss_wait_reset())
- DSSERR("DSS not coming out of reset after sleep\n");
+ DSSDBG("dss_restore_context\n");
+
+ if (!dss.ctx_valid)
+ return;
- RR(SYSCONFIG);
RR(CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -137,6 +130,8 @@ void dss_restore_context(void)
RR(SDI_CONTROL);
RR(PLL_CONTROL);
}
+
+ DSSDBG("context restored\n");
}
#undef SR
@@ -234,6 +229,7 @@ const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
return dss_generic_clk_source_names[clk_src];
}
+
void dss_dump_clocks(struct seq_file *s)
{
unsigned long dpll4_ck_rate;
@@ -241,13 +237,14 @@ void dss_dump_clocks(struct seq_file *s)
const char *fclk_name, *fclk_real_name;
unsigned long fclk_rate;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dss_runtime_get())
+ return;
seq_printf(s, "- DSS -\n");
fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
- fclk_rate = dss_clk_get_rate(DSS_CLK_FCK);
+ fclk_rate = clk_get_rate(dss.dss_clk);
if (dss.dpll4_m4_ck) {
dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
@@ -273,14 +270,15 @@ void dss_dump_clocks(struct seq_file *s)
fclk_rate);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dss_runtime_put();
}
void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dss_runtime_get())
+ return;
DUMPREG(DSS_REVISION);
DUMPREG(DSS_SYSCONFIG);
@@ -294,7 +292,7 @@ void dss_dump_regs(struct seq_file *s)
DUMPREG(DSS_SDI_STATUS);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dss_runtime_put();
#undef DUMPREG
}
@@ -437,7 +435,7 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo)
} else {
if (cinfo->fck_div != 0)
return -EINVAL;
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
+ cinfo->fck = clk_get_rate(dss.dss_clk);
}
return 0;
@@ -467,7 +465,7 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
int dss_get_clock_div(struct dss_clock_info *cinfo)
{
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
+ cinfo->fck = clk_get_rate(dss.dss_clk);
if (dss.dpll4_m4_ck) {
unsigned long prate;
@@ -512,7 +510,7 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
- fck = dss_clk_get_rate(DSS_CLK_FCK);
+ fck = clk_get_rate(dss.dss_clk);
if (req_pck == dss.cache_req_pck &&
((cpu_is_omap34xx() && prate == dss.cache_prate) ||
dss.cache_dss_cinfo.fck == fck)) {
@@ -539,7 +537,7 @@ retry:
if (dss.dpll4_m4_ck == NULL) {
struct dispc_clock_info cur_dispc;
/* XXX can we change the clock on omap2? */
- fck = dss_clk_get_rate(DSS_CLK_FCK);
+ fck = clk_get_rate(dss.dss_clk);
fck_div = 1;
dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
@@ -616,28 +614,6 @@ found:
return 0;
}
-static int _omap_dss_wait_reset(void)
-{
- int t = 0;
-
- while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
- if (++t > 1000) {
- DSSERR("soft reset failed\n");
- return -ENODEV;
- }
- udelay(1);
- }
-
- return 0;
-}
-
-static int _omap_dss_reset(void)
-{
- /* Soft reset */
- REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
- return _omap_dss_wait_reset();
-}
-
void dss_set_venc_output(enum omap_dss_venc_type type)
{
int l = 0;
@@ -663,424 +639,88 @@ void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi)
REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */
}
-static int dss_init(void)
+static int dss_get_clocks(void)
{
+ struct clk *clk;
int r;
- u32 rev;
- struct resource *dss_mem;
- struct clk *dpll4_m4_ck;
- dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
- if (!dss_mem) {
- DSSERR("can't get IORESOURCE_MEM DSS\n");
- r = -EINVAL;
- goto fail0;
- }
- dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
- if (!dss.base) {
- DSSERR("can't ioremap DSS\n");
- r = -ENOMEM;
- goto fail0;
+ clk = clk_get(&dss.pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get clock fck\n");
+ r = PTR_ERR(clk);
+ goto err;
}
- /* disable LCD and DIGIT output. This seems to fix the synclost
- * problem that we get, if the bootloader starts the DSS and
- * the kernel resets it */
- omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
-
-#ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET
- /* We need to wait here a bit, otherwise we sometimes start to
- * get synclost errors, and after that only power cycle will
- * restore DSS functionality. I have no idea why this happens.
- * And we have to wait _before_ resetting the DSS, but after
- * enabling clocks.
- *
- * This bug was at least present on OMAP3430. It's unknown
- * if it happens on OMAP2 or OMAP3630.
- */
- msleep(50);
-#endif
-
- _omap_dss_reset();
+ dss.dss_clk = clk;
- /* autoidle */
- REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
-
- /* Select DPLL */
- REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
-
-#ifdef CONFIG_OMAP2_DSS_VENC
- REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
- REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
- REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
-#endif
if (cpu_is_omap34xx()) {
- dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
- if (IS_ERR(dpll4_m4_ck)) {
+ clk = clk_get(NULL, "dpll4_m4_ck");
+ if (IS_ERR(clk)) {
DSSERR("Failed to get dpll4_m4_ck\n");
- r = PTR_ERR(dpll4_m4_ck);
- goto fail1;
+ r = PTR_ERR(clk);
+ goto err;
}
} else if (cpu_is_omap44xx()) {
- dpll4_m4_ck = clk_get(NULL, "dpll_per_m5x2_ck");
- if (IS_ERR(dpll4_m4_ck)) {
- DSSERR("Failed to get dpll4_m4_ck\n");
- r = PTR_ERR(dpll4_m4_ck);
- goto fail1;
+ clk = clk_get(NULL, "dpll_per_m5x2_ck");
+ if (IS_ERR(clk)) {
+ DSSERR("Failed to get dpll_per_m5x2_ck\n");
+ r = PTR_ERR(clk);
+ goto err;
}
} else { /* omap24xx */
- dpll4_m4_ck = NULL;
+ clk = NULL;
}
- dss.dpll4_m4_ck = dpll4_m4_ck;
-
- dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
- dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
- dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
-
- dss_save_context();
-
- rev = dss_read_reg(DSS_REVISION);
- printk(KERN_INFO "OMAP DSS rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+ dss.dpll4_m4_ck = clk;
return 0;
-fail1:
- iounmap(dss.base);
-fail0:
- return r;
-}
-
-static void dss_exit(void)
-{
+err:
+ if (dss.dss_clk)
+ clk_put(dss.dss_clk);
if (dss.dpll4_m4_ck)
clk_put(dss.dpll4_m4_ck);
- iounmap(dss.base);
-}
-
-/* CONTEXT */
-static int dss_get_ctx_id(void)
-{
- struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
- int r;
-
- if (!pdata->board_data->get_last_off_on_transaction_id)
- return 0;
- r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev);
- if (r < 0) {
- dev_err(&dss.pdev->dev, "getting transaction ID failed, "
- "will force context restore\n");
- r = -1;
- }
- return r;
-}
-
-int dss_need_ctx_restore(void)
-{
- int id = dss_get_ctx_id();
-
- if (id < 0 || id != dss.ctx_id) {
- DSSDBG("ctx id %d -> id %d\n",
- dss.ctx_id, id);
- dss.ctx_id = id;
- return 1;
- } else {
- return 0;
- }
-}
-
-static void save_all_ctx(void)
-{
- DSSDBG("save context\n");
-
- dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
-
- dss_save_context();
- dispc_save_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_save_context();
-#endif
-
- dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
-}
-
-static void restore_all_ctx(void)
-{
- DSSDBG("restore context\n");
-
- dss_clk_enable_all_no_ctx();
-
- dss_restore_context();
- dispc_restore_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_restore_context();
-#endif
-
- dss_clk_disable_all_no_ctx();
-}
-
-static int dss_get_clock(struct clk **clock, const char *clk_name)
-{
- struct clk *clk;
-
- clk = clk_get(&dss.pdev->dev, clk_name);
-
- if (IS_ERR(clk)) {
- DSSERR("can't get clock %s", clk_name);
- return PTR_ERR(clk);
- }
-
- *clock = clk;
-
- DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
-
- return 0;
-}
-
-static int dss_get_clocks(void)
-{
- int r;
- struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
-
- dss.dss_ick = NULL;
- dss.dss_fck = NULL;
- dss.dss_sys_clk = NULL;
- dss.dss_tv_fck = NULL;
- dss.dss_video_fck = NULL;
-
- r = dss_get_clock(&dss.dss_ick, "ick");
- if (r)
- goto err;
-
- r = dss_get_clock(&dss.dss_fck, "fck");
- if (r)
- goto err;
-
- if (!pdata->opt_clock_available) {
- r = -ENODEV;
- goto err;
- }
-
- if (pdata->opt_clock_available("sys_clk")) {
- r = dss_get_clock(&dss.dss_sys_clk, "sys_clk");
- if (r)
- goto err;
- }
-
- if (pdata->opt_clock_available("tv_clk")) {
- r = dss_get_clock(&dss.dss_tv_fck, "tv_clk");
- if (r)
- goto err;
- }
-
- if (pdata->opt_clock_available("video_clk")) {
- r = dss_get_clock(&dss.dss_video_fck, "video_clk");
- if (r)
- goto err;
- }
-
- return 0;
-
-err:
- if (dss.dss_ick)
- clk_put(dss.dss_ick);
- if (dss.dss_fck)
- clk_put(dss.dss_fck);
- if (dss.dss_sys_clk)
- clk_put(dss.dss_sys_clk);
- if (dss.dss_tv_fck)
- clk_put(dss.dss_tv_fck);
- if (dss.dss_video_fck)
- clk_put(dss.dss_video_fck);
-
return r;
}
static void dss_put_clocks(void)
{
- if (dss.dss_video_fck)
- clk_put(dss.dss_video_fck);
- if (dss.dss_tv_fck)
- clk_put(dss.dss_tv_fck);
- if (dss.dss_sys_clk)
- clk_put(dss.dss_sys_clk);
- clk_put(dss.dss_fck);
- clk_put(dss.dss_ick);
-}
-
-unsigned long dss_clk_get_rate(enum dss_clock clk)
-{
- switch (clk) {
- case DSS_CLK_ICK:
- return clk_get_rate(dss.dss_ick);
- case DSS_CLK_FCK:
- return clk_get_rate(dss.dss_fck);
- case DSS_CLK_SYSCK:
- return clk_get_rate(dss.dss_sys_clk);
- case DSS_CLK_TVFCK:
- return clk_get_rate(dss.dss_tv_fck);
- case DSS_CLK_VIDFCK:
- return clk_get_rate(dss.dss_video_fck);
- }
-
- BUG();
- return 0;
-}
-
-static unsigned count_clk_bits(enum dss_clock clks)
-{
- unsigned num_clks = 0;
-
- if (clks & DSS_CLK_ICK)
- ++num_clks;
- if (clks & DSS_CLK_FCK)
- ++num_clks;
- if (clks & DSS_CLK_SYSCK)
- ++num_clks;
- if (clks & DSS_CLK_TVFCK)
- ++num_clks;
- if (clks & DSS_CLK_VIDFCK)
- ++num_clks;
-
- return num_clks;
-}
-
-static void dss_clk_enable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_enable(dss.dss_ick);
- if (clks & DSS_CLK_FCK)
- clk_enable(dss.dss_fck);
- if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
- clk_enable(dss.dss_sys_clk);
- if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
- clk_enable(dss.dss_tv_fck);
- if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
- clk_enable(dss.dss_video_fck);
-
- dss.num_clks_enabled += num_clks;
-}
-
-void dss_clk_enable(enum dss_clock clks)
-{
- bool check_ctx = dss.num_clks_enabled == 0;
-
- dss_clk_enable_no_ctx(clks);
-
- /*
- * HACK: On omap4 the registers may not be accessible right after
- * enabling the clocks. At some point this will be handled by
- * pm_runtime, but for the time begin this should make things work.
- */
- if (cpu_is_omap44xx() && check_ctx)
- udelay(10);
-
- if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
- restore_all_ctx();
+ if (dss.dpll4_m4_ck)
+ clk_put(dss.dpll4_m4_ck);
+ clk_put(dss.dss_clk);
}
-static void dss_clk_disable_no_ctx(enum dss_clock clks)
+struct clk *dss_get_ick(void)
{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_disable(dss.dss_ick);
- if (clks & DSS_CLK_FCK)
- clk_disable(dss.dss_fck);
- if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
- clk_disable(dss.dss_sys_clk);
- if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
- clk_disable(dss.dss_tv_fck);
- if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
- clk_disable(dss.dss_video_fck);
-
- dss.num_clks_enabled -= num_clks;
+ return clk_get(&dss.pdev->dev, "ick");
}
-void dss_clk_disable(enum dss_clock clks)
+int dss_runtime_get(void)
{
- if (cpu_is_omap34xx()) {
- unsigned num_clks = count_clk_bits(clks);
-
- BUG_ON(dss.num_clks_enabled < num_clks);
+ int r;
- if (dss.num_clks_enabled == num_clks)
- save_all_ctx();
- }
+ DSSDBG("dss_runtime_get\n");
- dss_clk_disable_no_ctx(clks);
+ r = pm_runtime_get_sync(&dss.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
}
-static void dss_clk_enable_all_no_ctx(void)
+void dss_runtime_put(void)
{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_VIDFCK;
- dss_clk_enable_no_ctx(clks);
-}
-
-static void dss_clk_disable_all_no_ctx(void)
-{
- enum dss_clock clks;
+ int r;
- clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_VIDFCK;
- dss_clk_disable_no_ctx(clks);
-}
+ DSSDBG("dss_runtime_put\n");
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-/* CLOCKS */
-static void core_dump_clocks(struct seq_file *s)
-{
- int i;
- struct clk *clocks[5] = {
- dss.dss_ick,
- dss.dss_fck,
- dss.dss_sys_clk,
- dss.dss_tv_fck,
- dss.dss_video_fck
- };
-
- const char *names[5] = {
- "ick",
- "fck",
- "sys_clk",
- "tv_fck",
- "video_fck"
- };
-
- seq_printf(s, "- CORE -\n");
-
- seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);
-
- for (i = 0; i < 5; i++) {
- if (!clocks[i])
- continue;
- seq_printf(s, "%s (%s)%*s\t%lu\t%d\n",
- names[i],
- clocks[i]->name,
- 24 - strlen(names[i]) - strlen(clocks[i]->name),
- "",
- clk_get_rate(clocks[i]),
- clocks[i]->usecount);
- }
+ r = pm_runtime_put(&dss.pdev->dev);
+ WARN_ON(r < 0);
}
-#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
/* DEBUGFS */
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
void dss_debug_dump_clocks(struct seq_file *s)
{
- core_dump_clocks(s);
dss_dump_clocks(s);
dispc_dump_clocks(s);
#ifdef CONFIG_OMAP2_DSS_DSI
@@ -1089,28 +729,51 @@ void dss_debug_dump_clocks(struct seq_file *s)
}
#endif
-
/* DSS HW IP initialisation */
static int omap_dsshw_probe(struct platform_device *pdev)
{
+ struct resource *dss_mem;
+ u32 rev;
int r;
dss.pdev = pdev;
+ dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
+ if (!dss_mem) {
+ DSSERR("can't get IORESOURCE_MEM DSS\n");
+ r = -EINVAL;
+ goto err_ioremap;
+ }
+ dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
+ if (!dss.base) {
+ DSSERR("can't ioremap DSS\n");
+ r = -ENOMEM;
+ goto err_ioremap;
+ }
+
r = dss_get_clocks();
if (r)
goto err_clocks;
- dss_clk_enable_all_no_ctx();
+ pm_runtime_enable(&pdev->dev);
- dss.ctx_id = dss_get_ctx_id();
- DSSDBG("initial ctx id %u\n", dss.ctx_id);
+ r = dss_runtime_get();
+ if (r)
+ goto err_runtime_get;
- r = dss_init();
- if (r) {
- DSSERR("Failed to initialize DSS\n");
- goto err_dss;
- }
+ /* Select DPLL */
+ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+ REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
+ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
+ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
+#endif
+ dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
+ dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
+ dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
r = dpi_init();
if (r) {
@@ -1124,42 +787,66 @@ static int omap_dsshw_probe(struct platform_device *pdev)
goto err_sdi;
}
- dss_clk_disable_all_no_ctx();
+ rev = dss_read_reg(DSS_REVISION);
+ printk(KERN_INFO "OMAP DSS rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ dss_runtime_put();
+
return 0;
err_sdi:
dpi_exit();
err_dpi:
- dss_exit();
-err_dss:
- dss_clk_disable_all_no_ctx();
+ dss_runtime_put();
+err_runtime_get:
+ pm_runtime_disable(&pdev->dev);
dss_put_clocks();
err_clocks:
+ iounmap(dss.base);
+err_ioremap:
return r;
}
static int omap_dsshw_remove(struct platform_device *pdev)
{
+ dpi_exit();
+ sdi_exit();
- dss_exit();
+ iounmap(dss.base);
- /*
- * As part of hwmod changes, DSS is not the only controller of dss
- * clocks; hwmod framework itself will also enable clocks during hwmod
- * init for dss, and autoidle is set in h/w for DSS. Hence, there's no
- * need to disable clocks if their usecounts > 1.
- */
- WARN_ON(dss.num_clks_enabled > 0);
+ pm_runtime_disable(&pdev->dev);
dss_put_clocks();
+
+ return 0;
+}
+
+static int dss_runtime_suspend(struct device *dev)
+{
+ dss_save_context();
+ clk_disable(dss.dss_clk);
return 0;
}
+static int dss_runtime_resume(struct device *dev)
+{
+ clk_enable(dss.dss_clk);
+ dss_restore_context();
+ return 0;
+}
+
+static const struct dev_pm_ops dss_pm_ops = {
+ .runtime_suspend = dss_runtime_suspend,
+ .runtime_resume = dss_runtime_resume,
+};
+
static struct platform_driver omap_dsshw_driver = {
.probe = omap_dsshw_probe,
.remove = omap_dsshw_remove,
.driver = {
.name = "omapdss_dss",
.owner = THIS_MODULE,
+ .pm = &dss_pm_ops,
},
};
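With the dss.c conversion above, clock handling collapses to one "fck" plus the optional DPLL, and the old context-ID tracking is replaced by the ctx_valid flag: context is saved in .runtime_suspend and restored in .runtime_resume, but only after a save has actually happened. The fragment below is a sketch of how a caller sees this; it assumes only the dss_runtime_get()/dss_runtime_put() declarations added to dss.h further down.

	/* Illustrative caller; any DSS register access is bracketed like this. */
	static int example_touch_dss_regs(void)
	{
		int r;

		/* pm_runtime_get_sync() -> dss_runtime_resume(): clk_enable()
		 * and, if a context was saved earlier, dss_restore_context(). */
		r = dss_runtime_get();
		if (r)
			return r;

		/* ... DSS registers are safe to read and write here ... */

		/* pm_runtime_put() -> (eventually) dss_runtime_suspend():
		 * dss_save_context() and clk_disable(). */
		dss_runtime_put();

		return 0;
	}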
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 8ab6d43..9c94b11 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -97,26 +97,12 @@ extern unsigned int dss_debug;
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
-enum omap_burst_size {
- OMAP_DSS_BURST_4x32 = 0,
- OMAP_DSS_BURST_8x32 = 1,
- OMAP_DSS_BURST_16x32 = 2,
-};
-
enum omap_parallel_interface_mode {
OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */
OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */
OMAP_DSS_PARALLELMODE_DSI,
};
-enum dss_clock {
- DSS_CLK_ICK = 1 << 0, /* DSS_L3_ICLK and DSS_L4_ICLK */
- DSS_CLK_FCK = 1 << 1, /* DSS1_ALWON_FCLK */
- DSS_CLK_SYSCK = 1 << 2, /* DSS2_ALWON_FCLK */
- DSS_CLK_TVFCK = 1 << 3, /* DSS_TV_FCLK */
- DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/
-};
-
enum dss_hdmi_venc_clk_source_select {
DSS_VENC_TV_CLK = 0,
DSS_HDMI_M_PCLK = 1,
@@ -194,7 +180,7 @@ void dss_uninit_device(struct platform_device *pdev,
bool dss_use_replication(struct omap_dss_device *dssdev,
enum omap_color_mode mode);
void default_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high);
/* manager */
@@ -220,13 +206,12 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
int dss_init_platform_driver(void);
void dss_uninit_platform_driver(void);
+int dss_runtime_get(void);
+void dss_runtime_put(void);
+
+struct clk *dss_get_ick(void);
+
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
-void dss_save_context(void);
-void dss_restore_context(void);
-void dss_clk_enable(enum dss_clock clks);
-void dss_clk_disable(enum dss_clock clks);
-unsigned long dss_clk_get_rate(enum dss_clock clk);
-int dss_need_ctx_restore(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
@@ -283,15 +268,15 @@ struct file_operations;
int dsi_init_platform_driver(void);
void dsi_uninit_platform_driver(void);
+int dsi_runtime_get(struct platform_device *dsidev);
+void dsi_runtime_put(struct platform_device *dsidev);
+
void dsi_dump_clocks(struct seq_file *s);
void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
const struct file_operations *debug_fops);
void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
const struct file_operations *debug_fops);
-void dsi_save_context(void);
-void dsi_restore_context(void);
-
int dsi_init_display(struct omap_dss_device *display);
void dsi_irq_handler(void);
unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
@@ -304,7 +289,7 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
bool enable_hsdiv);
void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high);
void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
@@ -317,6 +302,13 @@ static inline int dsi_init_platform_driver(void)
static inline void dsi_uninit_platform_driver(void)
{
}
+static inline int dsi_runtime_get(struct platform_device *dsidev)
+{
+ return 0;
+}
+static inline void dsi_runtime_put(struct platform_device *dsidev)
+{
+}
static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
{
WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
@@ -384,8 +376,8 @@ void dispc_dump_regs(struct seq_file *s);
void dispc_irq_handler(void);
void dispc_fake_vsync_irq(void);
-void dispc_save_context(void);
-void dispc_restore_context(void);
+int dispc_runtime_get(void);
+void dispc_runtime_put(void);
void dispc_enable_sidle(void);
void dispc_disable_sidle(void);
@@ -398,10 +390,12 @@ void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable);
void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
void dispc_set_digit_size(u16 width, u16 height);
u32 dispc_get_plane_fifo_size(enum omap_plane plane);
-void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high);
+void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
void dispc_enable_fifomerge(bool enable);
-void dispc_set_burst_size(enum omap_plane plane,
- enum omap_burst_size burst_size);
+u32 dispc_get_burst_size(enum omap_plane plane);
+void dispc_enable_cpr(enum omap_channel channel, bool enable);
+void dispc_set_cpr_coef(enum omap_channel channel,
+ struct omap_dss_cpr_coefs *coefs);
void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr);
void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr);
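The header changes also turn the overlay burst size from the removed omap_burst_size enum into a plain u32 in bytes, so the threshold helpers no longer pick the burst size themselves — they receive it (via dispc_get_burst_size()) and only do the arithmetic. A sketch of that arithmetic, mirroring dsi_get_overlay_fifo_thresholds() above; the real default_get_overlay_fifo_thresholds() body is not part of the hunks shown, so treat this as an assumption about its shape.

	/* Sketch: FIFO thresholds expressed in bytes relative to the FIFO size. */
	static void example_fifo_thresholds(u32 fifo_size, u32 burst_size,
			u32 *fifo_low, u32 *fifo_high)
	{
		/* high threshold: one burst below the FIFO size */
		*fifo_high = fifo_size - burst_size;
		/* low threshold: two bursts below the FIFO size */
		*fifo_low = fifo_size - burst_size * 2;
	}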
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 1c18888..b415c4e 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -49,6 +49,9 @@ struct omap_dss_features {
const enum omap_color_mode *supported_color_modes;
const char * const *clksrc_names;
const struct dss_param_range *dss_params;
+
+ const u32 buffer_size_unit;
+ const u32 burst_size_unit;
};
/* This struct is assigned to one of the below during initialization */
@@ -274,6 +277,8 @@ static const struct omap_dss_features omap2_dss_features = {
.supported_color_modes = omap2_dss_supported_color_modes,
.clksrc_names = omap2_dss_clk_source_names,
.dss_params = omap2_dss_param_range,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
};
/* OMAP3 DSS Features */
@@ -286,7 +291,9 @@ static const struct omap_dss_features omap3430_dss_features = {
FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF |
- FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC,
+ FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC |
+ FEAT_VENC_REQUIRES_TV_DAC_CLK | FEAT_CPR | FEAT_PRELOAD |
+ FEAT_FIR_COEF_V,
.num_mgrs = 2,
.num_ovls = 3,
@@ -294,6 +301,8 @@ static const struct omap_dss_features omap3430_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
};
static const struct omap_dss_features omap3630_dss_features = {
@@ -306,7 +315,8 @@ static const struct omap_dss_features omap3630_dss_features = {
FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG |
- FEAT_DSI_PLL_FREQSEL,
+ FEAT_DSI_PLL_FREQSEL | FEAT_CPR | FEAT_PRELOAD |
+ FEAT_FIR_COEF_V,
.num_mgrs = 2,
.num_ovls = 3,
@@ -314,6 +324,8 @@ static const struct omap_dss_features omap3630_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
};
/* OMAP4 DSS Features */
@@ -327,7 +339,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
- FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
+ FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 |
+ FEAT_CPR | FEAT_PRELOAD | FEAT_FIR_COEF_V,
.num_mgrs = 3,
.num_ovls = 3,
@@ -335,6 +348,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
.supported_color_modes = omap4_dss_supported_color_modes,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .buffer_size_unit = 16,
+ .burst_size_unit = 16,
};
/* For all the other OMAP4 versions */
@@ -348,7 +363,8 @@ static const struct omap_dss_features omap4_dss_features = {
FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE |
- FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
+ FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 | FEAT_CPR |
+ FEAT_PRELOAD | FEAT_FIR_COEF_V,
.num_mgrs = 3,
.num_ovls = 3,
@@ -356,6 +372,8 @@ static const struct omap_dss_features omap4_dss_features = {
.supported_color_modes = omap4_dss_supported_color_modes,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .buffer_size_unit = 16,
+ .burst_size_unit = 16,
};
/* Functions returning values related to a DSS feature */
@@ -401,6 +419,16 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
return omap_current_dss_features->clksrc_names[id];
}
+u32 dss_feat_get_buffer_size_unit(void)
+{
+ return omap_current_dss_features->buffer_size_unit;
+}
+
+u32 dss_feat_get_burst_size_unit(void)
+{
+ return omap_current_dss_features->burst_size_unit;
+}
+
/* DSS has_feature check */
bool dss_has_feature(enum dss_feat_id id)
{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 07b346f..b7398cb 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -51,6 +51,10 @@ enum dss_feat_id {
FEAT_HDMI_CTS_SWMODE = 1 << 19,
FEAT_HANDLE_UV_SEPARATE = 1 << 20,
FEAT_ATTR2 = 1 << 21,
+ FEAT_VENC_REQUIRES_TV_DAC_CLK = 1 << 22,
+ FEAT_CPR = 1 << 23,
+ FEAT_PRELOAD = 1 << 24,
+ FEAT_FIR_COEF_V = 1 << 25,
};
/* DSS register field id */
@@ -90,6 +94,9 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode);
const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
+u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
+u32 dss_feat_get_burst_size_unit(void); /* in bytes */
+
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
void dss_features_init(void);
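The new FEAT_* bits above gate the CPR, preload and vertical FIR-coefficient code paths per SoC, and dss_has_feature() (unchanged, visible at the end of the dss_features.c hunk) is the only check callers need. Below is a minimal sketch of the guard pattern that manager.c uses further down; the helper name is hypothetical.

	/* Hypothetical helper showing the feature-gated configuration pattern. */
	static void example_apply_cpr(enum omap_channel channel,
			struct omap_dss_cpr_coefs *coefs, bool enable)
	{
		/* OMAP2 lacks the CPR block, so its feature mask omits FEAT_CPR
		 * and this becomes a no-op there. */
		if (!dss_has_feature(FEAT_CPR))
			return;

		dispc_enable_cpr(channel, enable);
		dispc_set_cpr_coef(channel, coefs);
	}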
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index b0555f4..256f27a 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -29,6 +29,9 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
#include <video/omapdss.h>
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
@@ -51,6 +54,9 @@ static struct {
u8 edid_set;
bool custom_set;
struct hdmi_config cfg;
+
+ struct clk *sys_clk;
+ struct clk *hdmi_clk;
} hdmi;
/*
@@ -162,6 +168,27 @@ static inline int hdmi_wait_for_bit_change(const struct hdmi_reg idx,
return val;
}
+static int hdmi_runtime_get(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&hdmi.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void hdmi_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_put\n");
+
+ r = pm_runtime_put(&hdmi.pdev->dev);
+ WARN_ON(r < 0);
+}
+
int hdmi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
@@ -311,30 +338,11 @@ static int hdmi_phy_init(void)
return 0;
}
-static int hdmi_wait_softreset(void)
-{
- /* reset W1 */
- REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0);
-
- /* wait till SOFTRESET == 0 */
- if (hdmi_wait_for_bit_change(HDMI_WP_SYSCONFIG, 0, 0, 0) != 0) {
- DSSERR("sysconfig reset failed\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
static int hdmi_pll_program(struct hdmi_pll_info *fmt)
{
u16 r = 0;
enum hdmi_clk_refsel refsel;
- /* wait for wrapper reset */
- r = hdmi_wait_softreset();
- if (r)
- return r;
-
r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
if (r)
return r;
@@ -1064,7 +1072,7 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
unsigned long clkin, refclk;
u32 mf;
- clkin = dss_clk_get_rate(DSS_CLK_SYSCK) / 10000;
+ clkin = clk_get_rate(hdmi.sys_clk) / 10000;
/*
* Input clock is predivided by N + 1
* output of which is reference clk
@@ -1098,16 +1106,6 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
}
-static void hdmi_enable_clocks(int enable)
-{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK |
- DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK |
- DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
-}
-
static int hdmi_power_on(struct omap_dss_device *dssdev)
{
int r, code = 0;
@@ -1115,7 +1113,9 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
struct omap_video_timings *p;
unsigned long phy;
- hdmi_enable_clocks(1);
+ r = hdmi_runtime_get();
+ if (r)
+ return r;
dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
@@ -1180,7 +1180,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
return 0;
err:
- hdmi_enable_clocks(0);
+ hdmi_runtime_put();
return -EIO;
}
@@ -1191,7 +1191,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
hdmi_wp_video_start(0);
hdmi_phy_off();
hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
- hdmi_enable_clocks(0);
+ hdmi_runtime_put();
hdmi.edid_set = 0;
}
@@ -1686,14 +1686,43 @@ static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
};
#endif
+static int hdmi_get_clocks(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ return PTR_ERR(clk);
+ }
+
+ hdmi.sys_clk = clk;
+
+ clk = clk_get(&pdev->dev, "dss_48mhz_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get hdmi_clk\n");
+ clk_put(hdmi.sys_clk);
+ return PTR_ERR(clk);
+ }
+
+ hdmi.hdmi_clk = clk;
+
+ return 0;
+}
+
+static void hdmi_put_clocks(void)
+{
+ if (hdmi.sys_clk)
+ clk_put(hdmi.sys_clk);
+ if (hdmi.hdmi_clk)
+ clk_put(hdmi.hdmi_clk);
+}
+
/* HDMI HW IP initialisation */
static int omapdss_hdmihw_probe(struct platform_device *pdev)
{
struct resource *hdmi_mem;
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
- int ret;
-#endif
+ int r;
hdmi.pdata = pdev->dev.platform_data;
hdmi.pdev = pdev;
@@ -1713,17 +1742,25 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ r = hdmi_get_clocks(pdev);
+ if (r) {
+ iounmap(hdmi.base_wp);
+ return r;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
hdmi_panel_init();
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
/* Register ASoC codec DAI */
- ret = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
+ r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
&hdmi_codec_dai_drv, 1);
- if (ret) {
+ if (r) {
DSSERR("can't register ASoC HDMI audio codec\n");
- return ret;
+ return r;
}
#endif
return 0;
@@ -1738,17 +1775,62 @@ static int omapdss_hdmihw_remove(struct platform_device *pdev)
snd_soc_unregister_codec(&pdev->dev);
#endif
+ pm_runtime_disable(&pdev->dev);
+
+ hdmi_put_clocks();
+
iounmap(hdmi.base_wp);
return 0;
}
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ clk_disable(hdmi.hdmi_clk);
+ clk_disable(hdmi.sys_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ goto err_get_dispc;
+
+
+ clk_enable(hdmi.sys_clk);
+ clk_enable(hdmi.hdmi_clk);
+
+ return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
static struct platform_driver omapdss_hdmihw_driver = {
.probe = omapdss_hdmihw_probe,
.remove = omapdss_hdmihw_remove,
.driver = {
.name = "omapdss_hdmi",
.owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
},
};
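hdmi.c follows the same conversion, but its runtime callbacks also chain up to the DSS core and DISPC, since the HDMI wrapper registers are unreachable unless those are powered. The ordering is the important part; a sketch of it follows, with a single module clock standing in for hdmi.sys_clk/hdmi.hdmi_clk (dss.h and linux/clk.h assumed).

	/* Sketch of the nested resume ordering used by hdmi_runtime_resume()
	 * above (and mirrored by the dsi callbacks): parents first, local
	 * clock last; the suspend path releases everything in reverse. */
	static int example_child_runtime_resume(struct clk *module_clk)
	{
		int r;

		r = dss_runtime_get();		/* DSS core first */
		if (r < 0)
			return r;

		r = dispc_runtime_get();	/* then the display controller */
		if (r < 0) {
			dss_runtime_put();
			return r;
		}

		clk_enable(module_clk);		/* finally the module's own clock */

		return 0;
	}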
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 9aeea50..13d72d5 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -275,6 +275,108 @@ static ssize_t manager_alpha_blending_enabled_store(
return size;
}
+static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.cpr_enable);
+}
+
+static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ int v;
+ int r;
+ bool enable;
+
+ if (!dss_has_feature(FEAT_CPR))
+ return -ENODEV;
+
+ r = kstrtoint(buf, 0, &v);
+ if (r)
+ return r;
+
+ enable = !!v;
+
+ mgr->get_manager_info(mgr, &info);
+
+ if (info.cpr_enable == enable)
+ return size;
+
+ info.cpr_enable = enable;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE,
+ "%d %d %d %d %d %d %d %d %d\n",
+ info.cpr_coefs.rr,
+ info.cpr_coefs.rg,
+ info.cpr_coefs.rb,
+ info.cpr_coefs.gr,
+ info.cpr_coefs.gg,
+ info.cpr_coefs.gb,
+ info.cpr_coefs.br,
+ info.cpr_coefs.bg,
+ info.cpr_coefs.bb);
+}
+
+static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ struct omap_dss_cpr_coefs coefs;
+ int r, i;
+ s16 *arr;
+
+ if (!dss_has_feature(FEAT_CPR))
+ return -ENODEV;
+
+ if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd",
+ &coefs.rr, &coefs.rg, &coefs.rb,
+ &coefs.gr, &coefs.gg, &coefs.gb,
+ &coefs.br, &coefs.bg, &coefs.bb) != 9)
+ return -EINVAL;
+
+ arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
+ coefs.gr, coefs.gg, coefs.gb,
+ coefs.br, coefs.bg, coefs.bb };
+
+ for (i = 0; i < 9; ++i) {
+ if (arr[i] < -512 || arr[i] > 511)
+ return -EINVAL;
+ }
+
+ mgr->get_manager_info(mgr, &info);
+
+ info.cpr_coefs = coefs;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
struct manager_attribute {
struct attribute attr;
ssize_t (*show)(struct omap_overlay_manager *, char *);
@@ -300,6 +402,12 @@ static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
manager_alpha_blending_enabled_show,
manager_alpha_blending_enabled_store);
+static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR,
+ manager_cpr_enable_show,
+ manager_cpr_enable_store);
+static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR,
+ manager_cpr_coef_show,
+ manager_cpr_coef_store);
static struct attribute *manager_sysfs_attrs[] = {
@@ -310,6 +418,8 @@ static struct attribute *manager_sysfs_attrs[] = {
&manager_attr_trans_key_value.attr,
&manager_attr_trans_key_enabled.attr,
&manager_attr_alpha_blending_enabled.attr,
+ &manager_attr_cpr_enable.attr,
+ &manager_attr_cpr_coef.attr,
NULL
};
@@ -391,33 +501,14 @@ struct overlay_cache_data {
bool enabled;
- u32 paddr;
- void __iomem *vaddr;
- u32 p_uv_addr; /* relevant for NV12 format only */
- u16 screen_width;
- u16 width;
- u16 height;
- enum omap_color_mode color_mode;
- u8 rotation;
- enum omap_dss_rotation_type rotation_type;
- bool mirror;
-
- u16 pos_x;
- u16 pos_y;
- u16 out_width; /* if 0, out_width == width */
- u16 out_height; /* if 0, out_height == height */
- u8 global_alpha;
- u8 pre_mult_alpha;
+ struct omap_overlay_info info;
enum omap_channel channel;
bool replication;
bool ilace;
- enum omap_burst_size burst_size;
u32 fifo_low;
u32 fifo_high;
-
- bool manual_update;
};
struct manager_cache_data {
@@ -429,15 +520,8 @@ struct manager_cache_data {
* VSYNC/EVSYNC */
bool shadow_dirty;
- u32 default_color;
-
- enum omap_dss_trans_key_type trans_key_type;
- u32 trans_key;
- bool trans_enabled;
-
- bool alpha_enabled;
+ struct omap_overlay_manager_info info;
- bool manual_upd_display;
bool manual_update;
bool do_manual_update;
@@ -539,24 +623,15 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+ return 0;
+
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
} else {
- if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- enum omap_dss_update_mode mode;
- mode = dssdev->driver->get_update_mode(dssdev);
- if (mode != OMAP_DSS_UPDATE_AUTO)
- return 0;
-
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_FRAMEDONE
- : DISPC_IRQ_FRAMEDONE2;
- } else {
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_VSYNC
- : DISPC_IRQ_VSYNC2;
- }
+ irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
}
mc = &dss_cache.manager_cache[mgr->id];
@@ -617,24 +692,15 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+ return 0;
+
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
} else {
- if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- enum omap_dss_update_mode mode;
- mode = dssdev->driver->get_update_mode(dssdev);
- if (mode != OMAP_DSS_UPDATE_AUTO)
- return 0;
-
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_FRAMEDONE
- : DISPC_IRQ_FRAMEDONE2;
- } else {
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_VSYNC
- : DISPC_IRQ_VSYNC2;
- }
+ irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
}
oc = &dss_cache.overlay_cache[ovl->id];
@@ -720,10 +786,12 @@ static bool rectangle_intersects(int x1, int y1, int w1, int h1,
static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc)
{
- if (oc->out_width != 0 && oc->width != oc->out_width)
+ struct omap_overlay_info *oi = &oc->info;
+
+ if (oi->out_width != 0 && oi->width != oi->out_width)
return true;
- if (oc->out_height != 0 && oc->height != oc->out_height)
+ if (oi->out_height != 0 && oi->height != oi->out_height)
return true;
return false;
@@ -733,6 +801,8 @@ static int configure_overlay(enum omap_plane plane)
{
struct overlay_cache_data *c;
struct manager_cache_data *mc;
+ struct omap_overlay_info *oi;
+ struct omap_overlay_manager_info *mi;
u16 outw, outh;
u16 x, y, w, h;
u32 paddr;
@@ -742,6 +812,7 @@ static int configure_overlay(enum omap_plane plane)
DSSDBGF("%d", plane);
c = &dss_cache.overlay_cache[plane];
+ oi = &c->info;
if (!c->enabled) {
dispc_enable_plane(plane, 0);
@@ -749,21 +820,22 @@ static int configure_overlay(enum omap_plane plane)
}
mc = &dss_cache.manager_cache[c->channel];
+ mi = &mc->info;
- x = c->pos_x;
- y = c->pos_y;
- w = c->width;
- h = c->height;
- outw = c->out_width == 0 ? c->width : c->out_width;
- outh = c->out_height == 0 ? c->height : c->out_height;
- paddr = c->paddr;
+ x = oi->pos_x;
+ y = oi->pos_y;
+ w = oi->width;
+ h = oi->height;
+ outw = oi->out_width == 0 ? oi->width : oi->out_width;
+ outh = oi->out_height == 0 ? oi->height : oi->out_height;
+ paddr = oi->paddr;
orig_w = w;
orig_h = h;
orig_outw = outw;
orig_outh = outh;
- if (c->manual_update && mc->do_manual_update) {
+ if (mc->manual_update && mc->do_manual_update) {
unsigned bpp;
unsigned scale_x_m = w, scale_x_d = outw;
unsigned scale_y_m = h, scale_y_d = outh;
@@ -775,7 +847,7 @@ static int configure_overlay(enum omap_plane plane)
return 0;
}
- switch (c->color_mode) {
+ switch (oi->color_mode) {
case OMAP_DSS_COLOR_NV12:
bpp = 8;
break;
@@ -805,23 +877,23 @@ static int configure_overlay(enum omap_plane plane)
BUG();
}
- if (mc->x > c->pos_x) {
+ if (mc->x > oi->pos_x) {
x = 0;
- outw -= (mc->x - c->pos_x);
- paddr += (mc->x - c->pos_x) *
+ outw -= (mc->x - oi->pos_x);
+ paddr += (mc->x - oi->pos_x) *
scale_x_m / scale_x_d * bpp / 8;
} else {
- x = c->pos_x - mc->x;
+ x = oi->pos_x - mc->x;
}
- if (mc->y > c->pos_y) {
+ if (mc->y > oi->pos_y) {
y = 0;
- outh -= (mc->y - c->pos_y);
- paddr += (mc->y - c->pos_y) *
+ outh -= (mc->y - oi->pos_y);
+ paddr += (mc->y - oi->pos_y) *
scale_y_m / scale_y_d *
- c->screen_width * bpp / 8;
+ oi->screen_width * bpp / 8;
} else {
- y = c->pos_y - mc->y;
+ y = oi->pos_y - mc->y;
}
if (mc->w < (x + outw))
@@ -840,8 +912,8 @@ static int configure_overlay(enum omap_plane plane)
* the width if the original width was bigger.
*/
if ((w & 1) &&
- (c->color_mode == OMAP_DSS_COLOR_YUV2 ||
- c->color_mode == OMAP_DSS_COLOR_UYVY)) {
+ (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
+ oi->color_mode == OMAP_DSS_COLOR_UYVY)) {
if (orig_w > w)
w += 1;
else
@@ -851,19 +923,19 @@ static int configure_overlay(enum omap_plane plane)
r = dispc_setup_plane(plane,
paddr,
- c->screen_width,
+ oi->screen_width,
x, y,
w, h,
outw, outh,
- c->color_mode,
+ oi->color_mode,
c->ilace,
- c->rotation_type,
- c->rotation,
- c->mirror,
- c->global_alpha,
- c->pre_mult_alpha,
+ oi->rotation_type,
+ oi->rotation,
+ oi->mirror,
+ oi->global_alpha,
+ oi->pre_mult_alpha,
c->channel,
- c->p_uv_addr);
+ oi->p_uv_addr);
if (r) {
/* this shouldn't happen */
@@ -874,8 +946,7 @@ static int configure_overlay(enum omap_plane plane)
dispc_enable_replication(plane, c->replication);
- dispc_set_burst_size(plane, c->burst_size);
- dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high);
+ dispc_set_fifo_threshold(plane, c->fifo_low, c->fifo_high);
dispc_enable_plane(plane, 1);
@@ -884,16 +955,21 @@ static int configure_overlay(enum omap_plane plane)
static void configure_manager(enum omap_channel channel)
{
- struct manager_cache_data *c;
+ struct omap_overlay_manager_info *mi;
DSSDBGF("%d", channel);
- c = &dss_cache.manager_cache[channel];
+ /* picking info from the cache */
+ mi = &dss_cache.manager_cache[channel].info;
- dispc_set_default_color(channel, c->default_color);
- dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
- dispc_enable_trans_key(channel, c->trans_enabled);
- dispc_enable_alpha_blending(channel, c->alpha_enabled);
+ dispc_set_default_color(channel, mi->default_color);
+ dispc_set_trans_key(channel, mi->trans_key_type, mi->trans_key);
+ dispc_enable_trans_key(channel, mi->trans_enabled);
+ dispc_enable_alpha_blending(channel, mi->alpha_enabled);
+ if (dss_has_feature(FEAT_CPR)) {
+ dispc_enable_cpr(channel, mi->cpr_enable);
+ dispc_set_cpr_coef(channel, &mi->cpr_coefs);
+ }
}
/* configure_dispc() tries to write values from cache to shadow registers.
@@ -928,7 +1004,7 @@ static int configure_dispc(void)
if (!oc->dirty)
continue;
- if (oc->manual_update && !mc->do_manual_update)
+ if (mc->manual_update && !mc->do_manual_update)
continue;
if (mgr_busy[oc->channel]) {
@@ -976,7 +1052,7 @@ static int configure_dispc(void)
/* We don't need GO with manual update display. LCD iface will
* always be turned off after frame, and new settings will be
* taken in to use at next update */
- if (!mc->manual_upd_display)
+ if (!mc->manual_update)
dispc_go(i);
}
@@ -1011,6 +1087,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
{
struct overlay_cache_data *oc;
struct manager_cache_data *mc;
+ struct omap_overlay_info *oi;
const int num_ovls = dss_feat_get_num_ovls();
struct omap_overlay_manager *mgr;
int i;
@@ -1053,6 +1130,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
unsigned outw, outh;
oc = &dss_cache.overlay_cache[i];
+ oi = &oc->info;
if (oc->channel != mgr->id)
continue;
@@ -1068,39 +1146,39 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
if (!dispc_is_overlay_scaled(oc))
continue;
- outw = oc->out_width == 0 ?
- oc->width : oc->out_width;
- outh = oc->out_height == 0 ?
- oc->height : oc->out_height;
+ outw = oi->out_width == 0 ?
+ oi->width : oi->out_width;
+ outh = oi->out_height == 0 ?
+ oi->height : oi->out_height;
/* is the overlay outside the update region? */
if (!rectangle_intersects(x, y, w, h,
- oc->pos_x, oc->pos_y,
+ oi->pos_x, oi->pos_y,
outw, outh))
continue;
/* is the overlay totally inside the update region? */
- if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh,
+ if (rectangle_subset(oi->pos_x, oi->pos_y, outw, outh,
x, y, w, h))
continue;
- if (x > oc->pos_x)
- x1 = oc->pos_x;
+ if (x > oi->pos_x)
+ x1 = oi->pos_x;
else
x1 = x;
- if (y > oc->pos_y)
- y1 = oc->pos_y;
+ if (y > oi->pos_y)
+ y1 = oi->pos_y;
else
y1 = y;
- if ((x + w) < (oc->pos_x + outw))
- x2 = oc->pos_x + outw;
+ if ((x + w) < (oi->pos_x + outw))
+ x2 = oi->pos_x + outw;
else
x2 = x + w;
- if ((y + h) < (oc->pos_y + outh))
- y2 = oc->pos_y + outh;
+ if ((y + h) < (oi->pos_y + outh))
+ y2 = oi->pos_y + outh;
else
y2 = y + h;
@@ -1236,6 +1314,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
spin_lock_irqsave(&dss_cache.lock, flags);
/* Configure overlays */
@@ -1275,23 +1357,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
ovl->info_dirty = false;
oc->dirty = true;
-
- oc->paddr = ovl->info.paddr;
- oc->vaddr = ovl->info.vaddr;
- oc->p_uv_addr = ovl->info.p_uv_addr;
- oc->screen_width = ovl->info.screen_width;
- oc->width = ovl->info.width;
- oc->height = ovl->info.height;
- oc->color_mode = ovl->info.color_mode;
- oc->rotation = ovl->info.rotation;
- oc->rotation_type = ovl->info.rotation_type;
- oc->mirror = ovl->info.mirror;
- oc->pos_x = ovl->info.pos_x;
- oc->pos_y = ovl->info.pos_y;
- oc->out_width = ovl->info.out_width;
- oc->out_height = ovl->info.out_height;
- oc->global_alpha = ovl->info.global_alpha;
- oc->pre_mult_alpha = ovl->info.pre_mult_alpha;
+ oc->info = ovl->info;
oc->replication =
dss_use_replication(dssdev, ovl->info.color_mode);
@@ -1302,11 +1368,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
oc->enabled = true;
- oc->manual_update =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
- dssdev->driver->get_update_mode(dssdev) !=
- OMAP_DSS_UPDATE_AUTO;
-
++num_planes_enabled;
}
@@ -1334,20 +1395,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
mgr->info_dirty = false;
mc->dirty = true;
-
- mc->default_color = mgr->info.default_color;
- mc->trans_key_type = mgr->info.trans_key_type;
- mc->trans_key = mgr->info.trans_key;
- mc->trans_enabled = mgr->info.trans_enabled;
- mc->alpha_enabled = mgr->info.alpha_enabled;
-
- mc->manual_upd_display =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+ mc->info = mgr->info;
mc->manual_update =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
- dssdev->driver->get_update_mode(dssdev) !=
- OMAP_DSS_UPDATE_AUTO;
+ dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}
/* XXX TODO: Try to get fifomerge working. The problem is that it
@@ -1368,7 +1419,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
/* Configure overlay fifos */
for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
struct omap_dss_device *dssdev;
- u32 size;
+ u32 size, burst_size;
ovl = omap_dss_get_overlay(i);
@@ -1386,6 +1437,8 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
if (use_fifomerge)
size *= 3;
+ burst_size = dispc_get_burst_size(ovl->id);
+
switch (dssdev->type) {
case OMAP_DISPLAY_TYPE_DPI:
case OMAP_DISPLAY_TYPE_DBI:
@@ -1393,13 +1446,13 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_HDMI:
default_get_overlay_fifo_thresholds(ovl->id, size,
- &oc->burst_size, &oc->fifo_low,
+ burst_size, &oc->fifo_low,
&oc->fifo_high);
break;
#ifdef CONFIG_OMAP2_DSS_DSI
case OMAP_DISPLAY_TYPE_DSI:
dsi_get_overlay_fifo_thresholds(ovl->id, size,
- &oc->burst_size, &oc->fifo_low,
+ burst_size, &oc->fifo_low,
&oc->fifo_high);
break;
#endif
@@ -1409,7 +1462,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
}
r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
if (!dss_cache.irq_enabled) {
u32 mask;
@@ -1422,10 +1474,11 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
dss_cache.irq_enabled = true;
}
configure_dispc();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
spin_unlock_irqrestore(&dss_cache.lock, flags);
+ dispc_runtime_put();
+
return r;
}
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 0f08025..c84380c 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -84,32 +84,42 @@ static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
old_mgr = ovl->manager;
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
/* detach old manager */
if (old_mgr) {
r = ovl->unset_manager(ovl);
if (r) {
DSSERR("detach failed\n");
- return r;
+ goto err;
}
r = old_mgr->apply(old_mgr);
if (r)
- return r;
+ goto err;
}
if (mgr) {
r = ovl->set_manager(ovl, mgr);
if (r) {
DSSERR("Failed to attach overlay\n");
- return r;
+ goto err;
}
r = mgr->apply(mgr);
if (r)
- return r;
+ goto err;
}
+ dispc_runtime_put();
+
return size;
+
+err:
+ dispc_runtime_put();
+ return r;
}
static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
@@ -238,6 +248,9 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
u8 alpha;
struct omap_overlay_info info;
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
+ return -ENODEV;
+
r = kstrtou8(buf, 0, &alpha);
if (r)
return r;
@@ -504,7 +517,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
ovl->manager = mgr;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
/* XXX: When there is an overlay on a DSI manual update display, and
* the overlay is first disabled, then moved to tv, and enabled, we
* seem to get SYNC_LOST_DIGIT error.
@@ -518,7 +530,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
* the overlay, but before moving the overlay to TV.
*/
dispc_set_channel_out(ovl->id, mgr->id);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
}
@@ -719,6 +730,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
}
if (mgr) {
+ dispc_runtime_get();
+
for (i = 0; i < dss_feat_get_num_ovls(); i++) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
@@ -728,6 +741,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
omap_dss_set_manager(ovl, mgr);
}
}
+
+ dispc_runtime_put();
}
}
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index c06fbe0..39f4c59 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -33,6 +33,8 @@
#include <linux/hrtimer.h>
#include <linux/seq_file.h>
#include <linux/semaphore.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include "dss.h"
@@ -120,12 +122,25 @@ static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
return __raw_readl(rfbi.base + idx.idx);
}
-static void rfbi_enable_clocks(bool enable)
+static int rfbi_runtime_get(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+
+ DSSDBG("rfbi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&rfbi.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void rfbi_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("rfbi_runtime_put\n");
+
+ r = pm_runtime_put(&rfbi.pdev->dev);
+ WARN_ON(r < 0);
}
void rfbi_bus_lock(void)
@@ -805,7 +820,8 @@ void rfbi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (rfbi_runtime_get())
+ return;
DUMPREG(RFBI_REVISION);
DUMPREG(RFBI_SYSCONFIG);
@@ -836,7 +852,7 @@ void rfbi_dump_regs(struct seq_file *s)
DUMPREG(RFBI_VSYNC_WIDTH);
DUMPREG(RFBI_HSYNC_WIDTH);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ rfbi_runtime_put();
#undef DUMPREG
}
@@ -844,7 +860,9 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
{
int r;
- rfbi_enable_clocks(1);
+ r = rfbi_runtime_get();
+ if (r)
+ return r;
r = omap_dss_start_device(dssdev);
if (r) {
@@ -879,6 +897,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
err1:
omap_dss_stop_device(dssdev);
err0:
+ rfbi_runtime_put();
return r;
}
EXPORT_SYMBOL(omapdss_rfbi_display_enable);
@@ -889,7 +908,7 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
DISPC_IRQ_FRAMEDONE);
omap_dss_stop_device(dssdev);
- rfbi_enable_clocks(0);
+ rfbi_runtime_put();
}
EXPORT_SYMBOL(omapdss_rfbi_display_disable);
@@ -904,8 +923,9 @@ int rfbi_init_display(struct omap_dss_device *dssdev)
static int omap_rfbihw_probe(struct platform_device *pdev)
{
u32 rev;
- u32 l;
struct resource *rfbi_mem;
+ struct clk *clk;
+ int r;
rfbi.pdev = pdev;
@@ -914,46 +934,102 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
if (!rfbi_mem) {
DSSERR("can't get IORESOURCE_MEM RFBI\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto err_ioremap;
}
rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem));
if (!rfbi.base) {
DSSERR("can't ioremap RFBI\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto err_ioremap;
}
- rfbi_enable_clocks(1);
+ pm_runtime_enable(&pdev->dev);
+
+ r = rfbi_runtime_get();
+ if (r)
+ goto err_get_rfbi;
msleep(10);
- rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
+ if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap3630())
+ clk = dss_get_ick();
+ else
+ clk = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get ick\n");
+ r = PTR_ERR(clk);
+ goto err_get_ick;
+ }
+
+ rfbi.l4_khz = clk_get_rate(clk) / 1000;
- /* Enable autoidle and smart-idle */
- l = rfbi_read_reg(RFBI_SYSCONFIG);
- l |= (1 << 0) | (2 << 3);
- rfbi_write_reg(RFBI_SYSCONFIG, l);
+ clk_put(clk);
rev = rfbi_read_reg(RFBI_REVISION);
dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- rfbi_enable_clocks(0);
+ rfbi_runtime_put();
return 0;
+
+err_get_ick:
+ rfbi_runtime_put();
+err_get_rfbi:
+ pm_runtime_disable(&pdev->dev);
+ iounmap(rfbi.base);
+err_ioremap:
+ return r;
}
static int omap_rfbihw_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
iounmap(rfbi.base);
return 0;
}
+static int rfbi_runtime_suspend(struct device *dev)
+{
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int rfbi_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ goto err_get_dispc;
+
+ return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static const struct dev_pm_ops rfbi_pm_ops = {
+ .runtime_suspend = rfbi_runtime_suspend,
+ .runtime_resume = rfbi_runtime_resume,
+};
+
static struct platform_driver omap_rfbihw_driver = {
.probe = omap_rfbihw_probe,
.remove = omap_rfbihw_remove,
.driver = {
.name = "omapdss_rfbi",
.owner = THIS_MODULE,
+ .pm = &rfbi_pm_ops,
},
};
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 0bd4b03..3a688c8 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -20,13 +20,11 @@
#define DSS_SUBSYS_NAME "SDI"
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <video/omapdss.h>
-#include <plat/cpu.h>
#include "dss.h"
static struct {
@@ -60,14 +58,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
r = regulator_enable(sdi.vdds_sdi_reg);
if (r)
- goto err1;
+ goto err_reg_enable;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
sdi_basic_init(dssdev);
@@ -80,7 +84,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
r = dss_calc_clock_div(1, t->pixel_clock * 1000,
&dss_cinfo, &dispc_cinfo);
if (r)
- goto err2;
+ goto err_calc_clock_div;
fck = dss_cinfo.fck;
lck_div = dispc_cinfo.lck_div;
@@ -101,27 +105,34 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
r = dss_set_clock_div(&dss_cinfo);
if (r)
- goto err2;
+ goto err_set_dss_clock_div;
r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
- goto err2;
+ goto err_set_dispc_clock_div;
dss_sdi_init(dssdev->phy.sdi.datapairs);
r = dss_sdi_enable();
if (r)
- goto err1;
+ goto err_sdi_enable;
mdelay(2);
dssdev->manager->enable(dssdev->manager);
return 0;
-err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+
+err_sdi_enable:
+err_set_dispc_clock_div:
+err_set_dss_clock_div:
+err_calc_clock_div:
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
regulator_disable(sdi.vdds_sdi_reg);
-err1:
+err_reg_enable:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
return r;
}
EXPORT_SYMBOL(omapdss_sdi_display_enable);
@@ -132,7 +143,8 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
dss_sdi_disable();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
+ dss_runtime_put();
regulator_disable(sdi.vdds_sdi_reg);
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 980f919..173c664 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -33,11 +33,13 @@
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/cpu.h>
#include "dss.h"
+#include "dss_features.h"
/* Venc registers */
#define VENC_REV_ID 0x00
@@ -292,6 +294,9 @@ static struct {
struct mutex venc_lock;
u32 wss_data;
struct regulator *vdda_dac_reg;
+
+ struct clk *tv_clk;
+ struct clk *tv_dac_clk;
} venc;
static inline void venc_write_reg(int idx, u32 val)
@@ -380,14 +385,25 @@ static void venc_reset(void)
#endif
}
-static void venc_enable_clocks(int enable)
+static int venc_runtime_get(void)
+{
+ int r;
+
+ DSSDBG("venc_runtime_get\n");
+
+ r = pm_runtime_get_sync(&venc.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void venc_runtime_put(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
- DSS_CLK_VIDFCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
- DSS_CLK_VIDFCK);
+ int r;
+
+ DSSDBG("venc_runtime_put\n");
+
+ r = pm_runtime_put(&venc.pdev->dev);
+ WARN_ON(r < 0);
}
static const struct venc_config *venc_timings_to_config(
@@ -406,8 +422,6 @@ static void venc_power_on(struct omap_dss_device *dssdev)
{
u32 l;
- venc_enable_clocks(1);
-
venc_reset();
venc_write_config(venc_timings_to_config(&dssdev->panel.timings));
@@ -448,8 +462,6 @@ static void venc_power_off(struct omap_dss_device *dssdev)
dssdev->platform_disable(dssdev);
regulator_disable(venc.vdda_dac_reg);
-
- venc_enable_clocks(0);
}
@@ -487,6 +499,10 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
goto err1;
}
+ r = venc_runtime_get();
+ if (r)
+ goto err1;
+
venc_power_on(dssdev);
venc.wss_data = 0;
@@ -520,6 +536,8 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
venc_power_off(dssdev);
+ venc_runtime_put();
+
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
omap_dss_stop_device(dssdev);
@@ -538,20 +556,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
return venc_panel_enable(dssdev);
}
-static enum omap_dss_update_mode venc_get_update_mode(
- struct omap_dss_device *dssdev)
-{
- return OMAP_DSS_UPDATE_AUTO;
-}
-
-static int venc_set_update_mode(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode mode)
-{
- if (mode != OMAP_DSS_UPDATE_AUTO)
- return -EINVAL;
- return 0;
-}
-
static void venc_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -598,6 +602,7 @@ static u32 venc_get_wss(struct omap_dss_device *dssdev)
static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
{
const struct venc_config *config;
+ int r;
DSSDBG("venc_set_wss\n");
@@ -608,16 +613,19 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
/* Invert due to VENC_L21_WC_CTL:INV=1 */
venc.wss_data = (wss ^ 0xfffff) << 8;
- venc_enable_clocks(1);
+ r = venc_runtime_get();
+ if (r)
+ goto err;
venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
venc.wss_data);
- venc_enable_clocks(0);
+ venc_runtime_put();
+err:
mutex_unlock(&venc.venc_lock);
- return 0;
+ return r;
}
static struct omap_dss_driver venc_driver = {
@@ -632,9 +640,6 @@ static struct omap_dss_driver venc_driver = {
.get_resolution = omapdss_default_get_resolution,
.get_recommended_bpp = omapdss_default_get_recommended_bpp,
- .set_update_mode = venc_set_update_mode,
- .get_update_mode = venc_get_update_mode,
-
.get_timings = venc_get_timings,
.set_timings = venc_set_timings,
.check_timings = venc_check_timings,
@@ -673,7 +678,8 @@ void venc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
- venc_enable_clocks(1);
+ if (venc_runtime_get())
+ return;
DUMPREG(VENC_F_CONTROL);
DUMPREG(VENC_VIDOUT_CTRL);
@@ -717,16 +723,56 @@ void venc_dump_regs(struct seq_file *s)
DUMPREG(VENC_OUTPUT_CONTROL);
DUMPREG(VENC_OUTPUT_TEST);
- venc_enable_clocks(0);
+ venc_runtime_put();
#undef DUMPREG
}
+static int venc_get_clocks(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get fck\n");
+ return PTR_ERR(clk);
+ }
+
+ venc.tv_clk = clk;
+
+ if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
+ if (cpu_is_omap34xx() || cpu_is_omap3630())
+ clk = clk_get(&pdev->dev, "dss_96m_fck");
+ else
+ clk = clk_get(&pdev->dev, "tv_dac_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get tv_dac_clk\n");
+ clk_put(venc.tv_clk);
+ return PTR_ERR(clk);
+ }
+ } else {
+ clk = NULL;
+ }
+
+ venc.tv_dac_clk = clk;
+
+ return 0;
+}
+
+static void venc_put_clocks(void)
+{
+ if (venc.tv_clk)
+ clk_put(venc.tv_clk);
+ if (venc.tv_dac_clk)
+ clk_put(venc.tv_dac_clk);
+}
+
/* VENC HW IP initialisation */
static int omap_venchw_probe(struct platform_device *pdev)
{
u8 rev_id;
struct resource *venc_mem;
+ int r;
venc.pdev = pdev;
@@ -737,22 +783,40 @@ static int omap_venchw_probe(struct platform_device *pdev)
venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
if (!venc_mem) {
DSSERR("can't get IORESOURCE_MEM VENC\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto err_ioremap;
}
venc.base = ioremap(venc_mem->start, resource_size(venc_mem));
if (!venc.base) {
DSSERR("can't ioremap VENC\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto err_ioremap;
}
- venc_enable_clocks(1);
+ r = venc_get_clocks(pdev);
+ if (r)
+ goto err_get_clk;
+
+ pm_runtime_enable(&pdev->dev);
+
+ r = venc_runtime_get();
+ if (r)
+ goto err_get_venc;
rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
- venc_enable_clocks(0);
+ venc_runtime_put();
return omap_dss_register_driver(&venc_driver);
+
+err_get_venc:
+ pm_runtime_disable(&pdev->dev);
+ venc_put_clocks();
+err_get_clk:
+ iounmap(venc.base);
+err_ioremap:
+ return r;
}
static int omap_venchw_remove(struct platform_device *pdev)
@@ -763,16 +827,61 @@ static int omap_venchw_remove(struct platform_device *pdev)
}
omap_dss_unregister_driver(&venc_driver);
+ pm_runtime_disable(&pdev->dev);
+ venc_put_clocks();
+
iounmap(venc.base);
return 0;
}
+static int venc_runtime_suspend(struct device *dev)
+{
+ if (venc.tv_dac_clk)
+ clk_disable(venc.tv_dac_clk);
+ clk_disable(venc.tv_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int venc_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ goto err_get_dispc;
+
+ clk_enable(venc.tv_clk);
+ if (venc.tv_dac_clk)
+ clk_enable(venc.tv_dac_clk);
+
+ return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static const struct dev_pm_ops venc_pm_ops = {
+ .runtime_suspend = venc_runtime_suspend,
+ .runtime_resume = venc_runtime_resume,
+};
+
static struct platform_driver omap_venchw_driver = {
.probe = omap_venchw_probe,
.remove = omap_venchw_remove,
.driver = {
.name = "omapdss_venc",
.owner = THIS_MODULE,
+ .pm = &venc_pm_ops,
},
};
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index cff4503..6b1ac23 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -316,67 +316,67 @@ int omapfb_update_window(struct fb_info *fbi,
}
EXPORT_SYMBOL(omapfb_update_window);
-static int omapfb_set_update_mode(struct fb_info *fbi,
+int omapfb_set_update_mode(struct fb_info *fbi,
enum omapfb_update_mode mode)
{
struct omap_dss_device *display = fb2display(fbi);
- enum omap_dss_update_mode um;
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb_display_data *d;
int r;
- if (!display || !display->driver->set_update_mode)
+ if (!display)
return -EINVAL;
- switch (mode) {
- case OMAPFB_UPDATE_DISABLED:
- um = OMAP_DSS_UPDATE_DISABLED;
- break;
+ if (mode != OMAPFB_AUTO_UPDATE && mode != OMAPFB_MANUAL_UPDATE)
+ return -EINVAL;
- case OMAPFB_AUTO_UPDATE:
- um = OMAP_DSS_UPDATE_AUTO;
- break;
+ omapfb_lock(fbdev);
- case OMAPFB_MANUAL_UPDATE:
- um = OMAP_DSS_UPDATE_MANUAL;
- break;
+ d = get_display_data(fbdev, display);
- default:
- return -EINVAL;
+ if (d->update_mode == mode) {
+ omapfb_unlock(fbdev);
+ return 0;
}
- r = display->driver->set_update_mode(display, um);
+ r = 0;
+
+ if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+ if (mode == OMAPFB_AUTO_UPDATE)
+ omapfb_start_auto_update(fbdev, display);
+ else /* MANUAL_UPDATE */
+ omapfb_stop_auto_update(fbdev, display);
+
+ d->update_mode = mode;
+ } else { /* AUTO_UPDATE */
+ if (mode == OMAPFB_MANUAL_UPDATE)
+ r = -EINVAL;
+ }
+
+ omapfb_unlock(fbdev);
return r;
}
-static int omapfb_get_update_mode(struct fb_info *fbi,
+int omapfb_get_update_mode(struct fb_info *fbi,
enum omapfb_update_mode *mode)
{
struct omap_dss_device *display = fb2display(fbi);
- enum omap_dss_update_mode m;
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb_display_data *d;
if (!display)
return -EINVAL;
- if (!display->driver->get_update_mode) {
- *mode = OMAPFB_AUTO_UPDATE;
- return 0;
- }
+ omapfb_lock(fbdev);
- m = display->driver->get_update_mode(display);
+ d = get_display_data(fbdev, display);
- switch (m) {
- case OMAP_DSS_UPDATE_DISABLED:
- *mode = OMAPFB_UPDATE_DISABLED;
- break;
- case OMAP_DSS_UPDATE_AUTO:
- *mode = OMAPFB_AUTO_UPDATE;
- break;
- case OMAP_DSS_UPDATE_MANUAL:
- *mode = OMAPFB_MANUAL_UPDATE;
- break;
- default:
- BUG();
- }
+ *mode = d->update_mode;
+
+ omapfb_unlock(fbdev);
return 0;
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 505bc12..602b71a 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -46,6 +46,10 @@ static char *def_vram;
static int def_vrfb;
static int def_rotate;
static int def_mirror;
+static bool auto_update;
+static unsigned int auto_update_freq;
+module_param(auto_update, bool, 0);
+module_param(auto_update_freq, uint, 0644);
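+/*
+ * When omapfb is built in, these can be set on the kernel command line as
+ * omapfb.auto_update=1 and omapfb.auto_update_freq=<updates per second>;
+ * auto_update_freq (permission 0644) can also be changed at runtime via
+ * /sys/module/omapfb/parameters/auto_update_freq.
+ */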
#ifdef DEBUG
unsigned int omapfb_debug;
@@ -1242,6 +1246,7 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omap_dss_device *display = fb2display(fbi);
+ struct omapfb_display_data *d;
int r = 0;
if (!display)
@@ -1249,6 +1254,8 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
omapfb_lock(fbdev);
+ d = get_display_data(fbdev, display);
+
switch (blank) {
case FB_BLANK_UNBLANK:
if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
@@ -1257,6 +1264,11 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
if (display->driver->resume)
r = display->driver->resume(display);
+ if ((display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) &&
+ d->update_mode == OMAPFB_AUTO_UPDATE &&
+ !d->auto_update_work_enabled)
+ omapfb_start_auto_update(fbdev, display);
+
break;
case FB_BLANK_NORMAL:
@@ -1268,6 +1280,9 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
goto exit;
+ if (d->auto_update_work_enabled)
+ omapfb_stop_auto_update(fbdev, display);
+
if (display->driver->suspend)
r = display->driver->suspend(display);
@@ -1724,6 +1739,78 @@ err:
return r;
}
+static void omapfb_auto_update_work(struct work_struct *work)
+{
+ struct omap_dss_device *dssdev;
+ struct omap_dss_driver *dssdrv;
+ struct omapfb_display_data *d;
+ u16 w, h;
+ unsigned int freq;
+ struct omapfb2_device *fbdev;
+
+ d = container_of(work, struct omapfb_display_data,
+ auto_update_work.work);
+
+ dssdev = d->dssdev;
+ dssdrv = dssdev->driver;
+ fbdev = d->fbdev;
+
+ if (!dssdrv || !dssdrv->update)
+ return;
+
+ if (dssdrv->sync)
+ dssdrv->sync(dssdev);
+
+ dssdrv->get_resolution(dssdev, &w, &h);
+ dssdrv->update(dssdev, 0, 0, w, h);
+
+ freq = auto_update_freq;
+ if (freq == 0)
+ freq = 20;
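+ /*
+ * With the default of 20 updates per second the work is re-queued
+ * every HZ / 20 jiffies, i.e. roughly every 50 ms.
+ */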
+ queue_delayed_work(fbdev->auto_update_wq,
+ &d->auto_update_work, HZ / freq);
+}
+
+void omapfb_start_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display)
+{
+ struct omapfb_display_data *d;
+
+ if (fbdev->auto_update_wq == NULL) {
+ struct workqueue_struct *wq;
+
+ wq = create_singlethread_workqueue("omapfb_auto_update");
+
+ if (wq == NULL) {
+ dev_err(fbdev->dev, "Failed to create workqueue for "
+ "auto-update\n");
+ return;
+ }
+
+ fbdev->auto_update_wq = wq;
+ }
+
+ d = get_display_data(fbdev, display);
+
+ INIT_DELAYED_WORK(&d->auto_update_work, omapfb_auto_update_work);
+
+ d->auto_update_work_enabled = true;
+
+ omapfb_auto_update_work(&d->auto_update_work.work);
+}
+
+void omapfb_stop_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display)
+{
+ struct omapfb_display_data *d;
+
+ d = get_display_data(fbdev, display);
+
+ cancel_delayed_work_sync(&d->auto_update_work);
+
+ d->auto_update_work_enabled = false;
+}
+
/* initialize fb_info, var, fix to something sane based on the display */
static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
{
@@ -1858,10 +1945,21 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
}
for (i = 0; i < fbdev->num_displays; i++) {
- if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED)
- fbdev->displays[i]->driver->disable(fbdev->displays[i]);
+ struct omap_dss_device *dssdev = fbdev->displays[i].dssdev;
+
+ if (fbdev->displays[i].auto_update_work_enabled)
+ omapfb_stop_auto_update(fbdev, dssdev);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
+ dssdev->driver->disable(dssdev);
+
+ omap_dss_put_device(dssdev);
+ }
- omap_dss_put_device(fbdev->displays[i]);
+ if (fbdev->auto_update_wq != NULL) {
+ flush_workqueue(fbdev->auto_update_wq);
+ destroy_workqueue(fbdev->auto_update_wq);
+ fbdev->auto_update_wq = NULL;
}
dev_set_drvdata(fbdev->dev, NULL);
@@ -2084,14 +2182,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
int r;
u8 bpp;
struct omap_video_timings timings, temp_timings;
+ struct omapfb_display_data *d;
r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
if (r)
return r;
- fbdev->bpp_overrides[fbdev->num_bpp_overrides].dssdev = display;
- fbdev->bpp_overrides[fbdev->num_bpp_overrides].bpp = bpp;
- ++fbdev->num_bpp_overrides;
+ d = get_display_data(fbdev, display);
+ d->bpp_override = bpp;
if (display->driver->check_timings) {
r = display->driver->check_timings(display, &timings);
@@ -2117,14 +2215,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
static int omapfb_get_recommended_bpp(struct omapfb2_device *fbdev,
struct omap_dss_device *dssdev)
{
- int i;
+ struct omapfb_display_data *d;
BUG_ON(dssdev->driver->get_recommended_bpp == NULL);
- for (i = 0; i < fbdev->num_bpp_overrides; ++i) {
- if (dssdev == fbdev->bpp_overrides[i].dssdev)
- return fbdev->bpp_overrides[i].bpp;
- }
+ d = get_display_data(fbdev, dssdev);
+
+ if (d->bpp_override != 0)
+ return d->bpp_override;
return dssdev->driver->get_recommended_bpp(dssdev);
}
@@ -2156,9 +2254,9 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
display = NULL;
for (i = 0; i < fbdev->num_displays; ++i) {
- if (strcmp(fbdev->displays[i]->name,
+ if (strcmp(fbdev->displays[i].dssdev->name,
display_str) == 0) {
- display = fbdev->displays[i];
+ display = fbdev->displays[i].dssdev;
break;
}
}
@@ -2182,6 +2280,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
struct omap_dss_device *dssdev)
{
struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omapfb_display_data *d;
int r;
r = dssdrv->enable(dssdev);
@@ -2191,8 +2290,20 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
return r;
}
+ d = get_display_data(fbdev, dssdev);
+
+ d->fbdev = fbdev;
+
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
u16 w, h;
+
+ if (auto_update) {
+ omapfb_start_auto_update(fbdev, dssdev);
+ d->update_mode = OMAPFB_AUTO_UPDATE;
+ } else {
+ d->update_mode = OMAPFB_MANUAL_UPDATE;
+ }
+
if (dssdrv->enable_te) {
r = dssdrv->enable_te(dssdev, 1);
if (r) {
@@ -2201,16 +2312,6 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
}
}
- if (dssdrv->set_update_mode) {
- r = dssdrv->set_update_mode(dssdev,
- OMAP_DSS_UPDATE_MANUAL);
- if (r) {
- dev_err(fbdev->dev,
- "Failed to set update mode\n");
- return r;
- }
- }
-
dssdrv->get_resolution(dssdev, &w, &h);
r = dssdrv->update(dssdev, 0, 0, w, h);
if (r) {
@@ -2219,15 +2320,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
return r;
}
} else {
- if (dssdrv->set_update_mode) {
- r = dssdrv->set_update_mode(dssdev,
- OMAP_DSS_UPDATE_AUTO);
- if (r) {
- dev_err(fbdev->dev,
- "Failed to set update mode\n");
- return r;
- }
- }
+ d->update_mode = OMAPFB_AUTO_UPDATE;
}
return 0;
@@ -2275,6 +2368,8 @@ static int omapfb_probe(struct platform_device *pdev)
fbdev->num_displays = 0;
dssdev = NULL;
for_each_dss_dev(dssdev) {
+ struct omapfb_display_data *d;
+
omap_dss_get_device(dssdev);
if (!dssdev->driver) {
@@ -2282,7 +2377,12 @@ static int omapfb_probe(struct platform_device *pdev)
r = -ENODEV;
}
- fbdev->displays[fbdev->num_displays++] = dssdev;
+ d = &fbdev->displays[fbdev->num_displays++];
+ d->dssdev = dssdev;
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+ d->update_mode = OMAPFB_MANUAL_UPDATE;
+ else
+ d->update_mode = OMAPFB_AUTO_UPDATE;
}
if (r)
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 2f5e817..153bf1a 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -518,6 +518,39 @@ static ssize_t show_virt(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr);
}
+static ssize_t show_upd_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ enum omapfb_update_mode mode;
+ int r;
+
+ r = omapfb_get_update_mode(fbi, &mode);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode);
+}
+
+static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ unsigned mode;
+ int r;
+
+ r = kstrtouint(buf, 0, &mode);
+ if (r)
+ return r;
+
+ r = omapfb_set_update_mode(fbi, mode);
+ if (r)
+ return r;
+
+ return count;
+}
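+
+/*
+ * The update_mode attribute takes the numeric omapfb_update_mode values,
+ * e.g. (assuming the usual /sys/class/graphics/fb0 node):
+ *	echo 1 > update_mode	selects OMAPFB_AUTO_UPDATE
+ *	echo 2 > update_mode	selects OMAPFB_MANUAL_UPDATE
+ */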
+
static struct device_attribute omapfb_attrs[] = {
__ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type,
store_rotate_type),
@@ -528,6 +561,7 @@ static struct device_attribute omapfb_attrs[] = {
store_overlays_rotate),
__ATTR(phys_addr, S_IRUGO, show_phys, NULL),
__ATTR(virt_addr, S_IRUGO, show_virt, NULL),
+ __ATTR(update_mode, S_IRUGO | S_IWUSR, show_upd_mode, store_upd_mode),
};
int omapfb_create_sysfs(struct omapfb2_device *fbdev)
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index aa1b1d9..fdf0ede 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -73,6 +73,15 @@ struct omapfb_info {
bool mirror;
};
+struct omapfb_display_data {
+ struct omapfb2_device *fbdev;
+ struct omap_dss_device *dssdev;
+ u8 bpp_override;
+ enum omapfb_update_mode update_mode;
+ bool auto_update_work_enabled;
+ struct delayed_work auto_update_work;
+};
+
struct omapfb2_device {
struct device *dev;
struct mutex mtx;
@@ -86,17 +95,13 @@ struct omapfb2_device {
struct omapfb2_mem_region regions[10];
unsigned num_displays;
- struct omap_dss_device *displays[10];
+ struct omapfb_display_data displays[10];
unsigned num_overlays;
struct omap_overlay *overlays[10];
unsigned num_managers;
struct omap_overlay_manager *managers[10];
- unsigned num_bpp_overrides;
- struct {
- struct omap_dss_device *dssdev;
- u8 bpp;
- } bpp_overrides[10];
+ struct workqueue_struct *auto_update_wq;
};
struct omapfb_colormode {
@@ -128,6 +133,13 @@ int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
u16 posx, u16 posy, u16 outw, u16 outh);
+void omapfb_start_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display);
+void omapfb_stop_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display);
+int omapfb_get_update_mode(struct fb_info *fbi, enum omapfb_update_mode *mode);
+int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode);
+
/* find the display connected to this fb, if any */
static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
{
@@ -143,6 +155,19 @@ static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
return NULL;
}
+static inline struct omapfb_display_data *get_display_data(
+ struct omapfb2_device *fbdev, struct omap_dss_device *dssdev)
+{
+ int i;
+
+ for (i = 0; i < fbdev->num_displays; ++i)
+ if (fbdev->displays[i].dssdev == dssdev)
+ return &fbdev->displays[i];
+
+ /* This should never happen */
+ BUG();
+}
+
static inline void omapfb_lock(struct omapfb2_device *fbdev)
{
mutex_lock(&fbdev->mtx);
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index 32549d1..dcaab90 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -55,7 +55,7 @@
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
-#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) || (chip<=S3_PROSAVAGEDDR))
+#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) && (chip<=S3_PROSAVAGEDDR))
#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 019dbd3..b048417 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -24,7 +24,7 @@
#include <linux/backlight.h>
#include <linux/gpio.h>
#include <video/sh_mobile_lcdc.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "sh_mobile_lcdcfb.h"
#include "sh_mobile_meram.h"
diff --git a/drivers/video/vermilion/vermilion.h b/drivers/video/vermilion/vermilion.h
index 7491abf..43d11ec 100644
--- a/drivers/video/vermilion/vermilion.h
+++ b/drivers/video/vermilion/vermilion.h
@@ -31,7 +31,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/mutex.h>
#define VML_DEVICE_GPU 0x5002
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 02bf7bf..b5abaae 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1,7 +1,7 @@
/*
* dscore.c
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -1024,5 +1024,5 @@ module_init(ds_init);
module_exit(ds_fini);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 1550431..f667c26 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -1,7 +1,7 @@
/*
* matrox_w1.c
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -20,7 +20,7 @@
*/
#include <asm/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <linux/delay.h>
@@ -39,7 +39,7 @@
#include "../w1_log.h"
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Driver for transport (Dallas 1-wire protocol) over VGA DDC (matrox gpio).");
static struct pci_device_id matrox_w1_tbl[] = {
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index c377818..7c8cdb8 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -373,7 +373,7 @@ static int w1_f29_add_slave(struct w1_slave *sl)
static void w1_f29_remove_slave(struct w1_slave *sl)
{
int i;
- for (i = NB_SYSFS_BIN_FILES; i <= 0; --i)
+ for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
sysfs_remove_bin_file(&sl->dev.kobj,
&(w1_f29_sysfs_bin_files[i]));
}
diff --git a/drivers/w1/slaves/w1_smem.c b/drivers/w1/slaves/w1_smem.c
index cc8c02e..8465562 100644
--- a/drivers/w1/slaves/w1_smem.c
+++ b/drivers/w1/slaves/w1_smem.c
@@ -1,7 +1,7 @@
/*
* w1_smem.c
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -32,7 +32,7 @@
#include "../w1_family.h"
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family.");
static struct w1_family w1_smem_family_01 = {
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 17726a0..a1ef9b5 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -1,7 +1,7 @@
/*
* w1_therm.c
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -34,7 +34,7 @@
#include "../w1_family.h"
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");
/* Allow the strong pullup to be disabled, but default to enabled.
@@ -86,6 +86,11 @@ static struct w1_family w1_therm_family_DS1822 = {
.fops = &w1_therm_fops,
};
+static struct w1_family w1_therm_family_DS28EA00 = {
+ .fid = W1_THERM_DS28EA00,
+ .fops = &w1_therm_fops,
+};
+
struct w1_therm_family_converter
{
u8 broken;
@@ -111,6 +116,10 @@ static struct w1_therm_family_converter w1_therm_families[] = {
.f = &w1_therm_family_DS18B20,
.convert = w1_DS18B20_convert_temp
},
+ {
+ .f = &w1_therm_family_DS28EA00,
+ .convert = w1_DS18B20_convert_temp
+ },
};
static inline int w1_DS18B20_convert_temp(u8 rom[9])
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 10606c8..c374978 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1,7 +1,7 @@
/*
* w1.c
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -33,7 +33,7 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "w1.h"
#include "w1_log.h"
@@ -42,7 +42,7 @@
#include "w1_netlink.h"
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
static int w1_timeout = 10;
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 1ce23fc..4d012ca 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -1,7 +1,7 @@
/*
* w1.h
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 4a09904..6335979 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -1,7 +1,7 @@
/*
* w1_family.c
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 97479ae..490cda2 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -1,7 +1,7 @@
/*
* w1_family.h
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -24,7 +24,7 @@
#include <linux/types.h>
#include <linux/device.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define W1_FAMILY_DEFAULT 0
#define W1_FAMILY_SMEM_01 0x01
@@ -38,6 +38,7 @@
#define W1_EEPROM_DS2431 0x2D
#define W1_FAMILY_DS2760 0x30
#define W1_FAMILY_DS2780 0x32
+#define W1_THERM_DS28EA00 0x42
#define MAXNAMELEN 32
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index b50be3f..d220bce 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -1,7 +1,7 @@
/*
* w1_int.c
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h
index 4274082..2ad7d44 100644
--- a/drivers/w1/w1_int.h
+++ b/drivers/w1/w1_int.h
@@ -1,7 +1,7 @@
/*
* w1_int.h
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 8e8b64c..765b37b 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -1,7 +1,7 @@
/*
* w1_io.c
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h
index e6ab7cf..9c7bd62 100644
--- a/drivers/w1/w1_log.h
+++ b/drivers/w1/w1_log.h
@@ -1,7 +1,7 @@
/*
* w1_log.h
*
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 55aabd9..40788c9 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -1,7 +1,7 @@
/*
* w1_netlink.c
*
- * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index 27e950f..b0922dc 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -1,7 +1,7 @@
/*
* w1_netlink.h
*
- * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 21d816e..86b0735 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -28,6 +28,14 @@ menuconfig WATCHDOG
if WATCHDOG
+config WATCHDOG_CORE
+ bool "WatchDog Timer Driver Core"
+ ---help---
+ Say Y here if you want to use the new watchdog timer driver core.
+ This driver provides a framework for all watchdog timer drivers
+ and gives them the /dev/watchdog interface (and later also the
+ sysfs interface).
+
config WATCHDOG_NOWAYOUT
bool "Disable watchdog shutdown on close"
help
@@ -186,6 +194,15 @@ config SA1100_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called sa1100_wdt.
+config DW_WATCHDOG
+ tristate "Synopsys DesignWare watchdog"
+ depends on ARM && HAVE_CLK
+ help
+ Say Y here to include support for the Synopsys DesignWare
+ watchdog timer found in many ARM chips.
+ To compile this driver as a module, choose M here: the
+ module will be called dw_wdt.
+
config MPCORE_WATCHDOG
tristate "MPcore watchdog"
depends on HAVE_ARM_TWD
@@ -321,7 +338,7 @@ config MAX63XX_WATCHDOG
config IMX2_WDT
tristate "IMX2+ Watchdog"
- depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+ depends on IMX_HAVE_PLATFORM_IMX2_WDT
help
This is the driver for the hardware watchdog
on the Freescale IMX2 and later processors.
@@ -879,6 +896,20 @@ config M54xx_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called m54xx_wdt.
+# MicroBlaze Architecture
+
+config XILINX_WATCHDOG
+ tristate "Xilinx Watchdog timer"
+ depends on MICROBLAZE
+ ---help---
+ Watchdog driver for the xps_timebase_wdt ip core.
+
+ IMPORTANT: The xps_timebase_wdt parent must have the property
+ "clock-frequency" in the device tree.
+
+ To compile this driver as a module, choose M here: the
+ module will be called of_xilinx_wdt.
+
# MIPS Architecture
config ATH79_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index ed26f70..55bd574 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -2,6 +2,10 @@
# Makefile for the WatchDog device drivers.
#
+# The WatchDog Timer Driver Core.
+watchdog-objs += watchdog_core.o watchdog_dev.o
+obj-$(CONFIG_WATCHDOG_CORE) += watchdog.o
+
# Only one watchdog can succeed. We probe the ISA/PCI/USB based
# watchdog-cards first, then the architecture specific watchdog
# drivers and then the architecture independent "softdog" driver.
@@ -37,6 +41,7 @@ obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o
obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
+obj-$(CONFIG_DW_WATCHDOG) += dw_wdt.o
obj-$(CONFIG_MPCORE_WATCHDOG) += mpcore_wdt.o
obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
@@ -109,6 +114,9 @@ obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
# M68K Architecture
obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
+# MicroBlaze Architecture
+obj-$(CONFIG_XILINX_WATCHDOG) += of_xilinx_wdt.o
+
# MIPS Architecture
obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index eac2602..87445b2 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -31,7 +31,7 @@
#include <linux/bitops.h>
#include <linux/uaccess.h>
-#include <mach/at91_wdt.h>
+#include "at91sam9_wdt.h"
#define DRV_NAME "AT91SAM9 Watchdog"
@@ -284,27 +284,8 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
return res;
}
-#ifdef CONFIG_PM
-
-static int at91wdt_suspend(struct platform_device *pdev, pm_message_t message)
-{
- return 0;
-}
-
-static int at91wdt_resume(struct platform_device *pdev)
-{
- return 0;
-}
-
-#else
-#define at91wdt_suspend NULL
-#define at91wdt_resume NULL
-#endif
-
static struct platform_driver at91wdt_driver = {
.remove = __exit_p(at91wdt_remove),
- .suspend = at91wdt_suspend,
- .resume = at91wdt_resume,
.driver = {
.name = "at91_wdt",
.owner = THIS_MODULE,
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
new file mode 100644
index 0000000..757f9ca
--- /dev/null
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -0,0 +1,37 @@
+/*
+ * drivers/watchdog/at91sam9_wdt.h
+ *
+ * Copyright (C) 2007 Andrew Victor
+ * Copyright (C) 2007 Atmel Corporation.
+ *
+ * Watchdog Timer (WDT) - System peripherals registers.
+ * Based on AT91SAM9261 datasheet revision D.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91_WDT_H
+#define AT91_WDT_H
+
+#define AT91_WDT_CR (AT91_WDT + 0x00) /* Watchdog Control Register */
+#define AT91_WDT_WDRSTT (1 << 0) /* Restart */
+#define AT91_WDT_KEY (0xa5 << 24) /* KEY Password */
+
+#define AT91_WDT_MR (AT91_WDT + 0x04) /* Watchdog Mode Register */
+#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */
+#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */
+#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */
+#define AT91_WDT_WDRPROC (1 << 14) /* Timer Restart */
+#define AT91_WDT_WDDIS (1 << 15) /* Watchdog Disable */
+#define AT91_WDT_WDD (0xfff << 16) /* Delta Value */
+#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */
+#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */
+
+#define AT91_WDT_SR (AT91_WDT + 0x08) /* Watchdog Status Register */
+#define AT91_WDT_WDUNF (1 << 0) /* Watchdog Underflow */
+#define AT91_WDT_WDERR (1 << 1) /* Watchdog Error */
+
+#endif
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
new file mode 100644
index 0000000..f10f8c0
--- /dev/null
+++ b/drivers/watchdog/dw_wdt.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright 2010-2011 Picochip Ltd., Jamie Iles
+ * http://www.picochip.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file implements a driver for the Synopsys DesignWare watchdog device
+ * in the many ARM subsystems. The watchdog has 16 different timeout periods
+ * and these are a function of the input clock frequency.
+ *
+ * The DesignWare watchdog cannot be stopped once it has been started so we
+ * use a software timer to implement a ping that will keep the watchdog alive.
+ * If we receive an expected close for the watchdog then we keep the timer
+ * running, otherwise the timer is stopped and the watchdog will expire.
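+ *
+ * Concretely, as implemented below: the probe routine starts a kernel
+ * timer that fires every WDT_TIMEOUT (HZ / 2) jiffies.  Its callback
+ * kicks the hardware only while jiffies is still before next_heartbeat,
+ * or while the device is not open and nowayout is clear.  Writes and
+ * WDIOC_KEEPALIVE push next_heartbeat forward by the current hardware
+ * timeout, and a close without the magic character 'V' deletes the
+ * timer so the hardware counter runs out and resets the machine.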
+ */
+#define pr_fmt(fmt) "dw_wdt: " fmt
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+#define WDOG_CONTROL_REG_OFFSET 0x00
+#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
+#define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04
+#define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
+#define WDOG_COUNTER_RESTART_REG_OFFSET 0x0c
+#define WDOG_COUNTER_RESTART_KICK_VALUE 0x76
+
+/* The maximum TOP (timeout period) value that can be set in the watchdog. */
+#define DW_WDT_MAX_TOP 15
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define WDT_TIMEOUT (HZ / 2)
+
+static struct {
+ spinlock_t lock;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long in_use;
+ unsigned long next_heartbeat;
+ struct timer_list timer;
+ int expect_close;
+} dw_wdt;
+
+static inline int dw_wdt_is_enabled(void)
+{
+ return readl(dw_wdt.regs + WDOG_CONTROL_REG_OFFSET) &
+ WDOG_CONTROL_REG_WDT_EN_MASK;
+}
+
+static inline int dw_wdt_top_in_seconds(unsigned top)
+{
+ /*
+ * There are 16 possible timeout values in 0..15 where the number of
+ * cycles is 2 ^ (16 + i) and the watchdog counts down.
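+ *
+ * For example, with an assumed 33 MHz input clock, top = 0 gives
+ * 2^16 cycles (about 2 ms, which this integer division reports as
+ * 0 seconds) and top = 15 gives 2^31 cycles (about 65 seconds).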
+ */
+ return (1 << (16 + top)) / clk_get_rate(dw_wdt.clk);
+}
+
+static int dw_wdt_get_top(void)
+{
+ int top = readl(dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
+
+ return dw_wdt_top_in_seconds(top);
+}
+
+static inline void dw_wdt_set_next_heartbeat(void)
+{
+ dw_wdt.next_heartbeat = jiffies + dw_wdt_get_top() * HZ;
+}
+
+static int dw_wdt_set_top(unsigned top_s)
+{
+ int i, top_val = DW_WDT_MAX_TOP;
+
+ /*
+ * Iterate over the timeout values until we find the closest match. We
+ * always look for >=.
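+ *
+ * E.g. a request for 10 seconds with the assumed 33 MHz clock above
+ * selects top = 13 (2^29 cycles, roughly 16 seconds), the first value
+ * whose period is >= the request.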
+ */
+ for (i = 0; i <= DW_WDT_MAX_TOP; ++i)
+ if (dw_wdt_top_in_seconds(i) >= top_s) {
+ top_val = i;
+ break;
+ }
+
+ /* Set the new value in the watchdog. */
+ writel(top_val, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+
+ dw_wdt_set_next_heartbeat();
+
+ return dw_wdt_top_in_seconds(top_val);
+}
+
+static void dw_wdt_keepalive(void)
+{
+ writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
+ WDOG_COUNTER_RESTART_REG_OFFSET);
+}
+
+static void dw_wdt_ping(unsigned long data)
+{
+ if (time_before(jiffies, dw_wdt.next_heartbeat) ||
+ (!nowayout && !dw_wdt.in_use)) {
+ dw_wdt_keepalive();
+ mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
+ } else
+ pr_crit("keepalive missed, machine will reset\n");
+}
+
+static int dw_wdt_open(struct inode *inode, struct file *filp)
+{
+ if (test_and_set_bit(0, &dw_wdt.in_use))
+ return -EBUSY;
+
+ /* Make sure we don't get unloaded. */
+ __module_get(THIS_MODULE);
+
+ spin_lock(&dw_wdt.lock);
+ if (!dw_wdt_is_enabled()) {
+ /*
+ * The watchdog is not currently enabled. Set the timeout to
+ * the maximum and then start it.
+ */
+ dw_wdt_set_top(DW_WDT_MAX_TOP);
+ writel(WDOG_CONTROL_REG_WDT_EN_MASK,
+ dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
+ }
+
+ dw_wdt_set_next_heartbeat();
+
+ spin_unlock(&dw_wdt.lock);
+
+ return nonseekable_open(inode, filp);
+}
+
+ssize_t dw_wdt_write(struct file *filp, const char __user *buf, size_t len,
+ loff_t *offset)
+{
+ if (!len)
+ return 0;
+
+ if (!nowayout) {
+ size_t i;
+
+ dw_wdt.expect_close = 0;
+
+ for (i = 0; i < len; ++i) {
+ char c;
+
+ if (get_user(c, buf + i))
+ return -EFAULT;
+
+ if (c == 'V') {
+ dw_wdt.expect_close = 1;
+ break;
+ }
+ }
+ }
+
+ dw_wdt_set_next_heartbeat();
+ mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
+
+ return len;
+}
+
+static u32 dw_wdt_time_left(void)
+{
+ return readl(dw_wdt.regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
+ clk_get_rate(dw_wdt.clk);
+}
+
+static const struct watchdog_info dw_wdt_ident = {
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE,
+ .identity = "Synopsys DesignWare Watchdog",
+};
+
+static long dw_wdt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ unsigned long val;
+ int timeout;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user((struct watchdog_info *)arg, &dw_wdt_ident,
+ sizeof(dw_wdt_ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, (int *)arg);
+
+ case WDIOC_KEEPALIVE:
+ dw_wdt_set_next_heartbeat();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(val, (int __user *)arg))
+ return -EFAULT;
+ timeout = dw_wdt_set_top(val);
+ return put_user(timeout, (int __user *)arg);
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(dw_wdt_get_top(), (int __user *)arg);
+
+ case WDIOC_GETTIMELEFT:
+ /* Get the time left until expiry. */
+ if (get_user(val, (int __user *)arg))
+ return -EFAULT;
+ return put_user(dw_wdt_time_left(), (int __user *)arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int dw_wdt_release(struct inode *inode, struct file *filp)
+{
+ clear_bit(0, &dw_wdt.in_use);
+
+ if (!dw_wdt.expect_close) {
+ del_timer(&dw_wdt.timer);
+
+ if (!nowayout)
+ pr_crit("unexpected close, system will reboot soon\n");
+ else
+ pr_crit("watchdog cannot be disabled, system will reboot soon\n");
+ }
+
+ dw_wdt.expect_close = 0;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int dw_wdt_suspend(struct device *dev)
+{
+ clk_disable(dw_wdt.clk);
+
+ return 0;
+}
+
+static int dw_wdt_resume(struct device *dev)
+{
+ int err = clk_enable(dw_wdt.clk);
+
+ if (err)
+ return err;
+
+ dw_wdt_keepalive();
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_wdt_pm_ops = {
+ .suspend = dw_wdt_suspend,
+ .resume = dw_wdt_resume,
+};
+#endif /* CONFIG_PM */
+
+static const struct file_operations wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = dw_wdt_open,
+ .write = dw_wdt_write,
+ .unlocked_ioctl = dw_wdt_ioctl,
+ .release = dw_wdt_release
+};
+
+static struct miscdevice dw_wdt_miscdev = {
+ .fops = &wdt_fops,
+ .name = "watchdog",
+ .minor = WATCHDOG_MINOR,
+};
+
+static int __devinit dw_wdt_drv_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!mem)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
+ "dw_wdt"))
+ return -ENOMEM;
+
+ dw_wdt.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!dw_wdt.regs)
+ return -ENOMEM;
+
+ dw_wdt.clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dw_wdt.clk))
+ return PTR_ERR(dw_wdt.clk);
+
+ ret = clk_enable(dw_wdt.clk);
+ if (ret)
+ goto out_put_clk;
+
+ spin_lock_init(&dw_wdt.lock);
+
+ ret = misc_register(&dw_wdt_miscdev);
+ if (ret)
+ goto out_disable_clk;
+
+ dw_wdt_set_next_heartbeat();
+ setup_timer(&dw_wdt.timer, dw_wdt_ping, 0);
+ mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
+
+ return 0;
+
+out_disable_clk:
+ clk_disable(dw_wdt.clk);
+out_put_clk:
+ clk_put(dw_wdt.clk);
+
+ return ret;
+}
+
+static int __devexit dw_wdt_drv_remove(struct platform_device *pdev)
+{
+ misc_deregister(&dw_wdt_miscdev);
+
+ clk_disable(dw_wdt.clk);
+ clk_put(dw_wdt.clk);
+
+ return 0;
+}
+
+static struct platform_driver dw_wdt_driver = {
+ .probe = dw_wdt_drv_probe,
+ .remove = __devexit_p(dw_wdt_drv_remove),
+ .driver = {
+ .name = "dw_wdt",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &dw_wdt_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __init dw_wdt_watchdog_init(void)
+{
+ return platform_driver_register(&dw_wdt_driver);
+}
+module_init(dw_wdt_watchdog_init);
+
+static void __exit dw_wdt_watchdog_exit(void)
+{
+ platform_driver_unregister(&dw_wdt_driver);
+}
+module_exit(dw_wdt_watchdog_exit);
+
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
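For reference, the new dw_wdt misc device is driven through the standard
/dev/watchdog character-device interface advertised above (WDIOF_KEEPALIVEPING,
WDIOF_SETTIMEOUT, WDIOF_MAGICCLOSE). A minimal user-space sketch, not part of
this patch and assuming only the generic linux/watchdog.h ioctls, could look
like this:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int fd, i, timeout = 30;

		fd = open("/dev/watchdog", O_WRONLY);
		if (fd < 0) {
			perror("open /dev/watchdog");
			return 1;
		}

		/* Request ~30 s; the driver rounds up to the next supported period. */
		if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
			printf("effective timeout: %d s\n", timeout);

		/* Pet the watchdog once a second for a while. */
		for (i = 0; i < 10; i++) {
			ioctl(fd, WDIOC_KEEPALIVE, 0);
			sleep(1);
		}

		/* Magic close: marks this as an expected close for dw_wdt_release(). */
		write(fd, "V", 1);
		close(fd);
		return 0;
	}

Skipping the final 'V' write exercises the unexpected-close path in
dw_wdt_release(), which stops the ping timer and lets the hardware reset the
board.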
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 8cb2685..410fba4 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -36,7 +36,7 @@
#include <asm/cacheflush.h>
#endif /* CONFIG_HPWDT_NMI_DECODING */
-#define HPWDT_VERSION "1.2.0"
+#define HPWDT_VERSION "1.3.0"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
@@ -87,6 +87,19 @@ struct smbios_cru64_info {
};
#define SMBIOS_CRU64_INFORMATION 212
+/* type 219 */
+struct smbios_proliant_info {
+ u8 type;
+ u8 byte_length;
+ u16 handle;
+ u32 power_features;
+ u32 omega_features;
+ u32 reserved;
+ u32 misc_features;
+};
+#define SMBIOS_ICRU_INFORMATION 219
+
+
struct cmn_registers {
union {
struct {
@@ -132,6 +145,7 @@ struct cmn_registers {
static unsigned int hpwdt_nmi_decoding;
static unsigned int allow_kdump;
static unsigned int priority; /* hpwdt at end of die_notify list */
+static unsigned int is_icru;
static DEFINE_SPINLOCK(rom_lock);
static void *cru_rom_addr;
static struct cmn_registers cmn_regs;
@@ -476,19 +490,22 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
goto out;
spin_lock_irqsave(&rom_lock, rom_pl);
- if (!die_nmi_called)
+ if (!die_nmi_called && !is_icru)
asminline_call(&cmn_regs, cru_rom_addr);
die_nmi_called = 1;
spin_unlock_irqrestore(&rom_lock, rom_pl);
- if (cmn_regs.u1.ral == 0) {
- printk(KERN_WARNING "hpwdt: An NMI occurred, "
- "but unable to determine source.\n");
- } else {
- if (allow_kdump)
- hpwdt_stop();
- panic("An NMI occurred, please see the Integrated "
- "Management Log for details.\n");
+ if (!is_icru) {
+ if (cmn_regs.u1.ral == 0) {
+ printk(KERN_WARNING "hpwdt: An NMI occurred, "
+ "but unable to determine source.\n");
+ }
}
+
+ if (allow_kdump)
+ hpwdt_stop();
+ panic("An NMI occurred, please see the Integrated "
+ "Management Log for details.\n");
+
out:
return NOTIFY_OK;
}
@@ -659,30 +676,63 @@ static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev)
}
#endif /* CONFIG_X86_LOCAL_APIC */
+/*
+ * dmi_find_icru
+ *
+ * Routine Description:
+ * This function checks whether or not we are on an iCRU-based server.
+ * This check is independent of architecture and needs to be made for
+ * any ProLiant system.
+ */
+static void __devinit dmi_find_icru(const struct dmi_header *dm, void *dummy)
+{
+ struct smbios_proliant_info *smbios_proliant_ptr;
+
+ if (dm->type == SMBIOS_ICRU_INFORMATION) {
+ smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
+ if (smbios_proliant_ptr->misc_features & 0x01)
+ is_icru = 1;
+ }
+}
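For readers unfamiliar with dmi_walk(): it calls the supplied decoder once per SMBIOS/DMI structure, and the decoder filters on dm->type and casts to the record layout it expects, exactly as dmi_find_icru() does above. A generic sketch with hypothetical names, returning the result through dmi_walk()'s private pointer instead of a global:

    #include <linux/dmi.h>

    static void find_my_record(const struct dmi_header *dm, void *priv)
    {
    	unsigned int *flag = priv;
    	const struct smbios_proliant_info *p;

    	if (dm->type != SMBIOS_ICRU_INFORMATION)
    		return;
    	p = (const struct smbios_proliant_info *)dm;
    	if (p->misc_features & 0x01)	/* iCRU supported */
    		*flag = 1;
    }

    	/* in probe(): */
    	unsigned int icru = 0;
    	dmi_walk(find_my_record, &icru);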
+
static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
{
int retval;
/*
- * We need to map the ROM to get the CRU service.
- * For 32 bit Operating Systems we need to go through the 32 Bit
- * BIOS Service Directory
- * For 64 bit Operating Systems we get that service through SMBIOS.
+ * On typical CRU-based systems we need to map that service in
+ * the BIOS. For 32 bit Operating Systems we need to go through
+ * the 32 Bit BIOS Service Directory. For 64 bit Operating
+ * Systems we get that service through SMBIOS.
+ *
+ * On systems that support the new iCRU service all we need to
+ * do is call dmi_walk to get the supported flag value and skip
+ * the old cru detect code.
*/
- retval = detect_cru_service();
- if (retval < 0) {
- dev_warn(&dev->dev,
- "Unable to detect the %d Bit CRU Service.\n",
- HPWDT_ARCH);
- return retval;
- }
+ dmi_walk(dmi_find_icru, NULL);
+ if (!is_icru) {
+
+ /*
+ * We need to map the ROM to get the CRU service.
+ * For 32 bit Operating Systems we need to go through the 32 Bit
+ * BIOS Service Directory
+ * For 64 bit Operating Systems we get that service through SMBIOS.
+ */
+ retval = detect_cru_service();
+ if (retval < 0) {
+ dev_warn(&dev->dev,
+ "Unable to detect the %d Bit CRU Service.\n",
+ HPWDT_ARCH);
+ return retval;
+ }
- /*
- * We know this is the only CRU call we need to make so lets keep as
- * few instructions as possible once the NMI comes in.
- */
- cmn_regs.u1.rah = 0x0D;
- cmn_regs.u1.ral = 0x02;
+ /*
+ * We know this is the only CRU call we need to make so let's keep as
+ * few instructions as possible once the NMI comes in.
+ */
+ cmn_regs.u1.rah = 0x0D;
+ cmn_regs.u1.ral = 0x02;
+ }
/*
* If the priority is set to 1, then we will be put first on the
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 5fd020d..751a591 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -120,72 +120,12 @@ enum iTCO_chipsets {
TCO_3420, /* 3420 */
TCO_3450, /* 3450 */
TCO_EP80579, /* EP80579 */
- TCO_CPT1, /* Cougar Point */
- TCO_CPT2, /* Cougar Point Desktop */
- TCO_CPT3, /* Cougar Point Mobile */
- TCO_CPT4, /* Cougar Point */
- TCO_CPT5, /* Cougar Point */
- TCO_CPT6, /* Cougar Point */
- TCO_CPT7, /* Cougar Point */
- TCO_CPT8, /* Cougar Point */
- TCO_CPT9, /* Cougar Point */
- TCO_CPT10, /* Cougar Point */
- TCO_CPT11, /* Cougar Point */
- TCO_CPT12, /* Cougar Point */
- TCO_CPT13, /* Cougar Point */
- TCO_CPT14, /* Cougar Point */
- TCO_CPT15, /* Cougar Point */
- TCO_CPT16, /* Cougar Point */
- TCO_CPT17, /* Cougar Point */
- TCO_CPT18, /* Cougar Point */
- TCO_CPT19, /* Cougar Point */
- TCO_CPT20, /* Cougar Point */
- TCO_CPT21, /* Cougar Point */
- TCO_CPT22, /* Cougar Point */
- TCO_CPT23, /* Cougar Point */
- TCO_CPT24, /* Cougar Point */
- TCO_CPT25, /* Cougar Point */
- TCO_CPT26, /* Cougar Point */
- TCO_CPT27, /* Cougar Point */
- TCO_CPT28, /* Cougar Point */
- TCO_CPT29, /* Cougar Point */
- TCO_CPT30, /* Cougar Point */
- TCO_CPT31, /* Cougar Point */
- TCO_PBG1, /* Patsburg */
- TCO_PBG2, /* Patsburg */
+ TCO_CPT, /* Cougar Point */
+ TCO_CPTD, /* Cougar Point Desktop */
+ TCO_CPTM, /* Cougar Point Mobile */
+ TCO_PBG, /* Patsburg */
TCO_DH89XXCC, /* DH89xxCC */
- TCO_PPT0, /* Panther Point */
- TCO_PPT1, /* Panther Point */
- TCO_PPT2, /* Panther Point */
- TCO_PPT3, /* Panther Point */
- TCO_PPT4, /* Panther Point */
- TCO_PPT5, /* Panther Point */
- TCO_PPT6, /* Panther Point */
- TCO_PPT7, /* Panther Point */
- TCO_PPT8, /* Panther Point */
- TCO_PPT9, /* Panther Point */
- TCO_PPT10, /* Panther Point */
- TCO_PPT11, /* Panther Point */
- TCO_PPT12, /* Panther Point */
- TCO_PPT13, /* Panther Point */
- TCO_PPT14, /* Panther Point */
- TCO_PPT15, /* Panther Point */
- TCO_PPT16, /* Panther Point */
- TCO_PPT17, /* Panther Point */
- TCO_PPT18, /* Panther Point */
- TCO_PPT19, /* Panther Point */
- TCO_PPT20, /* Panther Point */
- TCO_PPT21, /* Panther Point */
- TCO_PPT22, /* Panther Point */
- TCO_PPT23, /* Panther Point */
- TCO_PPT24, /* Panther Point */
- TCO_PPT25, /* Panther Point */
- TCO_PPT26, /* Panther Point */
- TCO_PPT27, /* Panther Point */
- TCO_PPT28, /* Panther Point */
- TCO_PPT29, /* Panther Point */
- TCO_PPT30, /* Panther Point */
- TCO_PPT31, /* Panther Point */
+ TCO_PPT, /* Panther Point */
};
static struct {
@@ -244,83 +184,14 @@ static struct {
{"3450", 2},
{"EP80579", 2},
{"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Patsburg", 2},
+ {"Cougar Point Desktop", 2},
+ {"Cougar Point Mobile", 2},
{"Patsburg", 2},
{"DH89xxCC", 2},
{"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
{NULL, 0}
};
-#define ITCO_PCI_DEVICE(dev, data) \
- .vendor = PCI_VENDOR_ID_INTEL, \
- .device = dev, \
- .subvendor = PCI_ANY_ID, \
- .subdevice = PCI_ANY_ID, \
- .class = 0, \
- .class_mask = 0, \
- .driver_data = data
-
/*
* This data only exists for exporting the supported PCI ids
* via MODULE_DEVICE_TABLE. We do not actually register a
@@ -328,138 +199,138 @@ static struct {
* functions that probably will be registered by other drivers.
*/
static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AA_0, TCO_ICH)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AB_0, TCO_ICH0)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_0, TCO_ICH2)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_10, TCO_ICH2M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801CA_0, TCO_ICH3)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801CA_12, TCO_ICH3M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801DB_0, TCO_ICH4)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801DB_12, TCO_ICH4M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801E_0, TCO_CICH)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801EB_0, TCO_ICH5)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB_1, TCO_6300ESB)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_0, TCO_ICH6)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_1, TCO_ICH6M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_2, TCO_ICH6W)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2673, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2674, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2675, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2676, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2677, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2678, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2679, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267a, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267b, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267c, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267d, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267e, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267f, TCO_631XESB)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_0, TCO_ICH7)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_30, TCO_ICH7DH)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)},
- { ITCO_PCI_DEVICE(0x27bc, TCO_NM10)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_4, TCO_ICH8M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_1, TCO_ICH8ME)},
- { ITCO_PCI_DEVICE(0x2918, TCO_ICH9)},
- { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_4, TCO_ICH9DO)},
- { ITCO_PCI_DEVICE(0x2919, TCO_ICH9M)},
- { ITCO_PCI_DEVICE(0x2917, TCO_ICH9ME)},
- { ITCO_PCI_DEVICE(0x3a18, TCO_ICH10)},
- { ITCO_PCI_DEVICE(0x3a16, TCO_ICH10R)},
- { ITCO_PCI_DEVICE(0x3a1a, TCO_ICH10D)},
- { ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)},
- { ITCO_PCI_DEVICE(0x3b00, TCO_PCH)},
- { ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)},
- { ITCO_PCI_DEVICE(0x3b02, TCO_P55)},
- { ITCO_PCI_DEVICE(0x3b03, TCO_PM55)},
- { ITCO_PCI_DEVICE(0x3b06, TCO_H55)},
- { ITCO_PCI_DEVICE(0x3b07, TCO_QM57)},
- { ITCO_PCI_DEVICE(0x3b08, TCO_H57)},
- { ITCO_PCI_DEVICE(0x3b09, TCO_HM55)},
- { ITCO_PCI_DEVICE(0x3b0a, TCO_Q57)},
- { ITCO_PCI_DEVICE(0x3b0b, TCO_HM57)},
- { ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)},
- { ITCO_PCI_DEVICE(0x3b0f, TCO_QS57)},
- { ITCO_PCI_DEVICE(0x3b12, TCO_3400)},
- { ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
- { ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
- { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
- { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)},
- { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)},
- { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)},
- { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)},
- { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)},
- { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)},
- { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)},
- { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)},
- { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)},
- { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)},
- { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)},
- { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)},
- { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)},
- { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)},
- { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)},
- { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)},
- { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)},
- { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)},
- { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)},
- { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)},
- { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)},
- { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)},
- { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)},
- { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)},
- { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)},
- { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)},
- { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)},
- { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)},
- { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)},
- { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)},
- { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)},
- { ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)},
- { ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)},
- { ITCO_PCI_DEVICE(0x2310, TCO_DH89XXCC)},
- { ITCO_PCI_DEVICE(0x1e40, TCO_PPT0)},
- { ITCO_PCI_DEVICE(0x1e41, TCO_PPT1)},
- { ITCO_PCI_DEVICE(0x1e42, TCO_PPT2)},
- { ITCO_PCI_DEVICE(0x1e43, TCO_PPT3)},
- { ITCO_PCI_DEVICE(0x1e44, TCO_PPT4)},
- { ITCO_PCI_DEVICE(0x1e45, TCO_PPT5)},
- { ITCO_PCI_DEVICE(0x1e46, TCO_PPT6)},
- { ITCO_PCI_DEVICE(0x1e47, TCO_PPT7)},
- { ITCO_PCI_DEVICE(0x1e48, TCO_PPT8)},
- { ITCO_PCI_DEVICE(0x1e49, TCO_PPT9)},
- { ITCO_PCI_DEVICE(0x1e4a, TCO_PPT10)},
- { ITCO_PCI_DEVICE(0x1e4b, TCO_PPT11)},
- { ITCO_PCI_DEVICE(0x1e4c, TCO_PPT12)},
- { ITCO_PCI_DEVICE(0x1e4d, TCO_PPT13)},
- { ITCO_PCI_DEVICE(0x1e4e, TCO_PPT14)},
- { ITCO_PCI_DEVICE(0x1e4f, TCO_PPT15)},
- { ITCO_PCI_DEVICE(0x1e50, TCO_PPT16)},
- { ITCO_PCI_DEVICE(0x1e51, TCO_PPT17)},
- { ITCO_PCI_DEVICE(0x1e52, TCO_PPT18)},
- { ITCO_PCI_DEVICE(0x1e53, TCO_PPT19)},
- { ITCO_PCI_DEVICE(0x1e54, TCO_PPT20)},
- { ITCO_PCI_DEVICE(0x1e55, TCO_PPT21)},
- { ITCO_PCI_DEVICE(0x1e56, TCO_PPT22)},
- { ITCO_PCI_DEVICE(0x1e57, TCO_PPT23)},
- { ITCO_PCI_DEVICE(0x1e58, TCO_PPT24)},
- { ITCO_PCI_DEVICE(0x1e59, TCO_PPT25)},
- { ITCO_PCI_DEVICE(0x1e5a, TCO_PPT26)},
- { ITCO_PCI_DEVICE(0x1e5b, TCO_PPT27)},
- { ITCO_PCI_DEVICE(0x1e5c, TCO_PPT28)},
- { ITCO_PCI_DEVICE(0x1e5d, TCO_PPT29)},
- { ITCO_PCI_DEVICE(0x1e5e, TCO_PPT30)},
- { ITCO_PCI_DEVICE(0x1e5f, TCO_PPT31)},
+ { PCI_VDEVICE(INTEL, 0x2410), TCO_ICH},
+ { PCI_VDEVICE(INTEL, 0x2420), TCO_ICH0},
+ { PCI_VDEVICE(INTEL, 0x2440), TCO_ICH2},
+ { PCI_VDEVICE(INTEL, 0x244c), TCO_ICH2M},
+ { PCI_VDEVICE(INTEL, 0x2480), TCO_ICH3},
+ { PCI_VDEVICE(INTEL, 0x248c), TCO_ICH3M},
+ { PCI_VDEVICE(INTEL, 0x24c0), TCO_ICH4},
+ { PCI_VDEVICE(INTEL, 0x24cc), TCO_ICH4M},
+ { PCI_VDEVICE(INTEL, 0x2450), TCO_CICH},
+ { PCI_VDEVICE(INTEL, 0x24d0), TCO_ICH5},
+ { PCI_VDEVICE(INTEL, 0x25a1), TCO_6300ESB},
+ { PCI_VDEVICE(INTEL, 0x2640), TCO_ICH6},
+ { PCI_VDEVICE(INTEL, 0x2641), TCO_ICH6M},
+ { PCI_VDEVICE(INTEL, 0x2642), TCO_ICH6W},
+ { PCI_VDEVICE(INTEL, 0x2670), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2671), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2672), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2673), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2674), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2675), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2676), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2677), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2678), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2679), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267a), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267b), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267c), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267d), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267e), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267f), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x27b8), TCO_ICH7},
+ { PCI_VDEVICE(INTEL, 0x27b0), TCO_ICH7DH},
+ { PCI_VDEVICE(INTEL, 0x27b9), TCO_ICH7M},
+ { PCI_VDEVICE(INTEL, 0x27bd), TCO_ICH7MDH},
+ { PCI_VDEVICE(INTEL, 0x27bc), TCO_NM10},
+ { PCI_VDEVICE(INTEL, 0x2810), TCO_ICH8},
+ { PCI_VDEVICE(INTEL, 0x2812), TCO_ICH8DH},
+ { PCI_VDEVICE(INTEL, 0x2814), TCO_ICH8DO},
+ { PCI_VDEVICE(INTEL, 0x2815), TCO_ICH8M},
+ { PCI_VDEVICE(INTEL, 0x2811), TCO_ICH8ME},
+ { PCI_VDEVICE(INTEL, 0x2918), TCO_ICH9},
+ { PCI_VDEVICE(INTEL, 0x2916), TCO_ICH9R},
+ { PCI_VDEVICE(INTEL, 0x2912), TCO_ICH9DH},
+ { PCI_VDEVICE(INTEL, 0x2914), TCO_ICH9DO},
+ { PCI_VDEVICE(INTEL, 0x2919), TCO_ICH9M},
+ { PCI_VDEVICE(INTEL, 0x2917), TCO_ICH9ME},
+ { PCI_VDEVICE(INTEL, 0x3a18), TCO_ICH10},
+ { PCI_VDEVICE(INTEL, 0x3a16), TCO_ICH10R},
+ { PCI_VDEVICE(INTEL, 0x3a1a), TCO_ICH10D},
+ { PCI_VDEVICE(INTEL, 0x3a14), TCO_ICH10DO},
+ { PCI_VDEVICE(INTEL, 0x3b00), TCO_PCH},
+ { PCI_VDEVICE(INTEL, 0x3b01), TCO_PCHM},
+ { PCI_VDEVICE(INTEL, 0x3b02), TCO_P55},
+ { PCI_VDEVICE(INTEL, 0x3b03), TCO_PM55},
+ { PCI_VDEVICE(INTEL, 0x3b06), TCO_H55},
+ { PCI_VDEVICE(INTEL, 0x3b07), TCO_QM57},
+ { PCI_VDEVICE(INTEL, 0x3b08), TCO_H57},
+ { PCI_VDEVICE(INTEL, 0x3b09), TCO_HM55},
+ { PCI_VDEVICE(INTEL, 0x3b0a), TCO_Q57},
+ { PCI_VDEVICE(INTEL, 0x3b0b), TCO_HM57},
+ { PCI_VDEVICE(INTEL, 0x3b0d), TCO_PCHMSFF},
+ { PCI_VDEVICE(INTEL, 0x3b0f), TCO_QS57},
+ { PCI_VDEVICE(INTEL, 0x3b12), TCO_3400},
+ { PCI_VDEVICE(INTEL, 0x3b14), TCO_3420},
+ { PCI_VDEVICE(INTEL, 0x3b16), TCO_3450},
+ { PCI_VDEVICE(INTEL, 0x5031), TCO_EP80579},
+ { PCI_VDEVICE(INTEL, 0x1c41), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c42), TCO_CPTD},
+ { PCI_VDEVICE(INTEL, 0x1c43), TCO_CPTM},
+ { PCI_VDEVICE(INTEL, 0x1c44), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c45), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c46), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c47), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c48), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c49), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4a), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4b), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4c), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4d), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4e), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4f), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c50), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c51), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c52), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c53), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c54), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c55), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c56), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c57), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c58), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c59), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5a), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5b), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5c), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5d), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5e), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5f), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1d40), TCO_PBG},
+ { PCI_VDEVICE(INTEL, 0x1d41), TCO_PBG},
+ { PCI_VDEVICE(INTEL, 0x2310), TCO_DH89XXCC},
+ { PCI_VDEVICE(INTEL, 0x1e40), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e41), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e42), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e43), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e44), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e45), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e46), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e47), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e48), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e49), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4a), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4b), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4c), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4d), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4e), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4f), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e50), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e51), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e52), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e53), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e54), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e55), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e56), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e57), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e58), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e59), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5a), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5b), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5c), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
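The conversion above from the driver-local ITCO_PCI_DEVICE() wrapper to the generic PCI_VDEVICE() helper is mechanical, because both fill in the same pci_device_id fields; only the chipset enum got collapsed to one entry per family. A sketch of the equivalence, based on the standard PCI_VDEVICE() definition in <linux/pci.h> (TCO_CPT is the enum value from this file):

    #include <linux/pci.h>

    static const struct pci_device_id itco_example_tbl[] = {
    	{ PCI_VDEVICE(INTEL, 0x1c41), TCO_CPT },	/* new style */
    	/* ...hand-expanded, this is the entry the old macro produced: */
    	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1c41,
    	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
    	  .class = 0, .class_mask = 0, .driver_data = TCO_CPT },
    	{ 0, },
    };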
@@ -1052,15 +923,10 @@ static void iTCO_wdt_shutdown(struct platform_device *dev)
iTCO_wdt_stop();
}
-#define iTCO_wdt_suspend NULL
-#define iTCO_wdt_resume NULL
-
static struct platform_driver iTCO_wdt_driver = {
.probe = iTCO_wdt_probe,
.remove = __devexit_p(iTCO_wdt_remove),
.shutdown = iTCO_wdt_shutdown,
- .suspend = iTCO_wdt_suspend,
- .resume = iTCO_wdt_resume,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 86f7cac..b8ef2c6 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -329,12 +329,18 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
}
+static const struct of_device_id imx2_wdt_dt_ids[] = {
+ { .compatible = "fsl,imx21-wdt", },
+ { /* sentinel */ }
+};
+
static struct platform_driver imx2_wdt_driver = {
.remove = __exit_p(imx2_wdt_remove),
.shutdown = imx2_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = imx2_wdt_dt_ids,
},
};
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index ba43860..1abdc04 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -43,7 +43,7 @@
#include <linux/signal.h>
#include <linux/sfi.h>
#include <asm/irq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/intel_scu_ipc.h>
#include <asm/apb_timer.h>
#include <asm/mrst.h>
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 6143f52..8d2d850 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -28,10 +28,10 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/fs.h>
-#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#define NAME "it8712f_wdt"
@@ -51,7 +51,6 @@ MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
static unsigned long wdt_open;
static unsigned expect_close;
-static spinlock_t io_lock;
static unsigned char revision;
/* Dog Food address - We use the game port address */
@@ -121,20 +120,26 @@ static inline void superio_select(int ldn)
outb(ldn, VAL);
}
-static inline void superio_enter(void)
+static inline int superio_enter(void)
{
- spin_lock(&io_lock);
+ /*
+ * Try to reserve REG and REG + 1 for exclusive access.
+ */
+ if (!request_muxed_region(REG, 2, NAME))
+ return -EBUSY;
+
outb(0x87, REG);
outb(0x01, REG);
outb(0x55, REG);
outb(0x55, REG);
+ return 0;
}
static inline void superio_exit(void)
{
outb(0x02, REG);
outb(0x02, VAL);
- spin_unlock(&io_lock);
+ release_region(REG, 2);
}
static inline void it8712f_wdt_ping(void)
@@ -173,10 +178,13 @@ static int it8712f_wdt_get_status(void)
return 0;
}
-static void it8712f_wdt_enable(void)
+static int it8712f_wdt_enable(void)
{
+ int ret = superio_enter();
+ if (ret)
+ return ret;
+
printk(KERN_DEBUG NAME ": enabling watchdog timer\n");
- superio_enter();
superio_select(LDN_GPIO);
superio_outb(wdt_control_reg, WDT_CONTROL);
@@ -186,13 +194,17 @@ static void it8712f_wdt_enable(void)
superio_exit();
it8712f_wdt_ping();
+
+ return 0;
}
-static void it8712f_wdt_disable(void)
+static int it8712f_wdt_disable(void)
{
- printk(KERN_DEBUG NAME ": disabling watchdog timer\n");
+ int ret = superio_enter();
+ if (ret)
+ return ret;
- superio_enter();
+ printk(KERN_DEBUG NAME ": disabling watchdog timer\n");
superio_select(LDN_GPIO);
superio_outb(0, WDT_CONFIG);
@@ -202,6 +214,7 @@ static void it8712f_wdt_disable(void)
superio_outb(0, WDT_TIMEOUT);
superio_exit();
+ return 0;
}
static int it8712f_wdt_notify(struct notifier_block *this,
@@ -252,6 +265,7 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
WDIOF_MAGICCLOSE,
};
int value;
+ int ret;
switch (cmd) {
case WDIOC_GETSUPPORT:
@@ -259,7 +273,9 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
- superio_enter();
+ ret = superio_enter();
+ if (ret)
+ return ret;
superio_select(LDN_GPIO);
value = it8712f_wdt_get_status();
@@ -280,7 +296,9 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
if (value > (max_units * 60))
return -EINVAL;
margin = value;
- superio_enter();
+ ret = superio_enter();
+ if (ret)
+ return ret;
superio_select(LDN_GPIO);
it8712f_wdt_update_margin();
@@ -299,10 +317,14 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
static int it8712f_wdt_open(struct inode *inode, struct file *file)
{
+ int ret;
/* only allow one at a time */
if (test_and_set_bit(0, &wdt_open))
return -EBUSY;
- it8712f_wdt_enable();
+
+ ret = it8712f_wdt_enable();
+ if (ret)
+ return ret;
return nonseekable_open(inode, file);
}
@@ -313,7 +335,8 @@ static int it8712f_wdt_release(struct inode *inode, struct file *file)
": watchdog device closed unexpectedly, will not"
" disable the watchdog timer\n");
} else if (!nowayout) {
- it8712f_wdt_disable();
+ if (it8712f_wdt_disable())
+ printk(KERN_WARNING NAME ": Watchdog disable failed\n");
}
expect_close = 0;
clear_bit(0, &wdt_open);
@@ -340,8 +363,10 @@ static int __init it8712f_wdt_find(unsigned short *address)
{
int err = -ENODEV;
int chip_type;
+ int ret = superio_enter();
+ if (ret)
+ return ret;
- superio_enter();
chip_type = superio_inw(DEVID);
if (chip_type != IT8712F_DEVID)
goto exit;
@@ -382,8 +407,6 @@ static int __init it8712f_wdt_init(void)
{
int err = 0;
- spin_lock_init(&io_lock);
-
if (it8712f_wdt_find(&address))
return -ENODEV;
@@ -392,7 +415,11 @@ static int __init it8712f_wdt_init(void)
return -EBUSY;
}
- it8712f_wdt_disable();
+ err = it8712f_wdt_disable();
+ if (err) {
+ printk(KERN_ERR NAME ": unable to disable watchdog timer.\n");
+ goto out;
+ }
err = register_reboot_notifier(&it8712f_wdt_notifier);
if (err) {
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index b1bc72f..a2d9a12 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -137,7 +137,6 @@
static unsigned int base, gpact, ciract, max_units, chip_type;
static unsigned long wdt_status;
-static DEFINE_SPINLOCK(spinlock);
static int nogameport = DEFAULT_NOGAMEPORT;
static int exclusive = DEFAULT_EXCLUSIVE;
@@ -163,18 +162,26 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started, default="
/* Superio Chip */
-static inline void superio_enter(void)
+static inline int superio_enter(void)
{
+ /*
+ * Try to reserve REG and REG + 1 for exclusive access.
+ */
+ if (!request_muxed_region(REG, 2, WATCHDOG_NAME))
+ return -EBUSY;
+
outb(0x87, REG);
outb(0x01, REG);
outb(0x55, REG);
outb(0x55, REG);
+ return 0;
}
static inline void superio_exit(void)
{
outb(0x02, REG);
outb(0x02, VAL);
+ release_region(REG, 2);
}
static inline void superio_select(int ldn)
@@ -255,12 +262,11 @@ static void wdt_keepalive(void)
set_bit(WDTS_KEEPALIVE, &wdt_status);
}
-static void wdt_start(void)
+static int wdt_start(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
+ int ret = superio_enter();
+ if (ret)
+ return ret;
superio_select(GPIO);
if (test_bit(WDTS_USE_GP, &wdt_status))
@@ -270,15 +276,15 @@ static void wdt_start(void)
wdt_update_timeout();
superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
+
+ return 0;
}
-static void wdt_stop(void)
+static int wdt_stop(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
+ int ret = superio_enter();
+ if (ret)
+ return ret;
superio_select(GPIO);
superio_outb(0x00, WDTCTRL);
@@ -288,7 +294,7 @@ static void wdt_stop(void)
superio_outb(0x00, WDTVALMSB);
superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
+ return 0;
}
/**
@@ -303,8 +309,6 @@ static void wdt_stop(void)
static int wdt_set_timeout(int t)
{
- unsigned long flags;
-
if (t < 1 || t > max_units * 60)
return -EINVAL;
@@ -313,14 +317,15 @@ static int wdt_set_timeout(int t)
else
timeout = t;
- spin_lock_irqsave(&spinlock, flags);
if (test_bit(WDTS_TIMER_RUN, &wdt_status)) {
- superio_enter();
+ int ret = superio_enter();
+ if (ret)
+ return ret;
+
superio_select(GPIO);
wdt_update_timeout();
superio_exit();
}
- spin_unlock_irqrestore(&spinlock, flags);
return 0;
}
@@ -339,12 +344,12 @@ static int wdt_set_timeout(int t)
static int wdt_get_status(int *status)
{
- unsigned long flags;
-
*status = 0;
if (testmode) {
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
+ int ret = superio_enter();
+ if (ret)
+ return ret;
+
superio_select(GPIO);
if (superio_inb(WDTCTRL) & WDT_ZERO) {
superio_outb(0x00, WDTCTRL);
@@ -353,7 +358,6 @@ static int wdt_get_status(int *status)
}
superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
}
if (test_and_clear_bit(WDTS_KEEPALIVE, &wdt_status))
*status |= WDIOF_KEEPALIVEPING;
@@ -379,9 +383,17 @@ static int wdt_open(struct inode *inode, struct file *file)
if (exclusive && test_and_set_bit(WDTS_DEV_OPEN, &wdt_status))
return -EBUSY;
if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status)) {
+ int ret;
if (nowayout && !test_and_set_bit(WDTS_LOCKED, &wdt_status))
__module_get(THIS_MODULE);
- wdt_start();
+
+ ret = wdt_start();
+ if (ret) {
+ clear_bit(WDTS_LOCKED, &wdt_status);
+ clear_bit(WDTS_TIMER_RUN, &wdt_status);
+ clear_bit(WDTS_DEV_OPEN, &wdt_status);
+ return ret;
+ }
}
return nonseekable_open(inode, file);
}
@@ -403,7 +415,16 @@ static int wdt_release(struct inode *inode, struct file *file)
{
if (test_bit(WDTS_TIMER_RUN, &wdt_status)) {
if (test_and_clear_bit(WDTS_EXPECTED, &wdt_status)) {
- wdt_stop();
+ int ret = wdt_stop();
+ if (ret) {
+ /*
+ * Stop failed. Just keep the watchdog alive
+ * and hope nothing bad happens.
+ */
+ set_bit(WDTS_EXPECTED, &wdt_status);
+ wdt_keepalive();
+ return ret;
+ }
clear_bit(WDTS_TIMER_RUN, &wdt_status);
} else {
wdt_keepalive();
@@ -484,7 +505,9 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
&ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
- wdt_get_status(&status);
+ rc = wdt_get_status(&status);
+ if (rc)
+ return rc;
return put_user(status, uarg.i);
case WDIOC_GETBOOTSTATUS:
@@ -500,14 +523,22 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (new_options) {
case WDIOS_DISABLECARD:
- if (test_bit(WDTS_TIMER_RUN, &wdt_status))
- wdt_stop();
+ if (test_bit(WDTS_TIMER_RUN, &wdt_status)) {
+ rc = wdt_stop();
+ if (rc)
+ return rc;
+ }
clear_bit(WDTS_TIMER_RUN, &wdt_status);
return 0;
case WDIOS_ENABLECARD:
- if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status))
- wdt_start();
+ if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status)) {
+ rc = wdt_start();
+ if (rc) {
+ clear_bit(WDTS_TIMER_RUN, &wdt_status);
+ return rc;
+ }
+ }
return 0;
default:
@@ -560,16 +591,17 @@ static int __init it87_wdt_init(void)
int rc = 0;
int try_gameport = !nogameport;
u8 chip_rev;
- unsigned long flags;
+ int gp_rreq_fail = 0;
wdt_status = 0;
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
+ rc = superio_enter();
+ if (rc)
+ return rc;
+
chip_type = superio_inw(CHIPID);
chip_rev = superio_inb(CHIPREV) & 0x0f;
superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
switch (chip_type) {
case IT8702_ID:
@@ -603,8 +635,9 @@ static int __init it87_wdt_init(void)
return -ENODEV;
}
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
+ rc = superio_enter();
+ if (rc)
+ return rc;
superio_select(GPIO);
superio_outb(WDT_TOV1, WDTCFG);
@@ -620,21 +653,16 @@ static int __init it87_wdt_init(void)
}
gpact = superio_inb(ACTREG);
superio_outb(0x01, ACTREG);
- superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
if (request_region(base, 1, WATCHDOG_NAME))
set_bit(WDTS_USE_GP, &wdt_status);
else
- rc = -EIO;
- } else {
- superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
+ gp_rreq_fail = 1;
}
/* If we haven't Gameport support, try to get CIR support */
if (!test_bit(WDTS_USE_GP, &wdt_status)) {
if (!request_region(CIR_BASE, 8, WATCHDOG_NAME)) {
- if (rc == -EIO)
+ if (gp_rreq_fail)
printk(KERN_ERR PFX
"I/O Address 0x%04x and 0x%04x"
" already in use\n", base, CIR_BASE);
@@ -646,21 +674,16 @@ static int __init it87_wdt_init(void)
goto err_out;
}
base = CIR_BASE;
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
superio_select(CIR);
superio_outw(base, BASEREG);
superio_outb(0x00, CIR_ILS);
ciract = superio_inb(ACTREG);
superio_outb(0x01, ACTREG);
- if (rc == -EIO) {
+ if (gp_rreq_fail) {
superio_select(GAMEPORT);
superio_outb(gpact, ACTREG);
}
-
- superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
}
if (timeout < 1 || timeout > max_units * 60) {
@@ -704,6 +727,7 @@ static int __init it87_wdt_init(void)
"nogameport=%d)\n", chip_type, chip_rev, timeout,
nowayout, testmode, exclusive, nogameport);
+ superio_exit();
return 0;
err_out_reboot:
@@ -711,49 +735,37 @@ err_out_reboot:
err_out_region:
release_region(base, test_bit(WDTS_USE_GP, &wdt_status) ? 1 : 8);
if (!test_bit(WDTS_USE_GP, &wdt_status)) {
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
superio_select(CIR);
superio_outb(ciract, ACTREG);
- superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
}
err_out:
if (try_gameport) {
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
superio_select(GAMEPORT);
superio_outb(gpact, ACTREG);
- superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
}
+ superio_exit();
return rc;
}
static void __exit it87_wdt_exit(void)
{
- unsigned long flags;
- int nolock;
-
- nolock = !spin_trylock_irqsave(&spinlock, flags);
- superio_enter();
- superio_select(GPIO);
- superio_outb(0x00, WDTCTRL);
- superio_outb(0x00, WDTCFG);
- superio_outb(0x00, WDTVALLSB);
- if (max_units > 255)
- superio_outb(0x00, WDTVALMSB);
- if (test_bit(WDTS_USE_GP, &wdt_status)) {
- superio_select(GAMEPORT);
- superio_outb(gpact, ACTREG);
- } else {
- superio_select(CIR);
- superio_outb(ciract, ACTREG);
+ if (superio_enter() == 0) {
+ superio_select(GPIO);
+ superio_outb(0x00, WDTCTRL);
+ superio_outb(0x00, WDTCFG);
+ superio_outb(0x00, WDTVALLSB);
+ if (max_units > 255)
+ superio_outb(0x00, WDTVALMSB);
+ if (test_bit(WDTS_USE_GP, &wdt_status)) {
+ superio_select(GAMEPORT);
+ superio_outb(gpact, ACTREG);
+ } else {
+ superio_select(CIR);
+ superio_outb(ciract, ACTREG);
+ }
+ superio_exit();
}
- superio_exit();
- if (!nolock)
- spin_unlock_irqrestore(&spinlock, flags);
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
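The it8712f_wdt and it87_wdt changes above share one idea: the Super-I/O configuration ports at REG/VAL are shared with other drivers (hwmon, GPIO, other watchdogs), so a spinlock private to this module cannot arbitrate access. request_muxed_region() claims the port range in the global resource tree and may sleep until the current holder calls release_region(), which is also why superio_enter() can now fail and every caller grew an error path. The pattern, condensed (taken straight from the code above):

    static int superio_enter(void)
    {
    	/* May sleep until another driver releases the two ports. */
    	if (!request_muxed_region(REG, 2, WATCHDOG_NAME))
    		return -EBUSY;

    	outb(0x87, REG);	/* magic sequence: unlock config space */
    	outb(0x01, REG);
    	outb(0x55, REG);
    	outb(0x55, REG);
    	return 0;
    }

    static void superio_exit(void)
    {
    	outb(0x02, REG);	/* lock config space again */
    	outb(0x02, VAL);
    	release_region(REG, 2);	/* let the next contender in */
    }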
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 2b4af22..4dc3102 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -407,12 +407,35 @@ static int __devexit mpcore_wdt_remove(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_PM
+static int mpcore_wdt_suspend(struct platform_device *dev, pm_message_t msg)
+{
+ struct mpcore_wdt *wdt = platform_get_drvdata(dev);
+ mpcore_wdt_stop(wdt); /* Turn the WDT off */
+ return 0;
+}
+
+static int mpcore_wdt_resume(struct platform_device *dev)
+{
+ struct mpcore_wdt *wdt = platform_get_drvdata(dev);
+ /* re-activate timer */
+ if (test_bit(0, &wdt->timer_alive))
+ mpcore_wdt_start(wdt);
+ return 0;
+}
+#else
+#define mpcore_wdt_suspend NULL
+#define mpcore_wdt_resume NULL
+#endif
+
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:mpcore_wdt");
static struct platform_driver mpcore_wdt_driver = {
.probe = mpcore_wdt_probe,
.remove = __devexit_p(mpcore_wdt_remove),
+ .suspend = mpcore_wdt_suspend,
+ .resume = mpcore_wdt_resume,
.shutdown = mpcore_wdt_shutdown,
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 0430e09..ac37bb8 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -225,11 +225,11 @@ static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
ret = misc_register(&mtx1_wdt_misc);
if (ret < 0) {
- printk(KERN_ERR " mtx-1_wdt : failed to register\n");
+ dev_err(&pdev->dev, "failed to register\n");
return ret;
}
mtx1_wdt_start();
- printk(KERN_INFO "MTX-1 Watchdog driver\n");
+ dev_info(&pdev->dev, "MTX-1 Watchdog driver\n");
return 0;
}
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index afa78a5..809f41c 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -458,7 +458,15 @@ static int __devexit nv_tco_remove(struct platform_device *dev)
static void nv_tco_shutdown(struct platform_device *dev)
{
+ u32 val;
+
tco_timer_stop();
+
+ /* Some BIOSes fail the POST (once) if the NO_REBOOT flag is not
+ * unset during shutdown. */
+ pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+ val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+ pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
}
static struct platform_driver nv_tco_driver = {
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
new file mode 100644
index 0000000..4ec741a
--- /dev/null
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -0,0 +1,433 @@
+/*
+* of_xilinx_wdt.c 1.01 A Watchdog Device Driver for Xilinx xps_timebase_wdt
+*
+* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
+*
+* -----------------------
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+*
+* -----------------------
+* 30-May-2011 Alejandro Cabrera <aldaya@gmail.com>
+* - If "xlnx,wdt-enable-once" wasn't found on device tree the
+* module will use CONFIG_WATCHDOG_NOWAYOUT
+* - If the device tree parameters ("clock-frequency" and
+* "xlnx,wdt-interval") wasn't found the driver won't
+* know the wdt reset interval
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/watchdog.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+
+/* Register offsets for the Wdt device */
+#define XWT_TWCSR0_OFFSET 0x0 /* Control/Status Register0 */
+#define XWT_TWCSR1_OFFSET 0x4 /* Control/Status Register1 */
+#define XWT_TBR_OFFSET 0x8 /* Timebase Register Offset */
+
+/* Control/Status Register Masks */
+#define XWT_CSR0_WRS_MASK 0x00000008 /* Reset status */
+#define XWT_CSR0_WDS_MASK 0x00000004 /* Timer state */
+#define XWT_CSR0_EWDT1_MASK 0x00000002 /* Enable bit 1 */
+
+/* Control/Status Register 0/1 bits */
+#define XWT_CSRX_EWDT2_MASK 0x00000001 /* Enable bit 2 */
+
+/* SelfTest constants */
+#define XWT_MAX_SELFTEST_LOOP_COUNT 0x00010000
+#define XWT_TIMER_FAILED 0xFFFFFFFF
+
+#define WATCHDOG_NAME "Xilinx Watchdog"
+#define PFX WATCHDOG_NAME ": "
+
+struct xwdt_device {
+ struct resource res;
+ void __iomem *base;
+ u32 nowayout;
+ u32 wdt_interval;
+ u32 boot_status;
+};
+
+static struct xwdt_device xdev;
+
+static u32 timeout;
+static u32 control_status_reg;
+static u8 expect_close;
+static u8 no_timeout;
+static unsigned long driver_open;
+
+static DEFINE_SPINLOCK(spinlock);
+
+static void xwdt_start(void)
+{
+ spin_lock(&spinlock);
+
+ /* Clean previous status and enable the watchdog timer */
+ control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+ control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
+
+ iowrite32((control_status_reg | XWT_CSR0_EWDT1_MASK),
+ xdev.base + XWT_TWCSR0_OFFSET);
+
+ iowrite32(XWT_CSRX_EWDT2_MASK, xdev.base + XWT_TWCSR1_OFFSET);
+
+ spin_unlock(&spinlock);
+}
+
+static void xwdt_stop(void)
+{
+ spin_lock(&spinlock);
+
+ control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+
+ iowrite32((control_status_reg & ~XWT_CSR0_EWDT1_MASK),
+ xdev.base + XWT_TWCSR0_OFFSET);
+
+ iowrite32(0, xdev.base + XWT_TWCSR1_OFFSET);
+
+ spin_unlock(&spinlock);
+ printk(KERN_INFO PFX "Stopped!\n");
+}
+
+static void xwdt_keepalive(void)
+{
+ spin_lock(&spinlock);
+
+ control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+ control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
+ iowrite32(control_status_reg, xdev.base + XWT_TWCSR0_OFFSET);
+
+ spin_unlock(&spinlock);
+}
+
+static void xwdt_get_status(int *status)
+{
+ int new_status;
+
+ spin_lock(&spinlock);
+
+ control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+ new_status = ((control_status_reg &
+ (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK)) != 0);
+ spin_unlock(&spinlock);
+
+ *status = 0;
+ if (new_status & 1)
+ *status |= WDIOF_CARDRESET;
+}
+
+static u32 xwdt_selftest(void)
+{
+ int i;
+ u32 timer_value1;
+ u32 timer_value2;
+
+ spin_lock(&spinlock);
+
+ timer_value1 = ioread32(xdev.base + XWT_TBR_OFFSET);
+ timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET);
+
+ for (i = 0;
+ ((i <= XWT_MAX_SELFTEST_LOOP_COUNT) &&
+ (timer_value2 == timer_value1)); i++) {
+ timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET);
+ }
+
+ spin_unlock(&spinlock);
+
+ if (timer_value2 != timer_value1)
+ return ~XWT_TIMER_FAILED;
+ else
+ return XWT_TIMER_FAILED;
+}
+
+static int xwdt_open(struct inode *inode, struct file *file)
+{
+ /* Only one process can handle the wdt at a time */
+ if (test_and_set_bit(0, &driver_open))
+ return -EBUSY;
+
+ /* Make sure that the module stays loaded... */
+ if (xdev.nowayout)
+ __module_get(THIS_MODULE);
+
+ xwdt_start();
+ printk(KERN_INFO PFX "Started...\n");
+
+ return nonseekable_open(inode, file);
+}
+
+static int xwdt_release(struct inode *inode, struct file *file)
+{
+ if (expect_close == 42) {
+ xwdt_stop();
+ } else {
+ printk(KERN_CRIT PFX
+ "Unexpected close, not stopping watchdog!\n");
+ xwdt_keepalive();
+ }
+
+ clear_bit(0, &driver_open);
+ expect_close = 0;
+ return 0;
+}
+
+/*
+ * xwdt_write:
+ * @file: file handle to the watchdog
+ * @buf: buffer to write (unused, as data does not matter here)
+ * @count: count of bytes
+ * @ppos: pointer to the position to write. No seeks allowed
+ *
+ * A write to a watchdog device is defined as a keepalive signal. Any
+ * write of data will do, as we don't define content meaning.
+ */
+static ssize_t xwdt_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ if (!xdev.nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ expect_close = 0;
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_close = 42;
+ }
+ }
+ xwdt_keepalive();
+ }
+ return len;
+}
+
+static const struct watchdog_info ident = {
+ .options = WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+ .firmware_version = 1,
+ .identity = WATCHDOG_NAME,
+};
+
+/*
+ * xwdt_ioctl:
+ * @file: file handle to the device
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ *
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features.
+ */
+static long xwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int status;
+
+ union {
+ struct watchdog_info __user *ident;
+ int __user *i;
+ } uarg;
+
+ uarg.i = (int __user *)arg;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(uarg.ident, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(xdev.boot_status, uarg.i);
+
+ case WDIOC_GETSTATUS:
+ xwdt_get_status(&status);
+ return put_user(status, uarg.i);
+
+ case WDIOC_KEEPALIVE:
+ xwdt_keepalive();
+ return 0;
+
+ case WDIOC_GETTIMEOUT:
+ if (no_timeout)
+ return -ENOTTY;
+ else
+ return put_user(timeout, uarg.i);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations xwdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = xwdt_write,
+ .open = xwdt_open,
+ .release = xwdt_release,
+ .unlocked_ioctl = xwdt_ioctl,
+};
+
+static struct miscdevice xwdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &xwdt_fops,
+};
+
+static int __devinit xwdt_probe(struct platform_device *pdev)
+{
+ int rc;
+ u32 *tmptr;
+ u32 *pfreq;
+
+ no_timeout = 0;
+
+ pfreq = (u32 *)of_get_property(pdev->dev.of_node->parent,
+ "clock-frequency", NULL);
+
+ if (pfreq == NULL) {
+ printk(KERN_WARNING PFX
+ "The watchdog clock frequency cannot be obtained!\n");
+ no_timeout = 1;
+ }
+
+ rc = of_address_to_resource(pdev->dev.of_node, 0, &xdev.res);
+ if (rc) {
+ printk(KERN_WARNING PFX "invalid address!\n");
+ return rc;
+ }
+
+ tmptr = (u32 *)of_get_property(pdev->dev.of_node,
+ "xlnx,wdt-interval", NULL);
+ if (tmptr == NULL) {
+ printk(KERN_WARNING PFX "Parameter \"xlnx,wdt-interval\""
+ " not found in device tree!\n");
+ no_timeout = 1;
+ } else {
+ xdev.wdt_interval = *tmptr;
+ }
+
+ tmptr = (u32 *)of_get_property(pdev->dev.of_node,
+ "xlnx,wdt-enable-once", NULL);
+ if (tmptr == NULL) {
+ printk(KERN_WARNING PFX "Parameter \"xlnx,wdt-enable-once\""
+ " not found in device tree!\n");
+ xdev.nowayout = WATCHDOG_NOWAYOUT;
+ }
+
+/*
+ * Twice 2^wdt_interval / freq, because the first wdt overflow only
+ * raises an interrupt; the reset is generated at the second overflow
+ */
+ if (!no_timeout)
+ timeout = 2 * ((1<<xdev.wdt_interval) / *pfreq);
+
+ if (!request_mem_region(xdev.res.start,
+ xdev.res.end - xdev.res.start + 1, WATCHDOG_NAME)) {
+ rc = -ENXIO;
+ printk(KERN_ERR PFX "memory request failure!\n");
+ goto err_out;
+ }
+
+ xdev.base = ioremap(xdev.res.start, xdev.res.end - xdev.res.start + 1);
+ if (xdev.base == NULL) {
+ rc = -ENOMEM;
+ printk(KERN_ERR PFX "ioremap failure!\n");
+ goto release_mem;
+ }
+
+ rc = xwdt_selftest();
+ if (rc == XWT_TIMER_FAILED) {
+ printk(KERN_ERR PFX "SelfTest routine error!\n");
+ goto unmap_io;
+ }
+
+ xwdt_get_status(&xdev.boot_status);
+
+ rc = misc_register(&xwdt_miscdev);
+ if (rc) {
+ printk(KERN_ERR PFX
+ "cannot register miscdev on minor=%d (err=%d)\n",
+ xwdt_miscdev.minor, rc);
+ goto unmap_io;
+ }
+
+ if (no_timeout)
+ printk(KERN_INFO PFX
+ "driver loaded (timeout=? sec, nowayout=%d)\n",
+ xdev.nowayout);
+ else
+ printk(KERN_INFO PFX
+ "driver loaded (timeout=%d sec, nowayout=%d)\n",
+ timeout, xdev.nowayout);
+
+ expect_close = 0;
+ clear_bit(0, &driver_open);
+
+ return 0;
+
+unmap_io:
+ iounmap(xdev.base);
+release_mem:
+ release_mem_region(xdev.res.start, resource_size(&xdev.res));
+err_out:
+ return rc;
+}
+
+static int __devexit xwdt_remove(struct platform_device *dev)
+{
+ misc_deregister(&xwdt_miscdev);
+ iounmap(xdev.base);
+ release_mem_region(xdev.res.start, resource_size(&xdev.res));
+
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static struct of_device_id __devinitdata xwdt_of_match[] = {
+ { .compatible = "xlnx,xps-timebase-wdt-1.01.a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xwdt_of_match);
+
+static struct platform_driver xwdt_driver = {
+ .probe = xwdt_probe,
+ .remove = __devexit_p(xwdt_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = WATCHDOG_NAME,
+ .of_match_table = xwdt_of_match,
+ },
+};
+
+static int __init xwdt_init(void)
+{
+ return platform_driver_register(&xwdt_driver);
+}
+
+static void __exit xwdt_exit(void)
+{
+ platform_driver_unregister(&xwdt_driver);
+}
+
+module_init(xwdt_init);
+module_exit(xwdt_exit);
+
+MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
+MODULE_DESCRIPTION("Xilinx Watchdog driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
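To make the timeout comment in xwdt_probe() concrete, here is a worked example with assumed device-tree values (100 MHz clock, xlnx,wdt-interval = 30; neither number comes from the patch):

    /* timeout = 2 * ((1 << xdev.wdt_interval) / *pfreq)
     *         = 2 * ((1 << 30) / 100000000)
     *         = 2 * (1073741824 / 100000000)	(integer division)
     *         = 2 * 10
     *         = 20 seconds until the second overflow resets the board
     */
    u32 timeout = 2 * ((1U << 30) / 100000000U);	/* 20 */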
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index b7c1390..e78d899 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -56,6 +56,7 @@
#define IO_DEFAULT 0x2E /* Address used on Portwell Boards */
static int io = IO_DEFAULT;
+static int swc_base_addr = -1;
static int timeout = DEFAULT_TIMEOUT; /* timeout value */
static unsigned long timer_enabled; /* is the timer enabled? */
@@ -116,9 +117,8 @@ static inline void pc87413_enable_swc(void)
/* Read SWC I/O base address */
-static inline unsigned int pc87413_get_swc_base(void)
+static void pc87413_get_swc_base_addr(void)
{
- unsigned int swc_base_addr = 0;
unsigned char addr_l, addr_h = 0;
/* Step 3: Read SWC I/O Base Address */
@@ -136,12 +136,11 @@ static inline unsigned int pc87413_get_swc_base(void)
"Read SWC I/O Base Address: low %d, high %d, res %d\n",
addr_l, addr_h, swc_base_addr);
#endif
- return swc_base_addr;
}
/* Select Bank 3 of SWC */
-static inline void pc87413_swc_bank3(unsigned int swc_base_addr)
+static inline void pc87413_swc_bank3(void)
{
/* Step 4: Select Bank3 of SWC */
outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f);
@@ -152,8 +151,7 @@ static inline void pc87413_swc_bank3(unsigned int swc_base_addr)
/* Set watchdog timeout to x minutes */
-static inline void pc87413_programm_wdto(unsigned int swc_base_addr,
- char pc87413_time)
+static inline void pc87413_programm_wdto(char pc87413_time)
{
/* Step 5: Programm WDTO, Twd. */
outb_p(pc87413_time, swc_base_addr + WDTO);
@@ -164,7 +162,7 @@ static inline void pc87413_programm_wdto(unsigned int swc_base_addr,
/* Enable WDEN */
-static inline void pc87413_enable_wden(unsigned int swc_base_addr)
+static inline void pc87413_enable_wden(void)
{
/* Step 6: Enable WDEN */
outb_p(inb(swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL);
@@ -174,7 +172,7 @@ static inline void pc87413_enable_wden(unsigned int swc_base_addr)
}
/* Enable SW_WD_TREN */
-static inline void pc87413_enable_sw_wd_tren(unsigned int swc_base_addr)
+static inline void pc87413_enable_sw_wd_tren(void)
{
/* Enable SW_WD_TREN */
outb_p(inb(swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG);
@@ -185,7 +183,7 @@ static inline void pc87413_enable_sw_wd_tren(unsigned int swc_base_addr)
/* Disable SW_WD_TREN */
-static inline void pc87413_disable_sw_wd_tren(unsigned int swc_base_addr)
+static inline void pc87413_disable_sw_wd_tren(void)
{
/* Disable SW_WD_TREN */
outb_p(inb(swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG);
@@ -196,7 +194,7 @@ static inline void pc87413_disable_sw_wd_tren(unsigned int swc_base_addr)
/* Enable SW_WD_TRG */
-static inline void pc87413_enable_sw_wd_trg(unsigned int swc_base_addr)
+static inline void pc87413_enable_sw_wd_trg(void)
{
/* Enable SW_WD_TRG */
outb_p(inb(swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL);
@@ -207,7 +205,7 @@ static inline void pc87413_enable_sw_wd_trg(unsigned int swc_base_addr)
/* Disable SW_WD_TRG */
-static inline void pc87413_disable_sw_wd_trg(unsigned int swc_base_addr)
+static inline void pc87413_disable_sw_wd_trg(void)
{
/* Disable SW_WD_TRG */
outb_p(inb(swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL);
@@ -222,18 +220,13 @@ static inline void pc87413_disable_sw_wd_trg(unsigned int swc_base_addr)
static void pc87413_enable(void)
{
- unsigned int swc_base_addr;
-
spin_lock(&io_lock);
- pc87413_select_wdt_out();
- pc87413_enable_swc();
- swc_base_addr = pc87413_get_swc_base();
- pc87413_swc_bank3(swc_base_addr);
- pc87413_programm_wdto(swc_base_addr, timeout);
- pc87413_enable_wden(swc_base_addr);
- pc87413_enable_sw_wd_tren(swc_base_addr);
- pc87413_enable_sw_wd_trg(swc_base_addr);
+ pc87413_swc_bank3();
+ pc87413_programm_wdto(timeout);
+ pc87413_enable_wden();
+ pc87413_enable_sw_wd_tren();
+ pc87413_enable_sw_wd_trg();
spin_unlock(&io_lock);
}
@@ -242,17 +235,12 @@ static void pc87413_enable(void)
static void pc87413_disable(void)
{
- unsigned int swc_base_addr;
-
spin_lock(&io_lock);
- pc87413_select_wdt_out();
- pc87413_enable_swc();
- swc_base_addr = pc87413_get_swc_base();
- pc87413_swc_bank3(swc_base_addr);
- pc87413_disable_sw_wd_tren(swc_base_addr);
- pc87413_disable_sw_wd_trg(swc_base_addr);
- pc87413_programm_wdto(swc_base_addr, 0);
+ pc87413_swc_bank3();
+ pc87413_disable_sw_wd_tren();
+ pc87413_disable_sw_wd_trg();
+ pc87413_programm_wdto(0);
spin_unlock(&io_lock);
}
@@ -261,20 +249,15 @@ static void pc87413_disable(void)
static void pc87413_refresh(void)
{
- unsigned int swc_base_addr;
-
spin_lock(&io_lock);
- pc87413_select_wdt_out();
- pc87413_enable_swc();
- swc_base_addr = pc87413_get_swc_base();
- pc87413_swc_bank3(swc_base_addr);
- pc87413_disable_sw_wd_tren(swc_base_addr);
- pc87413_disable_sw_wd_trg(swc_base_addr);
- pc87413_programm_wdto(swc_base_addr, timeout);
- pc87413_enable_wden(swc_base_addr);
- pc87413_enable_sw_wd_tren(swc_base_addr);
- pc87413_enable_sw_wd_trg(swc_base_addr);
+ pc87413_swc_bank3();
+ pc87413_disable_sw_wd_tren();
+ pc87413_disable_sw_wd_trg();
+ pc87413_programm_wdto(timeout);
+ pc87413_enable_wden();
+ pc87413_enable_sw_wd_tren();
+ pc87413_enable_sw_wd_trg();
spin_unlock(&io_lock);
}
@@ -528,7 +511,8 @@ static int __init pc87413_init(void)
printk(KERN_INFO PFX "Version " VERSION " at io 0x%X\n",
WDT_INDEX_IO_PORT);
- /* request_region(io, 2, "pc87413"); */
+ if (!request_muxed_region(io, 2, MODNAME))
+ return -EBUSY;
ret = register_reboot_notifier(&pc87413_notifier);
if (ret != 0) {
@@ -541,12 +525,32 @@ static int __init pc87413_init(void)
printk(KERN_ERR PFX
"cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
- unregister_reboot_notifier(&pc87413_notifier);
- return ret;
+ goto reboot_unreg;
}
printk(KERN_INFO PFX "initialized. timeout=%d min \n", timeout);
+
+ pc87413_select_wdt_out();
+ pc87413_enable_swc();
+ pc87413_get_swc_base_addr();
+
+ if (!request_region(swc_base_addr, 0x20, MODNAME)) {
+ printk(KERN_ERR PFX
+ "cannot request SWC region at 0x%x\n", swc_base_addr);
+ ret = -EBUSY;
+ goto misc_unreg;
+ }
+
pc87413_enable();
+
+ release_region(io, 2);
return 0;
+
+misc_unreg:
+ misc_deregister(&pc87413_miscdev);
+reboot_unreg:
+ unregister_reboot_notifier(&pc87413_notifier);
+ release_region(io, 2);
+ return ret;
}
/**
@@ -569,7 +573,7 @@ static void __exit pc87413_exit(void)
misc_deregister(&pc87413_miscdev);
unregister_reboot_notifier(&pc87413_notifier);
- /* release_region(io, 2); */
+ release_region(swc_base_addr, 0x20);
printk(KERN_INFO MODNAME " watchdog component driver removed.\n");
}
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index f7f5aa0..30da88f 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -589,6 +589,15 @@ static int s3c2410wdt_resume(struct platform_device *dev)
#define s3c2410wdt_resume NULL
#endif /* CONFIG_PM */
+#ifdef CONFIG_OF
+static const struct of_device_id s3c2410_wdt_match[] = {
+ { .compatible = "samsung,s3c2410-wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
+#else
+#define s3c2410_wdt_match NULL
+#endif
static struct platform_driver s3c2410wdt_driver = {
.probe = s3c2410wdt_probe,
@@ -599,6 +608,7 @@ static struct platform_driver s3c2410wdt_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-wdt",
+ .of_match_table = s3c2410_wdt_match,
},
};
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index ff11504..93ac589 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -29,7 +29,7 @@
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#define SBC7240_PREFIX "sbc7240_wdt: "
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index c7cf4b0..029467e 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -472,15 +472,10 @@ static void sch311x_wdt_shutdown(struct platform_device *dev)
sch311x_wdt_stop();
}
-#define sch311x_wdt_suspend NULL
-#define sch311x_wdt_resume NULL
-
static struct platform_driver sch311x_wdt_driver = {
.probe = sch311x_wdt_probe,
.remove = __devexit_p(sch311x_wdt_remove),
.shutdown = sch311x_wdt_shutdown,
- .suspend = sch311x_wdt_suspend,
- .resume = sch311x_wdt_resume,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index db84f23..a267dc0 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -64,7 +64,7 @@
* misses its deadline, the kernel timer will allow the WDT to overflow.
*/
static int clock_division_ratio = WTCSR_CKS_4096;
-#define next_ping_period(cks) msecs_to_jiffies(cks - 4)
+#define next_ping_period(cks) (jiffies + msecs_to_jiffies(cks - 4))
static const struct watchdog_info sh_wdt_info;
static struct platform_device *sh_wdt_dev;
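The shwdt change is subtle but important: mod_timer() expects an absolute expiry time in jiffies, so the old macro, which returned only the relative msecs_to_jiffies() value, armed a timer that was effectively already expired and fired on the next tick. Illustrative use (my_timer and the 4096 ms period are assumed, not taken from shwdt):

    	/* re-arm a ping timer one period from now */
    	mod_timer(&my_timer, jiffies + msecs_to_jiffies(4096 - 4));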
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 0d80e08..cc2cfbe 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -134,6 +134,8 @@ static void wdt_enable(void)
writel(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
writel(LOCK, wdt->base + WDTLOCK);
+ /* Flush posted writes. */
+ readl(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
}
@@ -144,9 +146,10 @@ static void wdt_disable(void)
writel(UNLOCK, wdt->base + WDTLOCK);
writel(0, wdt->base + WDTCONTROL);
- writel(0, wdt->base + WDTLOAD);
writel(LOCK, wdt->base + WDTLOCK);
+ /* Flush posted writes. */
+ readl(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
}
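The readl() added to wdt_enable()/wdt_disable() above is the usual posted-write flush: MMIO writes may sit in bus write buffers, and a read from the same device forces them to complete, so the watchdog is really running (or really stopped) before the spinlock is dropped. In sketch form:

    	writel(LOCK, wdt->base + WDTLOCK);
    	(void)readl(wdt->base + WDTLOCK);	/* flush posted writes */
    	spin_unlock(&wdt->lock);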
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
new file mode 100644
index 0000000..cfa1a15
--- /dev/null
+++ b/drivers/watchdog/watchdog_core.c
@@ -0,0 +1,111 @@
+/*
+ * watchdog_core.c
+ *
+ * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
+ *
+ * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
+ *
+ * This source code is part of the generic code that can be used
+ * by all the watchdog timer drivers.
+ *
+ * Based on source code of the following authors:
+ * Matt Domsch <Matt_Domsch@dell.com>,
+ * Rob Radez <rob@osinvestor.com>,
+ * Rusty Lynch <rusty@linux.co.intel.com>
+ * Satyam Sharma <satyam@infradead.org>
+ * Randy Dunlap <randy.dunlap@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
+ * admit liability nor provide warranty for any of this software.
+ * This material is provided "AS-IS" and at no charge.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h> /* For EXPORT_SYMBOL/module stuff/... */
+#include <linux/types.h> /* For standard types */
+#include <linux/errno.h> /* For the -ENODEV/... values */
+#include <linux/kernel.h> /* For printk/panic/... */
+#include <linux/watchdog.h> /* For watchdog specific items */
+#include <linux/init.h> /* For __init/__exit/... */
+
+#include "watchdog_dev.h" /* For watchdog_dev_register/... */
+
+/**
+ * watchdog_register_device() - register a watchdog device
+ * @wdd: watchdog device
+ *
+ * Register a watchdog device with the kernel so that the
+ * watchdog timer can be accessed from userspace.
+ *
+ * A zero is returned on success and a negative errno code for
+ * failure.
+ */
+int watchdog_register_device(struct watchdog_device *wdd)
+{
+ int ret;
+
+ if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
+ return -EINVAL;
+
+ /* Mandatory operations need to be supported */
+ if (wdd->ops->start == NULL || wdd->ops->stop == NULL)
+ return -EINVAL;
+
+ /*
+ * Check that we have valid min and max timeout values, if
+ * not reset them both to 0 (=not used or unknown)
+ */
+ if (wdd->min_timeout > wdd->max_timeout) {
+ pr_info("Invalid min and max timeout values, resetting to 0!\n");
+ wdd->min_timeout = 0;
+ wdd->max_timeout = 0;
+ }
+
+ /*
+ * Note: now that all watchdog_device data has been verified, we
+ * will not check this anymore in other functions. If data gets
+ * corrupted in a later stage then we expect a kernel panic!
+ */
+
+ /* We only support 1 watchdog device via the /dev/watchdog interface */
+ ret = watchdog_dev_register(wdd);
+ if (ret) {
+ pr_err("error registering /dev/watchdog (err=%d).\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(watchdog_register_device);
+
+/**
+ * watchdog_unregister_device() - unregister a watchdog device
+ * @wdd: watchdog device to unregister
+ *
+ * Unregister a watchdog device that was previously successfully
+ * registered with watchdog_register_device().
+ */
+void watchdog_unregister_device(struct watchdog_device *wdd)
+{
+ int ret;
+
+ if (wdd == NULL)
+ return;
+
+ ret = watchdog_dev_unregister(wdd);
+ if (ret)
+ pr_err("error unregistering /dev/watchdog (err=%d).\n", ret);
+}
+EXPORT_SYMBOL_GPL(watchdog_unregister_device);
+
+MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
+MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
+MODULE_DESCRIPTION("WatchDog Timer Driver Core");
+MODULE_LICENSE("GPL");
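
A minimal, illustrative sketch of a driver built on this new core API; the example_* names and timeout values are invented here, and only the start and stop operations are mandatory, as the check above enforces:

#include <linux/module.h>
#include <linux/watchdog.h>

/* Hypothetical hardware hooks, for illustration only. */
static int example_wdt_start(struct watchdog_device *wdd) { return 0; }
static int example_wdt_stop(struct watchdog_device *wdd) { return 0; }

static const struct watchdog_info example_wdt_info = {
	.options	= WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity	= "example_wdt",
};

static const struct watchdog_ops example_wdt_ops = {
	.owner	= THIS_MODULE,
	.start	= example_wdt_start,	/* mandatory */
	.stop	= example_wdt_stop,	/* mandatory */
};

static struct watchdog_device example_wdd = {
	.info		= &example_wdt_info,
	.ops		= &example_wdt_ops,
	.timeout	= 30,
	.min_timeout	= 1,
	.max_timeout	= 255,
};

static int __init example_wdt_init(void)
{
	return watchdog_register_device(&example_wdd);
}

static void __exit example_wdt_exit(void)
{
	watchdog_unregister_device(&example_wdd);
}

module_init(example_wdt_init);
module_exit(example_wdt_exit);
MODULE_LICENSE("GPL");

Everything else (the misc device, the ioctls, magic close) is then handled by watchdog_dev.c below.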
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
new file mode 100644
index 0000000..d33520d
--- /dev/null
+++ b/drivers/watchdog/watchdog_dev.c
@@ -0,0 +1,395 @@
+/*
+ * watchdog_dev.c
+ *
+ * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
+ *
+ * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
+ *
+ *
+ * This source code is part of the generic code that can be used
+ * by all the watchdog timer drivers.
+ *
+ * This part of the generic code takes care of the following
+ * misc device: /dev/watchdog.
+ *
+ * Based on source code of the following authors:
+ * Matt Domsch <Matt_Domsch@dell.com>,
+ * Rob Radez <rob@osinvestor.com>,
+ * Rusty Lynch <rusty@linux.co.intel.com>
+ * Satyam Sharma <satyam@infradead.org>
+ * Randy Dunlap <randy.dunlap@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
+ * admit liability nor provide warranty for any of this software.
+ * This material is provided "AS-IS" and at no charge.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h> /* For module stuff/... */
+#include <linux/types.h> /* For standard types (like size_t) */
+#include <linux/errno.h> /* For the -ENODEV/... values */
+#include <linux/kernel.h> /* For printk/panic/... */
+#include <linux/fs.h> /* For file operations */
+#include <linux/watchdog.h> /* For watchdog specific items */
+#include <linux/miscdevice.h> /* For handling misc devices */
+#include <linux/init.h> /* For __init/__exit/... */
+#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
+
+/* make sure we only register one /dev/watchdog device */
+static unsigned long watchdog_dev_busy;
+/* the watchdog device behind /dev/watchdog */
+static struct watchdog_device *wdd;
+
+/*
+ * watchdog_ping: ping the watchdog.
+ * @wddev: the watchdog device to ping
+ *
+ * If the watchdog does not have its own ping operation then it is
+ * restarted via the start operation instead. This wrapper function
+ * does exactly that.

+ * We only ping when the watchdog device is running.
+ */
+
+static int watchdog_ping(struct watchdog_device *wddev)
+{
+ if (test_bit(WDOG_ACTIVE, &wddev->status)) {
+ if (wddev->ops->ping)
+ return wddev->ops->ping(wddev); /* ping the watchdog */
+ else
+ return wddev->ops->start(wddev); /* restart watchdog */
+ }
+ return 0;
+}
+
+/*
+ * watchdog_start: wrapper to start the watchdog.
+ * @wddev: the watchdog device to start
+ *
+ * Start the watchdog if it is not active and mark it active.
+ * This function returns zero on success or a negative errno code for
+ * failure.
+ */
+
+static int watchdog_start(struct watchdog_device *wddev)
+{
+ int err;
+
+ if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
+ err = wddev->ops->start(wddev);
+ if (err < 0)
+ return err;
+
+ set_bit(WDOG_ACTIVE, &wddev->status);
+ }
+ return 0;
+}
+
+/*
+ * watchdog_stop: wrapper to stop the watchdog.
+ * @wddev: the watchdog device to stop
+ *
+ * Stop the watchdog if it is still active and unmark it active.
+ * This function returns zero on success or a negative errno code for
+ * failure.
+ * If the 'nowayout' feature was set, the watchdog cannot be stopped.
+ */
+
+static int watchdog_stop(struct watchdog_device *wddev)
+{
+ int err = -EBUSY;
+
+ if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
+ pr_info("%s: nowayout prevents the watchdog from being stopped!\n",
+ wddev->info->identity);
+ return err;
+ }
+
+ if (test_bit(WDOG_ACTIVE, &wddev->status)) {
+ err = wddev->ops->stop(wddev);
+ if (err < 0)
+ return err;
+
+ clear_bit(WDOG_ACTIVE, &wddev->status);
+ }
+ return 0;
+}
+
+/*
+ * watchdog_write: writes to the watchdog.
+ * @file: file from VFS
+ * @data: user address of data
+ * @len: length of data
+ * @ppos: pointer to the file offset
+ *
+ * A write to a watchdog device is defined as a keepalive ping.
+ * Writing the magic 'V' sequence allows the next close to turn
+ * off the watchdog (if 'nowayout' is not set).
+ */
+
+static ssize_t watchdog_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ size_t i;
+ char c;
+
+ if (len == 0)
+ return 0;
+
+ /*
+ * Note: just in case someone wrote the magic character
+ * five months ago...
+ */
+ clear_bit(WDOG_ALLOW_RELEASE, &wdd->status);
+
+ /* scan to see whether or not we got the magic character */
+ for (i = 0; i != len; i++) {
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ set_bit(WDOG_ALLOW_RELEASE, &wdd->status);
+ }
+
+ /* someone wrote to us, so we send the watchdog a keepalive ping */
+ watchdog_ping(wdd);
+
+ return len;
+}
+
+/*
+ * watchdog_ioctl: handle the different ioctl's for the watchdog device.
+ * @file: file handle to the device
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ *
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features.
+ */
+
+static long watchdog_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ unsigned int val;
+ int err;
+
+ if (wdd->ops->ioctl) {
+ err = wdd->ops->ioctl(wdd, cmd, arg);
+ if (err != -ENOIOCTLCMD)
+ return err;
+ }
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, wdd->info,
+ sizeof(struct watchdog_info)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ val = wdd->ops->status ? wdd->ops->status(wdd) : 0;
+ return put_user(val, p);
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(wdd->bootstatus, p);
+ case WDIOC_SETOPTIONS:
+ if (get_user(val, p))
+ return -EFAULT;
+ if (val & WDIOS_DISABLECARD) {
+ err = watchdog_stop(wdd);
+ if (err < 0)
+ return err;
+ }
+ if (val & WDIOS_ENABLECARD) {
+ err = watchdog_start(wdd);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+ case WDIOC_KEEPALIVE:
+ if (!(wdd->info->options & WDIOF_KEEPALIVEPING))
+ return -EOPNOTSUPP;
+ watchdog_ping(wdd);
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if ((wdd->ops->set_timeout == NULL) ||
+ !(wdd->info->options & WDIOF_SETTIMEOUT))
+ return -EOPNOTSUPP;
+ if (get_user(val, p))
+ return -EFAULT;
+ if ((wdd->max_timeout != 0) &&
+ (val < wdd->min_timeout || val > wdd->max_timeout))
+ return -EINVAL;
+ err = wdd->ops->set_timeout(wdd, val);
+ if (err < 0)
+ return err;
+ wdd->timeout = val;
+ /* If the watchdog is active then we send a keepalive ping
+ * to make sure that the watchdog keeps running (and, if
+ * possible, picks up the new timeout) */
+ watchdog_ping(wdd);
+ /* Fall through to return the new timeout */
+ case WDIOC_GETTIMEOUT:
+ /* timeout == 0 means that we don't know the timeout */
+ if (wdd->timeout == 0)
+ return -EOPNOTSUPP;
+ return put_user(wdd->timeout, p);
+ default:
+ return -ENOTTY;
+ }
+}
+
+/*
+ * watchdog_open: open the /dev/watchdog device.
+ * @inode: inode of device
+ * @file: file handle to device
+ *
+ * When the /dev/watchdog device gets opened, we start the watchdog.
+ * Watch out: the /dev/watchdog device is single open, so we make sure
+ * it can only be opened once.
+ */
+
+static int watchdog_open(struct inode *inode, struct file *file)
+{
+ int err = -EBUSY;
+
+ /* the watchdog is single open! */
+ if (test_and_set_bit(WDOG_DEV_OPEN, &wdd->status))
+ return -EBUSY;
+
+ /*
+ * If the /dev/watchdog device is open, we don't want the module
+ * to be unloaded.
+ */
+ if (!try_module_get(wdd->ops->owner))
+ goto out;
+
+ err = watchdog_start(wdd);
+ if (err < 0)
+ goto out_mod;
+
+ /* /dev/watchdog is a virtual (and thus non-seekable) device node */
+ return nonseekable_open(inode, file);
+
+out_mod:
+ module_put(wdd->ops->owner);
+out:
+ clear_bit(WDOG_DEV_OPEN, &wdd->status);
+ return err;
+}
+
+/*
+ * watchdog_release: release the /dev/watchdog device.
+ * @inode: inode of device
+ * @file: file handle to device
+ *
+ * This is the code for when /dev/watchdog gets closed. We will only
+ * stop the watchdog when we have received the magic char (and nowayout
+ * was not set), else the watchdog will keep running.
+ */
+
+static int watchdog_release(struct inode *inode, struct file *file)
+{
+ int err = -EBUSY;
+
+ /*
+ * We only stop the watchdog if we received the magic character
+ * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
+ * watchdog_stop will fail.
+ */
+ if (test_and_clear_bit(WDOG_ALLOW_RELEASE, &wdd->status) ||
+ !(wdd->info->options & WDIOF_MAGICCLOSE))
+ err = watchdog_stop(wdd);
+
+ /* If the watchdog was not stopped, send a keepalive ping */
+ if (err < 0) {
+ pr_crit("%s: watchdog did not stop!\n", wdd->info->identity);
+ watchdog_ping(wdd);
+ }
+
+ /* Allow the owner module to be unloaded again */
+ module_put(wdd->ops->owner);
+
+ /* make sure that /dev/watchdog can be re-opened */
+ clear_bit(WDOG_DEV_OPEN, &wdd->status);
+
+ return 0;
+}
+
+static const struct file_operations watchdog_fops = {
+ .owner = THIS_MODULE,
+ .write = watchdog_write,
+ .unlocked_ioctl = watchdog_ioctl,
+ .open = watchdog_open,
+ .release = watchdog_release,
+};
+
+static struct miscdevice watchdog_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &watchdog_fops,
+};
+
+/*
+ * watchdog_dev_register:
+ * @watchdog: watchdog device
+ *
+ * Register a watchdog device as /dev/watchdog. /dev/watchdog
+ * is actually a miscdevice and thus we set it up like that.
+ */
+
+int watchdog_dev_register(struct watchdog_device *watchdog)
+{
+ int err;
+
+ /* Only one device can register for /dev/watchdog */
+ if (test_and_set_bit(0, &watchdog_dev_busy)) {
+ pr_err("only one watchdog can use /dev/watchdog.\n");
+ return -EBUSY;
+ }
+
+ wdd = watchdog;
+
+ err = misc_register(&watchdog_miscdev);
+ if (err != 0) {
+ pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
+ watchdog->info->identity, WATCHDOG_MINOR, err);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ wdd = NULL;
+ clear_bit(0, &watchdog_dev_busy);
+ return err;
+}
+
+/*
+ * watchdog_dev_unregister:
+ * @watchdog: watchdog device
+ *
+ * Deregister the /dev/watchdog device.
+ */
+
+int watchdog_dev_unregister(struct watchdog_device *watchdog)
+{
+ /* Check that a watchdog device was registered in the past */
+ if (!test_bit(0, &watchdog_dev_busy) || !wdd)
+ return -ENODEV;
+
+ /* We can only unregister the watchdog device that was registered */
+ if (watchdog != wdd) {
+ pr_err("%s: watchdog was not registered as /dev/watchdog.\n",
+ watchdog->info->identity);
+ return -ENODEV;
+ }
+
+ misc_deregister(&watchdog_miscdev);
+ wdd = NULL;
+ clear_bit(0, &watchdog_dev_busy);
+ return 0;
+}
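
From userspace, the misc device registered above is driven through the standard watchdog ioctls; a rough sketch (error handling omitted) of the keepalive and magic-close behaviour implemented in watchdog_write() and watchdog_release():

/* Userspace sketch, for illustration only. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;
	int fd = open("/dev/watchdog", O_WRONLY);	/* open starts the watchdog */

	if (fd < 0)
		return 1;

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* also sends a keepalive ping */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* explicit ping */
	write(fd, "V", 1);			/* magic close: allow stop on release */
	close(fd);				/* stops the watchdog unless nowayout is set */
	return 0;
}

Closing without having written 'V' (when WDIOF_MAGICCLOSE is set) leaves the watchdog running and triggers the "watchdog did not stop" message above.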
diff --git a/drivers/watchdog/watchdog_dev.h b/drivers/watchdog/watchdog_dev.h
new file mode 100644
index 0000000..bc7612b
--- /dev/null
+++ b/drivers/watchdog/watchdog_dev.h
@@ -0,0 +1,33 @@
+/*
+ * watchdog_dev.h
+ *
+ * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
+ *
+ * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
+ *
+ * This source code is part of the generic code that can be used
+ * by all the watchdog timer drivers.
+ *
+ * Based on source code of the following authors:
+ * Matt Domsch <Matt_Domsch@dell.com>,
+ * Rob Radez <rob@osinvestor.com>,
+ * Rusty Lynch <rusty@linux.co.intel.com>
+ * Satyam Sharma <satyam@infradead.org>
+ * Randy Dunlap <randy.dunlap@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
+ * admit liability nor provide warranty for any of this software.
+ * This material is provided "AS-IS" and at no charge.
+ */
+
+/*
+ * Functions/procedures to be called by the core
+ */
+int watchdog_dev_register(struct watchdog_device *);
+int watchdog_dev_unregister(struct watchdog_device *);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index f815283..5f7ff8e 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -11,7 +11,7 @@ config XEN_BALLOON
config XEN_SELFBALLOONING
bool "Dynamically self-balloon kernel memory to target"
- depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP
+ depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP && XEN_TMEM
default n
help
Self-ballooning dynamically balloons available kernel memory driven
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index fd725cd..4f44b34 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -82,7 +82,7 @@ static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
static int get_free_entries(unsigned count)
{
unsigned long flags;
- int ref, rc;
+ int ref, rc = 0;
grant_ref_t head;
spin_lock_irqsave(&gnttab_list_lock, flags);
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 206c4ce0..978d2c6 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -11,7 +11,6 @@
#include <xen/xenbus.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
-#include <linux/workqueue.h>
#include "pciback.h"
#define DRV_NAME "xen-pciback"
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 010937b..6ea852e 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -70,10 +70,11 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
-
+#include <linux/module.h>
+#include <linux/workqueue.h>
#include <xen/balloon.h>
-
#include <xen/tmem.h>
+#include <xen/xen.h>
/* Enable/disable with sysfs. */
static int xen_selfballooning_enabled __read_mostly;