author    Artem B. Bityuckiy <dedekind@infradead.org>   2005-07-06 15:43:18 +0100
committer Thomas Gleixner <tglx@mtd.linutronix.de>      2005-07-06 19:40:38 +0200
commit    b3539219c9ea20ebf6a5ea3cc534f423a3607c41 (patch)
tree      d17c31c0eac0a7290ba5011b59a100fd9e9c9532 /drivers
parent    6430a8def12edebc1c9c7c2621d33ca0e8653c33 (diff)
parent    a18bcb7450840f07a772a45229de4811d930f461 (diff)
Merge with rsync://fileserver/linux
Update to 2.6.12-rc3
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/container.c2
-rw-r--r--drivers/acpi/pci_bind.c27
-rw-r--r--drivers/acpi/pci_irq.c2
-rw-r--r--drivers/acpi/pci_root.c24
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/scan.c126
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/bus.c117
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/driver.c35
-rw-r--r--drivers/base/firmware_class.c13
-rw-r--r--drivers/block/as-iosched.c5
-rw-r--r--drivers/block/cciss.c18
-rw-r--r--drivers/block/cfq-iosched.c2081
-rw-r--r--drivers/block/deadline-iosched.c3
-rw-r--r--drivers/block/elevator.c9
-rw-r--r--drivers/block/ll_rw_blk.c206
-rw-r--r--drivers/block/swim3.c10
-rw-r--r--drivers/block/sx8.c7
-rw-r--r--drivers/bluetooth/bluecard_cs.c9
-rw-r--r--drivers/bluetooth/bt3c_cs.c7
-rw-r--r--drivers/bluetooth/btuart_cs.c7
-rw-r--r--drivers/bluetooth/dtl1_cs.c8
-rw-r--r--drivers/char/agp/amd64-agp.c9
-rw-r--r--drivers/char/drm/Kconfig2
-rw-r--r--drivers/char/drm/Makefile5
-rw-r--r--drivers/char/drm/drmP.h5
-rw-r--r--drivers/char/drm/drm_bufs.c25
-rw-r--r--drivers/char/drm/drm_context.c6
-rw-r--r--drivers/char/drm/drm_ioc32.c1069
-rw-r--r--drivers/char/drm/i915_dma.c24
-rw-r--r--drivers/char/drm/i915_drm.h27
-rw-r--r--drivers/char/drm/i915_drv.c25
-rw-r--r--drivers/char/drm/i915_drv.h24
-rw-r--r--drivers/char/drm/i915_irq.c24
-rw-r--r--drivers/char/drm/i915_mem.c24
-rw-r--r--drivers/char/drm/radeon_drv.c3
-rw-r--r--drivers/char/drm/radeon_drv.h3
-rw-r--r--drivers/char/drm/radeon_ioc32.c395
-rw-r--r--drivers/char/drm/radeon_irq.c27
-rw-r--r--drivers/char/hw_random.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/misc.c3
-rw-r--r--drivers/char/moxa.c2
-rw-r--r--drivers/char/pcmcia/synclink_cs.c9
-rw-r--r--drivers/char/rio/rio_linux.c4
-rw-r--r--drivers/char/rtc.c16
-rw-r--r--drivers/char/tipar.c2
-rw-r--r--drivers/char/tpm/tpm.c2
-rw-r--r--drivers/char/tty_ioctl.c4
-rw-r--r--drivers/char/vt_ioctl.c5
-rw-r--r--drivers/char/watchdog/i8xx_tco.c2
-rw-r--r--drivers/char/watchdog/ixp2000_wdt.c2
-rw-r--r--drivers/char/watchdog/ixp4xx_wdt.c2
-rw-r--r--drivers/firmware/pcdp.c24
-rw-r--r--drivers/firmware/pcdp.h33
-rw-r--r--drivers/i2c/chips/atxp1.c2
-rw-r--r--drivers/ide/Kconfig6
-rw-r--r--drivers/ide/Makefile1
-rw-r--r--drivers/ide/ide-disk.c4
-rw-r--r--drivers/ide/ide-dma.c1
-rw-r--r--drivers/ide/ide-iops.c3
-rw-r--r--drivers/ide/ide-lib.c13
-rw-r--r--drivers/ide/legacy/hd.c4
-rw-r--r--drivers/ide/legacy/ide-cs.c35
-rw-r--r--drivers/ide/pci/Makefile1
-rw-r--r--drivers/ide/pci/alim15x3.c10
-rw-r--r--drivers/ide/pci/amd74xx.c7
-rw-r--r--drivers/ide/pci/cs5530.c4
-rw-r--r--drivers/ide/pci/cy82c693.c8
-rw-r--r--drivers/ide/pci/generic.c73
-rw-r--r--drivers/ide/pci/hpt366.c470
-rw-r--r--drivers/ide/pci/it8172.c4
-rw-r--r--drivers/ide/pci/it821x.c812
-rw-r--r--drivers/ide/pci/ns87415.c2
-rw-r--r--drivers/ide/pci/opti621.c2
-rw-r--r--drivers/ide/pci/sc1200.c2
-rw-r--r--drivers/ide/pci/serverworks.c10
-rw-r--r--drivers/ide/pci/sl82c105.c6
-rw-r--r--drivers/ide/pci/slc90e66.c2
-rw-r--r--drivers/ide/pci/triflex.c2
-rw-r--r--drivers/ide/pci/via82cxxx.c4
-rw-r--r--drivers/ide/ppc/pmac.c8
-rw-r--r--drivers/ide/setup-pci.c2
-rw-r--r--drivers/ieee1394/ohci1394.c10
-rw-r--r--drivers/infiniband/core/packer.c4
-rw-r--r--drivers/infiniband/core/sa_query.c18
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c531
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.h48
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c101
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_doorbell.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c58
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c32
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mcg.c63
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c367
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h14
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c139
-rw-r--r--drivers/input/evdev.c407
-rw-r--r--drivers/input/gameport/Kconfig14
-rw-r--r--drivers/input/gameport/Makefile2
-rw-r--r--drivers/input/gameport/cs461x.c322
-rw-r--r--drivers/input/gameport/gameport.c34
-rw-r--r--drivers/input/gameport/ns558.c12
-rw-r--r--drivers/input/gameport/vortex.c186
-rw-r--r--drivers/input/input.c37
-rw-r--r--drivers/input/joydev.c116
-rw-r--r--drivers/input/joystick/a3d.c2
-rw-r--r--drivers/input/joystick/adi.c4
-rw-r--r--drivers/input/joystick/amijoy.c29
-rw-r--r--drivers/input/joystick/analog.c4
-rw-r--r--drivers/input/joystick/db9.c17
-rw-r--r--drivers/input/joystick/gamecon.c29
-rw-r--r--drivers/input/joystick/gf2k.c2
-rw-r--r--drivers/input/joystick/grip_mp.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c1
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c1
-rw-r--r--drivers/input/joystick/spaceball.c4
-rw-r--r--drivers/input/joystick/spaceorb.c2
-rw-r--r--drivers/input/joystick/tmdc.c2
-rw-r--r--drivers/input/joystick/turbografx.c35
-rw-r--r--drivers/input/keyboard/atkbd.c6
-rw-r--r--drivers/input/keyboard/corgikbd.c6
-rw-r--r--drivers/input/keyboard/lkkbd.c8
-rw-r--r--drivers/input/keyboard/locomokbd.c28
-rw-r--r--drivers/input/keyboard/maple_keyb.c22
-rw-r--r--drivers/input/misc/uinput.c6
-rw-r--r--drivers/input/mouse/Makefile2
-rw-r--r--drivers/input/mouse/alps.c52
-rw-r--r--drivers/input/mouse/amimouse.c8
-rw-r--r--drivers/input/mouse/inport.c38
-rw-r--r--drivers/input/mouse/lifebook.c134
-rw-r--r--drivers/input/mouse/lifebook.h17
-rw-r--r--drivers/input/mouse/logibm.c17
-rw-r--r--drivers/input/mouse/maplemouse.c75
-rw-r--r--drivers/input/mouse/pc110pad.c21
-rw-r--r--drivers/input/mouse/psmouse-base.c351
-rw-r--r--drivers/input/mouse/psmouse.h4
-rw-r--r--drivers/input/mouse/rpcmouse.c2
-rw-r--r--drivers/input/mouse/vsxxxaa.c4
-rw-r--r--drivers/input/mousedev.c8
-rw-r--r--drivers/input/serio/libps2.c136
-rw-r--r--drivers/input/serio/serio.c89
-rw-r--r--drivers/input/touchscreen/elo.c2
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c180
-rw-r--r--drivers/input/touchscreen/mk712.c21
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c9
-rw-r--r--drivers/isdn/hardware/eicon/dadapter.c2
-rw-r--r--drivers/isdn/hisax/avma1_cs.c8
-rw-r--r--drivers/isdn/hisax/elsa_cs.c8
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c8
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c13
-rw-r--r--drivers/isdn/hisax/teles_cs.c7
-rw-r--r--drivers/isdn/hysdn/hycapi.c20
-rw-r--r--drivers/isdn/hysdn/hysdn_boot.c4
-rw-r--r--drivers/isdn/hysdn/hysdn_defs.h12
-rw-r--r--drivers/isdn/hysdn/hysdn_init.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c4
-rw-r--r--drivers/macintosh/Kconfig42
-rw-r--r--drivers/macintosh/Makefile3
-rw-r--r--drivers/macintosh/adb.c10
-rw-r--r--drivers/macintosh/macserial.c3036
-rw-r--r--drivers/macintosh/macserial.h461
-rw-r--r--drivers/macintosh/via-pmu.c78
-rw-r--r--drivers/md/md.c1
-rw-r--r--drivers/media/video/Kconfig2
-rw-r--r--drivers/media/video/Makefile3
-rw-r--r--drivers/media/video/bttv-driver.c73
-rw-r--r--drivers/media/video/bttvp.h5
-rw-r--r--drivers/media/video/mt20xx.c6
-rw-r--r--drivers/media/video/tda8290.c20
-rw-r--r--drivers/media/video/tda9887.c7
-rw-r--r--drivers/media/video/tea5767.c334
-rw-r--r--drivers/media/video/tuner-core.c163
-rw-r--r--drivers/media/video/tuner-simple.c34
-rw-r--r--drivers/message/fusion/mptfc.c4
-rw-r--r--drivers/message/fusion/mptscsih.c10
-rw-r--r--drivers/message/fusion/mptscsih.h2
-rw-r--r--drivers/message/fusion/mptspi.c4
-rw-r--r--drivers/mmc/mmci.c9
-rw-r--r--drivers/mmc/wbsd.c80
-rw-r--r--drivers/mmc/wbsd.h9
-rw-r--r--drivers/mtd/afs.c16
-rw-r--r--drivers/mtd/maps/Kconfig10
-rw-r--r--drivers/mtd/maps/pcmciamtd.c29
-rw-r--r--drivers/net/3c503.c16
-rw-r--r--drivers/net/3c505.c6
-rw-r--r--drivers/net/3c509.c1
-rw-r--r--drivers/net/3c515.c16
-rw-r--r--drivers/net/3c523.c19
-rw-r--r--drivers/net/3c59x.c6
-rw-r--r--drivers/net/8139cp.c25
-rw-r--r--drivers/net/82596.c14
-rw-r--r--drivers/net/Kconfig8
-rw-r--r--drivers/net/ac3200.c19
-rw-r--r--drivers/net/acenic.c5
-rwxr-xr-xdrivers/net/amd8111e.c8
-rw-r--r--drivers/net/arm/etherh.c16
-rw-r--r--drivers/net/at1700.c4
-rw-r--r--drivers/net/b44.c3
-rw-r--r--drivers/net/bonding/bond_3ad.c3
-rw-r--r--drivers/net/bonding/bond_main.c390
-rw-r--r--drivers/net/bonding/bonding.h12
-rw-r--r--drivers/net/cs89x0.c59
-rw-r--r--drivers/net/cs89x0.h2
-rw-r--r--drivers/net/defxx.c88
-rw-r--r--drivers/net/dl2k.c8
-rw-r--r--drivers/net/dm9000.c4
-rw-r--r--drivers/net/e100.c37
-rw-r--r--drivers/net/e1000/e1000.h4
-rw-r--r--drivers/net/e1000/e1000_ethtool.c131
-rw-r--r--drivers/net/e1000/e1000_hw.c23
-rw-r--r--drivers/net/e1000/e1000_hw.h1
-rw-r--r--drivers/net/e1000/e1000_main.c111
-rw-r--r--drivers/net/e2100.c15
-rw-r--r--drivers/net/eepro.c21
-rw-r--r--drivers/net/eepro100.c8
-rw-r--r--drivers/net/eexpress.c12
-rw-r--r--drivers/net/epic100.c6
-rw-r--r--drivers/net/es3210.c16
-rw-r--r--drivers/net/eth16i.c20
-rw-r--r--drivers/net/ewrk3.c12
-rw-r--r--drivers/net/fealnx.c11
-rw-r--r--drivers/net/forcedeth.c53
-rw-r--r--drivers/net/gianfar.c652
-rw-r--r--drivers/net/gianfar.h363
-rw-r--r--drivers/net/gianfar_ethtool.c277
-rw-r--r--drivers/net/gianfar_phy.c2
-rw-r--r--drivers/net/hamachi.c12
-rw-r--r--drivers/net/hp-plus.c15
-rw-r--r--drivers/net/hp.c17
-rw-r--r--drivers/net/hp100.c44
-rw-r--r--drivers/net/isa-skeleton.c20
-rw-r--r--drivers/net/ixgb/ixgb_main.c5
-rw-r--r--drivers/net/lance.c17
-rw-r--r--drivers/net/lasi_82596.c8
-rw-r--r--drivers/net/lne390.c19
-rw-r--r--drivers/net/myri_code.h1283
-rw-r--r--drivers/net/natsemi.c4
-rw-r--r--drivers/net/ne-h8300.c19
-rw-r--r--drivers/net/ne.c18
-rw-r--r--drivers/net/ne2.c19
-rw-r--r--drivers/net/ns83820.c5
-rw-r--r--drivers/net/pcmcia/3c574_cs.c8
-rw-r--r--drivers/net/pcmcia/3c589_cs.c12
-rw-r--r--drivers/net/pcmcia/axnet_cs.c29
-rw-r--r--drivers/net/pcmcia/com20020_cs.c7
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c30
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c11
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c8
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c213
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c320
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c28
-rw-r--r--drivers/net/pcnet32.c8
-rw-r--r--drivers/net/r8169.c4
-rw-r--r--drivers/net/s2io.c15
-rw-r--r--drivers/net/sb1000.c14
-rw-r--r--drivers/net/sb1250-mac.c4
-rw-r--r--drivers/net/shaper.c42
-rw-r--r--drivers/net/sis900.c11
-rw-r--r--drivers/net/sk98lin/skge.c5
-rw-r--r--drivers/net/skfp/Makefile4
-rw-r--r--drivers/net/skfp/drvfbi.c222
-rw-r--r--drivers/net/skfp/ess.c4
-rw-r--r--drivers/net/skfp/fplustm.c70
-rw-r--r--drivers/net/skfp/h/cmtdef.h7
-rw-r--r--drivers/net/skfp/h/hwmtm.h25
-rw-r--r--drivers/net/skfp/h/osdef1st.h2
-rw-r--r--drivers/net/skfp/hwmtm.c34
-rw-r--r--drivers/net/skfp/lnkstat.c204
-rw-r--r--drivers/net/skfp/pcmplc.c7
-rw-r--r--drivers/net/skfp/pmf.c11
-rw-r--r--drivers/net/skfp/skfddi.c1
-rw-r--r--drivers/net/skfp/smt.c48
-rw-r--r--drivers/net/skfp/smtdef.c5
-rw-r--r--drivers/net/skfp/smtparse.c467
-rw-r--r--drivers/net/skge.c1710
-rw-r--r--drivers/net/skge.h587
-rw-r--r--drivers/net/slip.c7
-rw-r--r--drivers/net/smc-mca.c60
-rw-r--r--drivers/net/smc-mca.h61
-rw-r--r--drivers/net/smc-ultra.c15
-rw-r--r--drivers/net/smc91x.c45
-rw-r--r--drivers/net/smc91x.h13
-rw-r--r--drivers/net/starfire.c6
-rw-r--r--drivers/net/sundance.c6
-rw-r--r--drivers/net/sungem.c5
-rw-r--r--drivers/net/tg3.c69
-rw-r--r--drivers/net/tg3.h10
-rw-r--r--drivers/net/tlan.c3
-rw-r--r--drivers/net/tokenring/3c359.c3
-rw-r--r--drivers/net/tokenring/3c359_microcode.h2
-rw-r--r--drivers/net/tokenring/abyss.c11
-rw-r--r--drivers/net/tokenring/ibmtr.c28
-rw-r--r--drivers/net/tokenring/lanstreamer.c9
-rw-r--r--drivers/net/tokenring/madgemc.c14
-rw-r--r--drivers/net/tokenring/proteon.c11
-rw-r--r--drivers/net/tokenring/skisa.c11
-rw-r--r--drivers/net/tokenring/smctr.c2
-rw-r--r--drivers/net/tokenring/smctr_firmware.h2
-rw-r--r--drivers/net/tokenring/tms380tr.c13
-rw-r--r--drivers/net/tokenring/tmspci.c11
-rw-r--r--drivers/net/tulip/de2104x.c6
-rw-r--r--drivers/net/tulip/dmfe.c23
-rw-r--r--drivers/net/tulip/eeprom.c16
-rw-r--r--drivers/net/tulip/interrupt.c10
-rw-r--r--drivers/net/tulip/media.c3
-rw-r--r--drivers/net/tulip/tulip_core.c50
-rw-r--r--drivers/net/tulip/winbond-840.c9
-rw-r--r--drivers/net/tulip/xircom_tulip_cb.c4
-rw-r--r--drivers/net/typhoon.c4
-rw-r--r--drivers/net/via-rhine.c26
-rw-r--r--drivers/net/via-velocity.c6
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wan/hdlc_cisco.c2
-rw-r--r--drivers/net/wan/wanxl.c5
-rw-r--r--drivers/net/wd.c18
-rw-r--r--drivers/net/wireless/airo.c12
-rw-r--r--drivers/net/wireless/airo_cs.c10
-rw-r--r--drivers/net/wireless/arlan-main.c26
-rw-r--r--drivers/net/wireless/atmel_cs.c22
-rw-r--r--drivers/net/wireless/netwave_cs.c7
-rw-r--r--drivers/net/wireless/orinoco.c2465
-rw-r--r--drivers/net/wireless/orinoco.h30
-rw-r--r--drivers/net/wireless/orinoco_cs.c51
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.c6
-rw-r--r--drivers/net/wireless/ray_cs.c7
-rw-r--r--drivers/net/wireless/wavelan_cs.c10
-rw-r--r--drivers/net/wireless/wl3501_cs.c7
-rw-r--r--drivers/net/yellowfin.c8
-rw-r--r--drivers/parisc/dino.c1
-rw-r--r--drivers/parisc/lba_pci.c2
-rw-r--r--drivers/parport/parport_cs.c9
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/bus.c11
-rw-r--r--drivers/pci/hotplug.c2
-rw-r--r--drivers/pci/hotplug/Makefile4
-rw-r--r--drivers/pci/hotplug/acpiphp.h47
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c9
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c882
-rw-r--r--drivers/pci/hotplug/acpiphp_pci.c449
-rw-r--r--drivers/pci/hotplug/acpiphp_res.c700
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c5
-rw-r--r--drivers/pci/msi.c88
-rw-r--r--drivers/pci/msi.h9
-rw-r--r--drivers/pci/pci-driver.c196
-rw-r--r--drivers/pci/pci-sysfs.c26
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/pci/pcie/portdrv.h5
-rw-r--r--drivers/pci/pcie/portdrv_core.c8
-rw-r--r--drivers/pci/pcie/portdrv_pci.c79
-rw-r--r--drivers/pci/probe.c53
-rw-r--r--drivers/pci/proc.c14
-rw-r--r--drivers/pci/quirks.c1
-rw-r--r--drivers/pci/remove.c14
-rw-r--r--drivers/pci/setup-bus.c7
-rw-r--r--drivers/pcmcia/Kconfig43
-rw-r--r--drivers/pcmcia/Makefile3
-rw-r--r--drivers/pcmcia/cistpl.c28
-rw-r--r--drivers/pcmcia/cs.c1164
-rw-r--r--drivers/pcmcia/cs_internal.h13
-rw-r--r--drivers/pcmcia/ds.c1255
-rw-r--r--drivers/pcmcia/ds_internal.h21
-rw-r--r--drivers/pcmcia/i82365.c23
-rw-r--r--drivers/pcmcia/pcmcia_compat.c34
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c786
-rw-r--r--drivers/pcmcia/pcmcia_resource.c998
-rw-r--r--drivers/pcmcia/rsrc_mgr.c11
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c170
-rw-r--r--drivers/pcmcia/socket_sysfs.c166
-rw-r--r--drivers/pcmcia/yenta_socket.c6
-rw-r--r--drivers/sbus/char/bpp.c20
-rw-r--r--drivers/scsi/3w-9xxx.c8
-rw-r--r--drivers/scsi/3w-xxxx.c8
-rw-r--r--drivers/scsi/ahci.c22
-rw-r--r--drivers/scsi/ipr.c10
-rw-r--r--drivers/scsi/libata-core.c6
-rw-r--r--drivers/scsi/libata-scsi.c16
-rw-r--r--drivers/scsi/megaraid.c8
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c11
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c10
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c13
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c22
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c9
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/serial/68328serial.c17
-rw-r--r--drivers/serial/8250.c61
-rw-r--r--drivers/serial/8250_accent.c47
-rw-r--r--drivers/serial/8250_boca.c61
-rw-r--r--drivers/serial/8250_fourport.c53
-rw-r--r--drivers/serial/8250_hub6.c58
-rw-r--r--drivers/serial/8250_mca.c64
-rw-r--r--drivers/serial/Kconfig77
-rw-r--r--drivers/serial/Makefile5
-rw-r--r--drivers/serial/au1x00_uart.c3
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_cpm1.c32
-rw-r--r--drivers/serial/ip22zilog.c13
-rw-r--r--drivers/serial/mpsc.c3
-rw-r--r--drivers/serial/pmac_zilog.c4
-rw-r--r--drivers/serial/pxa.c3
-rw-r--r--drivers/serial/s3c2410.c5
-rw-r--r--drivers/serial/serial_core.c42
-rw-r--r--drivers/serial/serial_cs.c106
-rw-r--r--drivers/serial/serial_txx9.c3
-rw-r--r--drivers/serial/sunsab.c7
-rw-r--r--drivers/serial/sunsu.c3
-rw-r--r--drivers/serial/sunzilog.c13
-rw-r--r--drivers/telephony/ixj_pcmcia.c7
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/atm/Kconfig50
-rw-r--r--drivers/usb/atm/Makefile7
-rw-r--r--drivers/usb/atm/cxacru.c878
-rw-r--r--drivers/usb/atm/speedtch.c1085
-rw-r--r--drivers/usb/atm/usb_atm.c1188
-rw-r--r--drivers/usb/atm/usb_atm.h176
-rw-r--r--drivers/usb/atm/usbatm.c1230
-rw-r--r--drivers/usb/atm/usbatm.h184
-rw-r--r--drivers/usb/atm/xusbatm.c196
-rw-r--r--drivers/usb/class/cdc-acm.c209
-rw-r--r--drivers/usb/class/cdc-acm.h25
-rw-r--r--drivers/usb/class/usblp.c3
-rw-r--r--drivers/usb/core/devio.c6
-rw-r--r--drivers/usb/core/hcd.c279
-rw-r--r--drivers/usb/core/hcd.h19
-rw-r--r--drivers/usb/core/hub.c19
-rw-r--r--drivers/usb/core/hub.h11
-rw-r--r--drivers/usb/gadget/Kconfig11
-rw-r--r--drivers/usb/gadget/dummy_hcd.c745
-rw-r--r--drivers/usb/gadget/ether.c353
-rw-r--r--drivers/usb/gadget/file_storage.c61
-rw-r--r--drivers/usb/gadget/goku_udc.c28
-rw-r--r--drivers/usb/gadget/inode.c12
-rw-r--r--drivers/usb/gadget/ndis.h14
-rw-r--r--drivers/usb/gadget/net2280.c51
-rw-r--r--drivers/usb/gadget/omap_udc.c301
-rw-r--r--drivers/usb/gadget/omap_udc.h4
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.c43
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.h10
-rw-r--r--drivers/usb/gadget/rndis.c515
-rw-r--r--drivers/usb/gadget/rndis.h95
-rw-r--r--drivers/usb/gadget/serial.c36
-rw-r--r--drivers/usb/gadget/zero.c6
-rw-r--r--drivers/usb/host/Kconfig13
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-dbg.c59
-rw-r--r--drivers/usb/host/ehci-hcd.c58
-rw-r--r--drivers/usb/host/ehci-hub.c2
-rw-r--r--drivers/usb/host/ehci-q.c2
-rw-r--r--drivers/usb/host/ehci-sched.c17
-rw-r--r--drivers/usb/host/isp116x-hcd.c1875
-rw-r--r--drivers/usb/host/isp116x.h583
-rw-r--r--drivers/usb/host/ohci-hcd.c58
-rw-r--r--drivers/usb/host/ohci-mem.c1
-rw-r--r--drivers/usb/host/ohci-omap.c4
-rw-r--r--drivers/usb/host/ohci-pci.c13
-rw-r--r--drivers/usb/host/ohci.h2
-rw-r--r--drivers/usb/host/sl811-hcd.c18
-rw-r--r--drivers/usb/host/sl811_cs.c21
-rw-r--r--drivers/usb/host/uhci-debug.c32
-rw-r--r--drivers/usb/host/uhci-hcd.c773
-rw-r--r--drivers/usb/host/uhci-hcd.h59
-rw-r--r--drivers/usb/host/uhci-hub.c83
-rw-r--r--drivers/usb/host/uhci-q.c58
-rw-r--r--drivers/usb/input/Kconfig24
-rw-r--r--drivers/usb/input/Makefile2
-rw-r--r--drivers/usb/input/acecad.c285
-rw-r--r--drivers/usb/input/aiptek.c32
-rw-r--r--drivers/usb/input/ati_remote.c248
-rw-r--r--drivers/usb/input/hid-core.c26
-rw-r--r--drivers/usb/input/hid-debug.h16
-rw-r--r--drivers/usb/input/hid-input.c16
-rw-r--r--drivers/usb/input/hid-lgff.c18
-rw-r--r--drivers/usb/input/hid.h18
-rw-r--r--drivers/usb/input/hiddev.c56
-rw-r--r--drivers/usb/input/itmtouch.c268
-rw-r--r--drivers/usb/input/kbtab.c17
-rw-r--r--drivers/usb/input/mtouchusb.c410
-rw-r--r--drivers/usb/input/powermate.c30
-rw-r--r--drivers/usb/input/touchkitusb.c11
-rw-r--r--drivers/usb/input/usbkbd.c29
-rw-r--r--drivers/usb/input/usbmouse.c31
-rw-r--r--drivers/usb/input/wacom.c398
-rw-r--r--drivers/usb/input/xpad.c75
-rw-r--r--drivers/usb/media/stv680.c8
-rw-r--r--drivers/usb/media/stv680.h5
-rw-r--r--drivers/usb/misc/idmouse.c149
-rw-r--r--drivers/usb/misc/usbtest.c60
-rw-r--r--drivers/usb/net/pegasus.c2
-rw-r--r--drivers/usb/net/pegasus.h2
-rw-r--r--drivers/usb/net/rtl8150.c2
-rw-r--r--drivers/usb/net/usbnet.c2
-rw-r--r--drivers/usb/net/zd1201.c41
-rw-r--r--drivers/usb/net/zd1201.h1
-rw-r--r--drivers/usb/serial/cyberjack.c19
-rw-r--r--drivers/usb/serial/generic.c24
-rw-r--r--drivers/usb/serial/ipaq.c5
-rw-r--r--drivers/usb/serial/ipw.c14
-rw-r--r--drivers/usb/serial/ir-usb.c16
-rw-r--r--drivers/usb/serial/keyspan_pda.c19
-rw-r--r--drivers/usb/serial/omninet.c17
-rw-r--r--drivers/usb/serial/safe_serial.c13
-rw-r--r--drivers/usb/serial/usb-serial.c1
-rw-r--r--drivers/usb/serial/usb-serial.h3
-rw-r--r--drivers/usb/storage/scsiglue.c54
-rw-r--r--drivers/usb/storage/scsiglue.h1
-rw-r--r--drivers/usb/storage/transport.c116
-rw-r--r--drivers/usb/storage/transport.h1
-rw-r--r--drivers/video/aty/aty128fb.c14
-rw-r--r--drivers/video/chipsfb.c176
-rw-r--r--drivers/video/console/fbcon.c8
515 files changed, 26856 insertions, 21660 deletions
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 5a0adbf8bc0..97013ddfa20 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -153,7 +153,7 @@ container_device_add(struct acpi_device **device, acpi_handle handle)
return_VALUE(-ENODEV);
}
- result = acpi_bus_scan(*device);
+ result = acpi_bus_start(*device);
return_VALUE(result);
}
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 5d19b39e9e2..5148f3c10b5 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -61,15 +61,14 @@ acpi_pci_data_handler (
/**
- * acpi_os_get_pci_id
+ * acpi_get_pci_id
* ------------------
* This function is used by the ACPI Interpreter (a.k.a. Core Subsystem)
* to resolve PCI information for ACPI-PCI devices defined in the namespace.
* This typically occurs when resolving PCI operation region information.
*/
-#ifdef ACPI_FUTURE_USAGE
acpi_status
-acpi_os_get_pci_id (
+acpi_get_pci_id (
acpi_handle handle,
struct acpi_pci_id *id)
{
@@ -78,7 +77,7 @@ acpi_os_get_pci_id (
struct acpi_device *device = NULL;
struct acpi_pci_data *data = NULL;
- ACPI_FUNCTION_TRACE("acpi_os_get_pci_id");
+ ACPI_FUNCTION_TRACE("acpi_get_pci_id");
if (!id)
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -92,7 +91,7 @@ acpi_os_get_pci_id (
}
status = acpi_get_data(handle, acpi_pci_data_handler, (void**) &data);
- if (ACPI_FAILURE(status) || !data || !data->dev) {
+ if (ACPI_FAILURE(status) || !data) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Invalid ACPI-PCI context for device %s\n",
acpi_device_bid(device)));
@@ -115,7 +114,7 @@ acpi_os_get_pci_id (
return_ACPI_STATUS(AE_OK);
}
-#endif /* ACPI_FUTURE_USAGE */
+EXPORT_SYMBOL(acpi_get_pci_id);
int
@@ -129,6 +128,8 @@ acpi_pci_bind (
char *pathname = NULL;
struct acpi_buffer buffer = {0, NULL};
acpi_handle handle = NULL;
+ struct pci_dev *dev;
+ struct pci_bus *bus;
ACPI_FUNCTION_TRACE("acpi_pci_bind");
@@ -193,8 +194,20 @@ acpi_pci_bind (
* Locate matching device in PCI namespace. If it doesn't exist
* this typically means that the device isn't currently inserted
* (e.g. docking station, port replicator, etc.).
+ * We cannot simply search the global pci device list, since
+ * PCI devices are added to the global pci list when the root
+ * bridge start ops are run, which may not have happened yet.
*/
- data->dev = pci_find_slot(data->id.bus, PCI_DEVFN(data->id.device, data->id.function));
+ bus = pci_find_bus(data->id.segment, data->id.bus);
+ if (bus) {
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->devfn == PCI_DEVFN(data->id.device,
+ data->id.function)) {
+ data->dev = dev;
+ break;
+ }
+ }
+ }
if (!data->dev) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device %02x:%02x:%02x.%02x not present in PCI namespace\n",
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 8dbf802ee7f..d1f42b97282 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -433,7 +433,7 @@ acpi_pci_irq_enable (
printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
pci_name(dev), ('A' + pin));
/* Interrupt Line values above 0xF are forbidden */
- if (dev->irq >= 0 && (dev->irq <= 0xF)) {
+ if (dev->irq > 0 && (dev->irq <= 0xF)) {
printk(" - using IRQ %d\n", dev->irq);
acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
return_VALUE(0);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 7e6b8e3b2ed..5d2f77fcd50 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -46,6 +46,7 @@ ACPI_MODULE_NAME ("pci_root")
static int acpi_pci_root_add (struct acpi_device *device);
static int acpi_pci_root_remove (struct acpi_device *device, int type);
+static int acpi_pci_root_start (struct acpi_device *device);
static struct acpi_driver acpi_pci_root_driver = {
.name = ACPI_PCI_ROOT_DRIVER_NAME,
@@ -54,6 +55,7 @@ static struct acpi_driver acpi_pci_root_driver = {
.ops = {
.add = acpi_pci_root_add,
.remove = acpi_pci_root_remove,
+ .start = acpi_pci_root_start,
},
};
@@ -169,6 +171,7 @@ acpi_pci_root_add (
if (!root)
return_VALUE(-ENOMEM);
memset(root, 0, sizeof(struct acpi_pci_root));
+ INIT_LIST_HEAD(&root->node);
root->handle = device->handle;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
@@ -298,12 +301,31 @@ acpi_pci_root_add (
root->id.bus);
end:
- if (result)
+ if (result) {
+ if (!list_empty(&root->node))
+ list_del(&root->node);
kfree(root);
+ }
return_VALUE(result);
}
+static int
+acpi_pci_root_start (
+ struct acpi_device *device)
+{
+ struct acpi_pci_root *root;
+
+ ACPI_FUNCTION_TRACE("acpi_pci_root_start");
+
+ list_for_each_entry(root, &acpi_pci_roots, node) {
+ if (root->handle == device->handle) {
+ pci_bus_add_devices(root->bus);
+ return_VALUE(0);
+ }
+ }
+ return_VALUE(-ENODEV);
+}
static int
acpi_pci_root_remove (
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index f4778747e88..76156ac91bd 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -723,7 +723,7 @@ int acpi_processor_device_add(
return_VALUE(-ENODEV);
}
- acpi_bus_scan(*device);
+ acpi_bus_start(*device);
pr = acpi_driver_data(*device);
if (!pr)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e8588559328..337d49b5564 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -553,20 +553,29 @@ acpi_bus_driver_init (
* upon possible configuration and currently allocated resources.
*/
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Driver successfully bound to device\n"));
+ return_VALUE(0);
+}
+
+int
+acpi_start_single_object (
+ struct acpi_device *device)
+{
+ int result = 0;
+ struct acpi_driver *driver;
+
+ ACPI_FUNCTION_TRACE("acpi_start_single_object");
+
+ if (!(driver = device->driver))
+ return_VALUE(0);
+
if (driver->ops.start) {
result = driver->ops.start(device);
if (result && driver->ops.remove)
driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
- return_VALUE(result);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Driver successfully bound to device\n"));
-
- if (driver->ops.scan) {
- driver->ops.scan(device);
- }
-
- return_VALUE(0);
+ return_VALUE(result);
}
static int acpi_driver_attach(struct acpi_driver * drv)
@@ -586,6 +595,7 @@ static int acpi_driver_attach(struct acpi_driver * drv)
if (!acpi_bus_match(dev, drv)) {
if (!acpi_bus_driver_init(dev, drv)) {
+ acpi_start_single_object(dev);
atomic_inc(&drv->references);
count++;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
@@ -1009,8 +1019,8 @@ acpi_bus_remove (
}
-int
-acpi_bus_add (
+static int
+acpi_add_single_object (
struct acpi_device **child,
struct acpi_device *parent,
acpi_handle handle,
@@ -1019,7 +1029,7 @@ acpi_bus_add (
int result = 0;
struct acpi_device *device = NULL;
- ACPI_FUNCTION_TRACE("acpi_bus_add");
+ ACPI_FUNCTION_TRACE("acpi_add_single_object");
if (!child)
return_VALUE(-EINVAL);
@@ -1140,7 +1150,7 @@ acpi_bus_add (
*
* TBD: Assumes LDM provides driver hot-plug capability.
*/
- acpi_bus_find_driver(device);
+ result = acpi_bus_find_driver(device);
end:
if (!result)
@@ -1153,10 +1163,10 @@ end:
return_VALUE(result);
}
-EXPORT_SYMBOL(acpi_bus_add);
-int acpi_bus_scan (struct acpi_device *start)
+static int acpi_bus_scan (struct acpi_device *start,
+ struct acpi_bus_ops *ops)
{
acpi_status status = AE_OK;
struct acpi_device *parent = NULL;
@@ -1229,9 +1239,20 @@ int acpi_bus_scan (struct acpi_device *start)
continue;
}
- status = acpi_bus_add(&child, parent, chandle, type);
- if (ACPI_FAILURE(status))
- continue;
+ if (ops->acpi_op_add)
+ status = acpi_add_single_object(&child, parent,
+ chandle, type);
+ else
+ status = acpi_bus_get_device(chandle, &child);
+
+ if (ACPI_FAILURE(status))
+ continue;
+
+ if (ops->acpi_op_start) {
+ status = acpi_start_single_object(child);
+ if (ACPI_FAILURE(status))
+ continue;
+ }
/*
* If the device is present, enabled, and functioning then
@@ -1257,8 +1278,50 @@ int acpi_bus_scan (struct acpi_device *start)
return_VALUE(0);
}
-EXPORT_SYMBOL(acpi_bus_scan);
+int
+acpi_bus_add (
+ struct acpi_device **child,
+ struct acpi_device *parent,
+ acpi_handle handle,
+ int type)
+{
+ int result;
+ struct acpi_bus_ops ops;
+
+ ACPI_FUNCTION_TRACE("acpi_bus_add");
+
+ result = acpi_add_single_object(child, parent, handle, type);
+ if (!result) {
+ memset(&ops, 0, sizeof(ops));
+ ops.acpi_op_add = 1;
+ result = acpi_bus_scan(*child, &ops);
+ }
+ return_VALUE(result);
+}
+EXPORT_SYMBOL(acpi_bus_add);
+
+int
+acpi_bus_start (
+ struct acpi_device *device)
+{
+ int result;
+ struct acpi_bus_ops ops;
+
+ ACPI_FUNCTION_TRACE("acpi_bus_start");
+
+ if (!device)
+ return_VALUE(-EINVAL);
+
+ result = acpi_start_single_object(device);
+ if (!result) {
+ memset(&ops, 0, sizeof(ops));
+ ops.acpi_op_start = 1;
+ result = acpi_bus_scan(device, &ops);
+ }
+ return_VALUE(result);
+}
+EXPORT_SYMBOL(acpi_bus_start);
static int
acpi_bus_trim(struct acpi_device *start,
@@ -1331,13 +1394,19 @@ acpi_bus_scan_fixed (
/*
* Enumerate all fixed-feature devices.
*/
- if (acpi_fadt.pwr_button == 0)
- result = acpi_bus_add(&device, acpi_root,
+ if (acpi_fadt.pwr_button == 0) {
+ result = acpi_add_single_object(&device, acpi_root,
NULL, ACPI_BUS_TYPE_POWER_BUTTON);
+ if (!result)
+ result = acpi_start_single_object(device);
+ }
- if (acpi_fadt.sleep_button == 0)
- result = acpi_bus_add(&device, acpi_root,
+ if (acpi_fadt.sleep_button == 0) {
+ result = acpi_add_single_object(&device, acpi_root,
NULL, ACPI_BUS_TYPE_SLEEP_BUTTON);
+ if (!result)
+ result = acpi_start_single_object(device);
+ }
return_VALUE(result);
}
@@ -1346,6 +1415,7 @@ acpi_bus_scan_fixed (
static int __init acpi_scan_init(void)
{
int result;
+ struct acpi_bus_ops ops;
ACPI_FUNCTION_TRACE("acpi_scan_init");
@@ -1357,17 +1427,23 @@ static int __init acpi_scan_init(void)
/*
* Create the root device in the bus's device tree
*/
- result = acpi_bus_add(&acpi_root, NULL, ACPI_ROOT_OBJECT,
+ result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
ACPI_BUS_TYPE_SYSTEM);
if (result)
goto Done;
+ result = acpi_start_single_object(acpi_root);
+
/*
* Enumerate devices in the ACPI namespace.
*/
result = acpi_bus_scan_fixed(acpi_root);
- if (!result)
- result = acpi_bus_scan(acpi_root);
+ if (!result) {
+ memset(&ops, 0, sizeof(ops));
+ ops.acpi_op_add = 1;
+ ops.acpi_op_start = 1;
+ result = acpi_bus_scan(acpi_root, &ops);
+ }
if (result)
acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
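
[Editor's note] The scan.c rework above splits ACPI enumeration into an "add" phase (create the acpi_device objects) and a "start" phase (run each bound driver's ->start op). A minimal sketch of how a hot-plug path such as the container driver is expected to use the exported pair; the handler name and the ACPI_BUS_TYPE_DEVICE argument are assumptions for the example, only acpi_bus_add() and acpi_bus_start() come from this patch:

/*
 * Illustrative only: a container-style hot-plug handler using the
 * new two-phase API.
 */
#include <acpi/acpi_bus.h>

static int example_hotplug_add(acpi_handle handle, struct acpi_device *parent)
{
        struct acpi_device *device = NULL;
        int result;

        /* phase 1: create the acpi_device and enumerate its children */
        result = acpi_bus_add(&device, parent, handle, ACPI_BUS_TYPE_DEVICE);
        if (result)
                return result;

        /* phase 2: run the drivers' ->start ops over the new subtree
         * (for a PCI root bridge this is where pci_bus_add_devices() runs) */
        return acpi_bus_start(device);
}
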
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 645f6269292..783752b68a9 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -5,6 +5,7 @@ extern int bus_add_driver(struct device_driver *);
extern void bus_remove_driver(struct device_driver *);
extern void driver_detach(struct device_driver * drv);
+extern int driver_probe_device(struct device_driver *, struct device *);
static inline struct class_device *to_class_dev(struct kobject *obj)
{
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index c3fac7fd555..96fe2f95675 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -133,6 +133,58 @@ static struct kobj_type ktype_bus = {
decl_subsys(bus, &ktype_bus, NULL);
+/* Manually detach a device from it's associated driver. */
+static int driver_helper(struct device *dev, void *data)
+{
+ const char *name = data;
+
+ if (strcmp(name, dev->bus_id) == 0)
+ return 1;
+ return 0;
+}
+
+static ssize_t driver_unbind(struct device_driver *drv,
+ const char *buf, size_t count)
+{
+ struct bus_type *bus = get_bus(drv->bus);
+ struct device *dev;
+ int err = -ENODEV;
+
+ dev = bus_find_device(bus, NULL, (void *)buf, driver_helper);
+ if ((dev) &&
+ (dev->driver == drv)) {
+ device_release_driver(dev);
+ err = count;
+ }
+ return err;
+}
+static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind);
+
+/*
+ * Manually attach a device to a driver.
+ * Note: the driver must want to bind to the device,
+ * it is not possible to override the driver's id table.
+ */
+static ssize_t driver_bind(struct device_driver *drv,
+ const char *buf, size_t count)
+{
+ struct bus_type *bus = get_bus(drv->bus);
+ struct device *dev;
+ int err = -ENODEV;
+
+ dev = bus_find_device(bus, NULL, (void *)buf, driver_helper);
+ if ((dev) &&
+ (dev->driver == NULL)) {
+ down(&dev->sem);
+ err = driver_probe_device(drv, dev);
+ up(&dev->sem);
+ put_device(dev);
+ }
+ return err;
+}
+static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind);
+
+
static struct device * next_device(struct klist_iter * i)
{
struct klist_node * n = klist_next(i);
@@ -177,6 +229,39 @@ int bus_for_each_dev(struct bus_type * bus, struct device * start,
return error;
}
+/**
+ * bus_find_device - device iterator for locating a particular device.
+ * @bus: bus type
+ * @start: Device to begin with
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * This is similar to the bus_for_each_dev() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
+ */
+struct device * bus_find_device(struct bus_type *bus,
+ struct device *start, void *data,
+ int (*match)(struct device *, void *))
+{
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!bus)
+ return NULL;
+
+ klist_iter_init_node(&bus->klist_devices, &i,
+ (start ? &start->knode_bus : NULL));
+ while ((dev = next_device(&i)))
+ if (match(dev, data) && get_device(dev))
+ break;
+ klist_iter_exit(&i);
+ return dev;
+}
static struct device_driver * next_driver(struct klist_iter * i)
@@ -363,6 +448,8 @@ int bus_add_driver(struct device_driver * drv)
module_add_driver(drv->owner, drv);
driver_add_attrs(bus, drv);
+ driver_create_file(drv, &driver_attr_unbind);
+ driver_create_file(drv, &driver_attr_bind);
}
return error;
}
@@ -380,6 +467,8 @@ int bus_add_driver(struct device_driver * drv)
void bus_remove_driver(struct device_driver * drv)
{
if (drv->bus) {
+ driver_remove_file(drv, &driver_attr_bind);
+ driver_remove_file(drv, &driver_attr_unbind);
driver_remove_attrs(drv->bus, drv);
klist_remove(&drv->knode_bus);
pr_debug("bus %s: remove driver %s\n", drv->bus->name, drv->name);
@@ -394,31 +483,22 @@ void bus_remove_driver(struct device_driver * drv)
/* Helper for bus_rescan_devices's iter */
static int bus_rescan_devices_helper(struct device *dev, void *data)
{
- int *count = data;
-
- if (!dev->driver && (device_attach(dev) > 0))
- (*count)++;
-
+ if (!dev->driver)
+ device_attach(dev);
return 0;
}
-
/**
- * bus_rescan_devices - rescan devices on the bus for possible drivers
- * @bus: the bus to scan.
+ * bus_rescan_devices - rescan devices on the bus for possible drivers
+ * @bus: the bus to scan.
*
- * This function will look for devices on the bus with no driver
- * attached and rescan it against existing drivers to see if it
- * matches any. Calls device_attach(). Returns the number of devices
- * that were sucessfully bound to a driver.
+ * This function will look for devices on the bus with no driver
+ * attached and rescan it against existing drivers to see if it matches
+ * any by calling device_attach() for the unbound devices.
*/
-int bus_rescan_devices(struct bus_type * bus)
+void bus_rescan_devices(struct bus_type * bus)
{
- int count = 0;
-
- bus_for_each_dev(bus, NULL, &count, bus_rescan_devices_helper);
-
- return count;
+ bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper);
}
@@ -557,6 +637,7 @@ int __init buses_init(void)
EXPORT_SYMBOL_GPL(bus_for_each_dev);
+EXPORT_SYMBOL_GPL(bus_find_device);
EXPORT_SYMBOL_GPL(bus_for_each_drv);
EXPORT_SYMBOL_GPL(bus_add_device);
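
[Editor's note] bus_find_device(), added above, returns the first device on a bus for which the match callback returns non-zero, with a reference already taken. A small illustrative helper (names invented here) that mirrors the driver_helper() match used by the new bind/unbind attributes:

/* Illustrative only: look up a device on a bus by its bus_id string. */
#include <linux/device.h>
#include <linux/string.h>

static int match_bus_id(struct device *dev, void *data)
{
        return strcmp(dev->bus_id, (const char *)data) == 0;
}

static struct device *example_find_by_bus_id(struct bus_type *bus, const char *id)
{
        /* returns a referenced device or NULL */
        return bus_find_device(bus, NULL, (void *)id, match_bus_id);
}

The caller owns the reference on the returned device and must drop it with put_device() when finished.
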
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 86d79755fbf..efe03a024a5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -333,7 +333,7 @@ void device_del(struct device * dev)
struct device * parent = dev->parent;
if (parent)
- klist_remove(&dev->knode_parent);
+ klist_del(&dev->knode_parent);
/* Notify the platform of the removal, in case they
* need to do anything...
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 6db3a789c54..16323f9cbff 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -65,7 +65,7 @@ void device_bind_driver(struct device * dev)
*
* This function must be called with @dev->sem held.
*/
-static int driver_probe_device(struct device_driver * drv, struct device * dev)
+int driver_probe_device(struct device_driver * drv, struct device * dev)
{
int ret = 0;
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 1b645886e9e..291c5954a3a 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -56,6 +56,41 @@ EXPORT_SYMBOL_GPL(driver_for_each_device);
/**
+ * driver_find_device - device iterator for locating a particular device.
+ * @driver: The device's driver
+ * @start: Device to begin with
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * This is similar to the driver_for_each_device() function above, but
+ * it returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
+ */
+struct device * driver_find_device(struct device_driver *drv,
+ struct device * start, void * data,
+ int (*match)(struct device *, void *))
+{
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!drv)
+ return NULL;
+
+ klist_iter_init_node(&drv->klist_devices, &i,
+ (start ? &start->knode_driver : NULL));
+ while ((dev = next_device(&i)))
+ if (match(dev, data) && get_device(dev))
+ break;
+ klist_iter_exit(&i);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(driver_find_device);
+
+/**
* driver_create_file - create sysfs file for driver.
* @drv: driver.
* @attr: driver attribute descriptor.
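
[Editor's note] driver_find_device() is the driver-list counterpart of bus_find_device(). Purely as an illustration (the helper names are made up), it can be used to test whether a driver currently has any device bound to it:

#include <linux/device.h>

static int match_any(struct device *dev, void *data)
{
        return 1;
}

static int example_driver_has_devices(struct device_driver *drv)
{
        struct device *dev = driver_find_device(drv, NULL, NULL, match_any);

        if (dev) {
                put_device(dev);        /* drop the reference the iterator took */
                return 1;
        }
        return 0;
}
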
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 97fe13f7f07..652281402c9 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -74,6 +74,8 @@ static ssize_t
firmware_timeout_store(struct class *class, const char *buf, size_t count)
{
loading_timeout = simple_strtol(buf, NULL, 10);
+ if (loading_timeout < 0)
+ loading_timeout = 0;
return count;
}
@@ -138,6 +140,10 @@ firmware_loading_store(struct class_device *class_dev,
switch (loading) {
case 1:
down(&fw_lock);
+ if (!fw_priv->fw) {
+ up(&fw_lock);
+ break;
+ }
vfree(fw_priv->fw->data);
fw_priv->fw->data = NULL;
fw_priv->fw->size = 0;
@@ -178,7 +184,7 @@ firmware_data_read(struct kobject *kobj,
down(&fw_lock);
fw = fw_priv->fw;
- if (test_bit(FW_STATUS_DONE, &fw_priv->status)) {
+ if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
ret_count = -ENODEV;
goto out;
}
@@ -238,9 +244,10 @@ firmware_data_write(struct kobject *kobj,
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+
down(&fw_lock);
fw = fw_priv->fw;
- if (test_bit(FW_STATUS_DONE, &fw_priv->status)) {
+ if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
retval = -ENODEV;
goto out;
}
@@ -418,7 +425,7 @@ request_firmware(const struct firmware **firmware_p, const char *name,
fw_priv = class_get_devdata(class_dev);
- if (loading_timeout) {
+ if (loading_timeout > 0) {
fw_priv->timeout.expires = jiffies + loading_timeout * HZ;
add_timer(&fw_priv->timeout);
}
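
[Editor's note] For context, this is the in-kernel consumer side of the firmware loader whose sysfs interface is hardened above. The device and firmware names are placeholders, but request_firmware()/release_firmware() are the existing API; request_firmware() blocks until userspace supplies the image or the (now clamped to non-negative) loading_timeout expires:

#include <linux/firmware.h>
#include <linux/device.h>

static int example_load_firmware(struct device *dev)
{
        const struct firmware *fw;
        int err;

        /* blocks until userspace writes the image via sysfs or the timer fires */
        err = request_firmware(&fw, "example-fw.bin", dev);
        if (err)
                return err;

        /* ... push fw->data (fw->size bytes) to the hardware ... */

        release_firmware(fw);
        return 0;
}
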
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 3410b4d294b..91aeb678135 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -1806,7 +1806,8 @@ static void as_put_request(request_queue_t *q, struct request *rq)
rq->elevator_private = NULL;
}
-static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
+static int as_set_request(request_queue_t *q, struct request *rq,
+ struct bio *bio, int gfp_mask)
{
struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
@@ -1827,7 +1828,7 @@ static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
return 1;
}
-static int as_may_queue(request_queue_t *q, int rw)
+static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
int ret = ELV_MQUEUE_MAY;
struct as_data *ad = q->elevator->elevator_data;
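
[Editor's note] The as-iosched hunks above track an elevator API change: ->set_request() and ->may_queue() now also receive the struct bio that triggered the allocation. A bare sketch, not taken from the patch, of hooks using the new prototypes:

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>

static int sketch_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
        /* the bio lets a scheduler classify the I/O before a request exists */
        return ELV_MQUEUE_MAY;
}

static int sketch_set_request(request_queue_t *q, struct request *rq,
                              struct bio *bio, int gfp_mask)
{
        rq->elevator_private = NULL;    /* no per-request state in this sketch */
        return 0;                       /* 0 on success, non-zero on failure */
}
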
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index abde27027c0..3e9fb6e4a52 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1,6 +1,6 @@
/*
* Disk Array driver for HP SA 5xxx and 6xxx Controllers
- * Copyright 2000, 2002 Hewlett-Packard Development Company, L.P.
+ * Copyright 2000, 2005 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -54,7 +54,7 @@
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.6");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
- " SA6i P600 P800 E400");
+ " SA6i P600 P800 E400 E300");
MODULE_LICENSE("GPL");
#include "cciss_cmd.h"
@@ -85,8 +85,10 @@ static const struct pci_device_id cciss_pci_device_id[] = {
0x103C, 0x3225, 0, 0, 0},
{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB,
0x103c, 0x3223, 0, 0, 0},
- { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB,
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
0x103c, 0x3231, 0, 0, 0},
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
+ 0x103c, 0x3233, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
@@ -110,6 +112,7 @@ static struct board_type products[] = {
{ 0x3225103C, "Smart Array P600", &SA5_access},
{ 0x3223103C, "Smart Array P800", &SA5_access},
{ 0x3231103C, "Smart Array E400", &SA5_access},
+ { 0x3233103C, "Smart Array E300", &SA5_access},
};
/* How long to wait (in millesconds) for board to go into simple mode */
@@ -635,6 +638,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
cciss_pci_info_struct pciinfo;
if (!arg) return -EINVAL;
+ pciinfo.domain = pci_domain_nr(host->pdev->bus);
pciinfo.bus = host->pdev->bus->number;
pciinfo.dev_fn = host->pdev->devfn;
pciinfo.board_id = host->board_id;
@@ -782,18 +786,10 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
case CCISS_GETLUNINFO: {
LogvolInfo_struct luninfo;
- int i;
luninfo.LunID = drv->LunID;
luninfo.num_opens = drv->usage_count;
luninfo.num_parts = 0;
- /* count partitions 1 to 15 with sizes > 0 */
- for (i = 0; i < MAX_PART - 1; i++) {
- if (!disk->part[i])
- continue;
- if (disk->part[i]->nr_sects != 0)
- luninfo.num_parts++;
- }
if (copy_to_user(argp, &luninfo,
sizeof(LogvolInfo_struct)))
return -EFAULT;
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index 3ac47dde64d..de5746e38af 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -21,22 +21,34 @@
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/mempool.h>
-
-static unsigned long max_elapsed_crq;
-static unsigned long max_elapsed_dispatch;
+#include <linux/ioprio.h>
+#include <linux/writeback.h>
/*
* tunables
*/
static int cfq_quantum = 4; /* max queue in one round of service */
static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
-static int cfq_service = HZ; /* period over which service is avg */
-static int cfq_fifo_expire_r = HZ / 2; /* fifo timeout for sync requests */
-static int cfq_fifo_expire_w = 5 * HZ; /* fifo timeout for async requests */
-static int cfq_fifo_rate = HZ / 8; /* fifo expiry rate */
+static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
static int cfq_back_penalty = 2; /* penalty of a backwards seek */
+static int cfq_slice_sync = HZ / 10;
+static int cfq_slice_async = HZ / 25;
+static int cfq_slice_async_rq = 2;
+static int cfq_slice_idle = HZ / 100;
+
+#define CFQ_IDLE_GRACE (HZ / 10)
+#define CFQ_SLICE_SCALE (5)
+
+#define CFQ_KEY_ASYNC (0)
+#define CFQ_KEY_ANY (0xffff)
+
+/*
+ * disable queueing at the driver/hardware level
+ */
+static int cfq_max_depth = 1;
+
/*
* for the hash of cfqq inside the cfqd
*/
@@ -55,6 +67,7 @@ static int cfq_back_penalty = 2; /* penalty of a backwards seek */
#define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash)
#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
+#define list_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define RQ_DATA(rq) (rq)->elevator_private
@@ -75,78 +88,110 @@ static int cfq_back_penalty = 2; /* penalty of a backwards seek */
#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq) (rq)->sector
-/*
- * threshold for switching off non-tag accounting
- */
-#define CFQ_MAX_TAG (4)
-
-/*
- * sort key types and names
- */
-enum {
- CFQ_KEY_PGID,
- CFQ_KEY_TGID,
- CFQ_KEY_UID,
- CFQ_KEY_GID,
- CFQ_KEY_LAST,
-};
-
-static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL };
-
static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static kmem_cache_t *cfq_ioc_pool;
+#define CFQ_PRIO_LISTS IOPRIO_BE_NR
+#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
+#define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
+#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
+
+#define ASYNC (0)
+#define SYNC (1)
+
+#define cfq_cfqq_dispatched(cfqq) \
+ ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
+
+#define cfq_cfqq_class_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC)
+
+#define cfq_cfqq_sync(cfqq) \
+ (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
+
+/*
+ * Per block device queue structure
+ */
struct cfq_data {
- struct list_head rr_list;
+ atomic_t ref;
+ request_queue_t *queue;
+
+ /*
+ * rr list of queues with requests and the count of them
+ */
+ struct list_head rr_list[CFQ_PRIO_LISTS];
+ struct list_head busy_rr;
+ struct list_head cur_rr;
+ struct list_head idle_rr;
+ unsigned int busy_queues;
+
+ /*
+ * non-ordered list of empty cfqq's
+ */
struct list_head empty_list;
+ /*
+ * cfqq lookup hash
+ */
struct hlist_head *cfq_hash;
- struct hlist_head *crq_hash;
- /* queues on rr_list (ie they have pending requests */
- unsigned int busy_queues;
+ /*
+ * global crq hash for all queues
+ */
+ struct hlist_head *crq_hash;
unsigned int max_queued;
- atomic_t ref;
+ mempool_t *crq_pool;
- int key_type;
+ int rq_in_driver;
- mempool_t *crq_pool;
+ /*
+ * schedule slice state info
+ */
+ /*
+ * idle window management
+ */
+ struct timer_list idle_slice_timer;
+ struct work_struct unplug_work;
- request_queue_t *queue;
+ struct cfq_queue *active_queue;
+ struct cfq_io_context *active_cic;
+ int cur_prio, cur_end_prio;
+ unsigned int dispatch_slice;
+
+ struct timer_list idle_class_timer;
sector_t last_sector;
+ unsigned long last_end_request;
- int rq_in_driver;
+ unsigned int rq_starved;
/*
* tunables, see top of file
*/
unsigned int cfq_quantum;
unsigned int cfq_queued;
- unsigned int cfq_fifo_expire_r;
- unsigned int cfq_fifo_expire_w;
- unsigned int cfq_fifo_batch_expire;
+ unsigned int cfq_fifo_expire[2];
unsigned int cfq_back_penalty;
unsigned int cfq_back_max;
- unsigned int find_best_crq;
-
- unsigned int cfq_tagged;
+ unsigned int cfq_slice[2];
+ unsigned int cfq_slice_async_rq;
+ unsigned int cfq_slice_idle;
+ unsigned int cfq_max_depth;
};
+/*
+ * Per process-grouping structure
+ */
struct cfq_queue {
/* reference count */
atomic_t ref;
/* parent cfq_data */
struct cfq_data *cfqd;
- /* hash of mergeable requests */
+ /* cfqq lookup hash */
struct hlist_node cfq_hash;
/* hash key */
- unsigned long key;
- /* whether queue is on rr (or empty) list */
- int on_rr;
+ unsigned int key;
/* on either rr or empty list of cfqd */
struct list_head cfq_list;
/* sorted list of pending requests */
@@ -158,21 +203,22 @@ struct cfq_queue {
/* currently allocated requests */
int allocated[2];
/* fifo list of requests in sort_list */
- struct list_head fifo[2];
- /* last time fifo expired */
- unsigned long last_fifo_expire;
+ struct list_head fifo;
- int key_type;
+ unsigned long slice_start;
+ unsigned long slice_end;
+ unsigned long slice_left;
+ unsigned long service_last;
- unsigned long service_start;
- unsigned long service_used;
+ /* number of requests that are on the dispatch list */
+ int on_dispatch[2];
- unsigned int max_rate;
+ /* io prio of this group */
+ unsigned short ioprio, org_ioprio;
+ unsigned short ioprio_class, org_ioprio_class;
- /* number of requests that have been handed to the driver */
- int in_flight;
- /* number of currently allocated requests */
- int alloc_limit[2];
+ /* various state flags, see below */
+ unsigned int flags;
};
struct cfq_rq {
@@ -184,42 +230,78 @@ struct cfq_rq {
struct cfq_queue *cfq_queue;
struct cfq_io_context *io_context;
- unsigned long service_start;
- unsigned long queue_start;
+ unsigned int crq_flags;
+};
+
+enum cfqq_state_flags {
+ CFQ_CFQQ_FLAG_on_rr = 0,
+ CFQ_CFQQ_FLAG_wait_request,
+ CFQ_CFQQ_FLAG_must_alloc,
+ CFQ_CFQQ_FLAG_must_alloc_slice,
+ CFQ_CFQQ_FLAG_must_dispatch,
+ CFQ_CFQQ_FLAG_fifo_expire,
+ CFQ_CFQQ_FLAG_idle_window,
+ CFQ_CFQQ_FLAG_prio_changed,
+ CFQ_CFQQ_FLAG_expired,
+};
- unsigned int in_flight : 1;
- unsigned int accounted : 1;
- unsigned int is_sync : 1;
- unsigned int is_write : 1;
+#define CFQ_CFQQ_FNS(name) \
+static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
+{ \
+ cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
+} \
+static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
+{ \
+ cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
+} \
+static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
+{ \
+ return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
+}
+
+CFQ_CFQQ_FNS(on_rr);
+CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_alloc);
+CFQ_CFQQ_FNS(must_alloc_slice);
+CFQ_CFQQ_FNS(must_dispatch);
+CFQ_CFQQ_FNS(fifo_expire);
+CFQ_CFQQ_FNS(idle_window);
+CFQ_CFQQ_FNS(prio_changed);
+CFQ_CFQQ_FNS(expired);
+#undef CFQ_CFQQ_FNS
+
+enum cfq_rq_state_flags {
+ CFQ_CRQ_FLAG_in_flight = 0,
+ CFQ_CRQ_FLAG_in_driver,
+ CFQ_CRQ_FLAG_is_sync,
+ CFQ_CRQ_FLAG_requeued,
};
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long);
+#define CFQ_CRQ_FNS(name) \
+static inline void cfq_mark_crq_##name(struct cfq_rq *crq) \
+{ \
+ crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name); \
+} \
+static inline void cfq_clear_crq_##name(struct cfq_rq *crq) \
+{ \
+ crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name); \
+} \
+static inline int cfq_crq_##name(const struct cfq_rq *crq) \
+{ \
+ return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0; \
+}
+
+CFQ_CRQ_FNS(in_flight);
+CFQ_CRQ_FNS(in_driver);
+CFQ_CRQ_FNS(is_sync);
+CFQ_CRQ_FNS(requeued);
+#undef CFQ_CRQ_FNS
+
+static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
-static void cfq_update_next_crq(struct cfq_rq *);
static void cfq_put_cfqd(struct cfq_data *cfqd);
-/*
- * what the fairness is based on (ie how processes are grouped and
- * differentiated)
- */
-static inline unsigned long
-cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk)
-{
- /*
- * optimize this so that ->key_type is the offset into the struct
- */
- switch (cfqd->key_type) {
- case CFQ_KEY_PGID:
- return process_group(tsk);
- default:
- case CFQ_KEY_TGID:
- return tsk->tgid;
- case CFQ_KEY_UID:
- return tsk->uid;
- case CFQ_KEY_GID:
- return tsk->gid;
- }
-}
+#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE)
/*
* lots of deadline iosched dupes, can be abstracted later...
@@ -235,16 +317,12 @@ static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
if (q->last_merge == crq->request)
q->last_merge = NULL;
-
- cfq_update_next_crq(crq);
}
static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
- BUG_ON(!hlist_unhashed(&crq->hash));
-
hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
}
@@ -257,8 +335,6 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
struct cfq_rq *crq = list_entry_hash(entry);
struct request *__rq = crq->request;
- BUG_ON(hlist_unhashed(&crq->hash));
-
if (!rq_mergeable(__rq)) {
cfq_del_crq_hash(crq);
continue;
@@ -271,6 +347,28 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
return NULL;
}
+static inline int cfq_pending_requests(struct cfq_data *cfqd)
+{
+ return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
+}
+
+/*
+ * scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing
+ */
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+{
+ if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
+ kblockd_schedule_work(&cfqd->unplug_work);
+}
+
+static int cfq_queue_empty(request_queue_t *q)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
+ return !cfq_pending_requests(cfqd);
+}
+
/*
* Lifted from AS - choose which of crq1 and crq2 that is best served now.
* We choose the request that is closest to the head right now. Distance
@@ -287,36 +385,16 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
return crq2;
if (crq2 == NULL)
return crq1;
+ if (cfq_crq_requeued(crq1))
+ return crq1;
+ if (cfq_crq_requeued(crq2))
+ return crq2;
s1 = crq1->request->sector;
s2 = crq2->request->sector;
last = cfqd->last_sector;
-#if 0
- if (!list_empty(&cfqd->queue->queue_head)) {
- struct list_head *entry = &cfqd->queue->queue_head;
- unsigned long distance = ~0UL;
- struct request *rq;
-
- while ((entry = entry->prev) != &cfqd->queue->queue_head) {
- rq = list_entry_rq(entry);
-
- if (blk_barrier_rq(rq))
- break;
-
- if (distance < abs(s1 - rq->sector + rq->nr_sectors)) {
- distance = abs(s1 - rq->sector +rq->nr_sectors);
- last = rq->sector + rq->nr_sectors;
- }
- if (distance < abs(s2 - rq->sector + rq->nr_sectors)) {
- distance = abs(s2 - rq->sector +rq->nr_sectors);
- last = rq->sector + rq->nr_sectors;
- }
- }
- }
-#endif
-
/*
* by definition, 1KiB is 2 sectors
*/
@@ -377,11 +455,14 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
struct rb_node *rbnext, *rbprev;
- if (!ON_RB(&last->rb_node))
- return NULL;
-
- if ((rbnext = rb_next(&last->rb_node)) == NULL)
+ rbnext = NULL;
+ if (ON_RB(&last->rb_node))
+ rbnext = rb_next(&last->rb_node);
+ if (!rbnext) {
rbnext = rb_first(&cfqq->sort_list);
+ if (rbnext == &last->rb_node)
+ rbnext = NULL;
+ }
rbprev = rb_prev(&last->rb_node);
@@ -401,67 +482,53 @@ static void cfq_update_next_crq(struct cfq_rq *crq)
cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
}
-static int cfq_check_sort_rr_list(struct cfq_queue *cfqq)
+static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
{
- struct list_head *head = &cfqq->cfqd->rr_list;
- struct list_head *next, *prev;
-
- /*
- * list might still be ordered
- */
- next = cfqq->cfq_list.next;
- if (next != head) {
- struct cfq_queue *cnext = list_entry_cfqq(next);
+ struct cfq_data *cfqd = cfqq->cfqd;
+ struct list_head *list, *entry;
- if (cfqq->service_used > cnext->service_used)
- return 1;
- }
+ BUG_ON(!cfq_cfqq_on_rr(cfqq));
- prev = cfqq->cfq_list.prev;
- if (prev != head) {
- struct cfq_queue *cprev = list_entry_cfqq(prev);
+ list_del(&cfqq->cfq_list);
- if (cfqq->service_used < cprev->service_used)
- return 1;
+ if (cfq_class_rt(cfqq))
+ list = &cfqd->cur_rr;
+ else if (cfq_class_idle(cfqq))
+ list = &cfqd->idle_rr;
+ else {
+ /*
+ * if cfqq has requests in flight, don't allow it to be
+ * found in cfq_set_active_queue before it has finished them.
+ * this is done to increase fairness between a process that
+ * has lots of io pending vs one that only generates one
+ * sporadically or synchronously
+ */
+ if (cfq_cfqq_dispatched(cfqq))
+ list = &cfqd->busy_rr;
+ else
+ list = &cfqd->rr_list[cfqq->ioprio];
}
- return 0;
-}
-
-static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue)
-{
- struct list_head *entry = &cfqq->cfqd->rr_list;
-
- if (!cfqq->on_rr)
- return;
- if (!new_queue && !cfq_check_sort_rr_list(cfqq))
+ /*
+ * if queue was preempted, just add to front to be fair. busy_rr
+ * isn't sorted.
+ */
+ if (preempted || list == &cfqd->busy_rr) {
+ list_add(&cfqq->cfq_list, list);
return;
-
- list_del(&cfqq->cfq_list);
+ }
/*
- * sort by our mean service_used, sub-sort by in-flight requests
+ * sort by when queue was last serviced
*/
- while ((entry = entry->prev) != &cfqq->cfqd->rr_list) {
+ entry = list;
+ while ((entry = entry->prev) != list) {
struct cfq_queue *__cfqq = list_entry_cfqq(entry);
- if (cfqq->service_used > __cfqq->service_used)
+ if (!__cfqq->service_last)
+ break;
+ if (time_before(__cfqq->service_last, cfqq->service_last))
break;
- else if (cfqq->service_used == __cfqq->service_used) {
- struct list_head *prv;
-
- while ((prv = entry->prev) != &cfqq->cfqd->rr_list) {
- __cfqq = list_entry_cfqq(prv);
-
- WARN_ON(__cfqq->service_used > cfqq->service_used);
- if (cfqq->service_used != __cfqq->service_used)
- break;
- if (cfqq->in_flight > __cfqq->in_flight)
- break;
-
- entry = prv;
- }
- }
}
list_add(&cfqq->cfq_list, entry);
@@ -469,28 +536,24 @@ static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue)
/*
* add to busy list of queues for service, trying to be fair in ordering
- * the pending list according to requests serviced
+ * the pending list according to last request service
*/
static inline void
-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
{
- /*
- * it's currently on the empty list
- */
- cfqq->on_rr = 1;
+ BUG_ON(cfq_cfqq_on_rr(cfqq));
+ cfq_mark_cfqq_on_rr(cfqq);
cfqd->busy_queues++;
- if (time_after(jiffies, cfqq->service_start + cfq_service))
- cfqq->service_used >>= 3;
-
- cfq_sort_rr_list(cfqq, 1);
+ cfq_resort_rr_list(cfqq, requeue);
}
static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
+ BUG_ON(!cfq_cfqq_on_rr(cfqq));
+ cfq_clear_cfqq_on_rr(cfqq);
list_move(&cfqq->cfq_list, &cfqd->empty_list);
- cfqq->on_rr = 0;
BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
@@ -505,16 +568,17 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
if (ON_RB(&crq->rb_node)) {
struct cfq_data *cfqd = cfqq->cfqd;
+ const int sync = cfq_crq_is_sync(crq);
- BUG_ON(!cfqq->queued[crq->is_sync]);
+ BUG_ON(!cfqq->queued[sync]);
+ cfqq->queued[sync]--;
cfq_update_next_crq(crq);
- cfqq->queued[crq->is_sync]--;
rb_erase(&crq->rb_node, &cfqq->sort_list);
RB_CLEAR_COLOR(&crq->rb_node);
- if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr)
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
cfq_del_cfqq_rr(cfqd, cfqq);
}
}
@@ -550,7 +614,7 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
struct cfq_rq *__alias;
crq->rb_key = rq_rb_key(rq);
- cfqq->queued[crq->is_sync]++;
+ cfqq->queued[cfq_crq_is_sync(crq)]++;
/*
* looks a little odd, but the first insert might return an alias.
@@ -561,8 +625,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
rb_insert_color(&crq->rb_node, &cfqq->sort_list);
- if (!cfqq->on_rr)
- cfq_add_cfqq_rr(cfqd, cfqq);
+ if (!cfq_cfqq_on_rr(cfqq))
+ cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq));
/*
* check if this request is a better next-serve candidate
@@ -575,17 +639,16 @@ cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
if (ON_RB(&crq->rb_node)) {
rb_erase(&crq->rb_node, &cfqq->sort_list);
- cfqq->queued[crq->is_sync]--;
+ cfqq->queued[cfq_crq_is_sync(crq)]--;
}
cfq_add_crq_rb(crq);
}
-static struct request *
-cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
+static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
+
{
- const unsigned long key = cfq_hash_key(cfqd, current);
- struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key);
+ struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY);
struct rb_node *n;
if (!cfqq)
@@ -609,20 +672,25 @@ out:
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_rq *crq = RQ_DATA(rq);
if (crq) {
struct cfq_queue *cfqq = crq->cfq_queue;
- if (cfqq->cfqd->cfq_tagged) {
- cfqq->service_used--;
- cfq_sort_rr_list(cfqq, 0);
+ if (cfq_crq_in_driver(crq)) {
+ cfq_clear_crq_in_driver(crq);
+ WARN_ON(!cfqd->rq_in_driver);
+ cfqd->rq_in_driver--;
}
+ if (cfq_crq_in_flight(crq)) {
+ const int sync = cfq_crq_is_sync(crq);
- if (crq->accounted) {
- crq->accounted = 0;
- cfqq->cfqd->rq_in_driver--;
+ cfq_clear_crq_in_flight(crq);
+ WARN_ON(!cfqq->on_dispatch[sync]);
+ cfqq->on_dispatch[sync]--;
}
+ cfq_mark_crq_requeued(crq);
}
}
@@ -640,11 +708,10 @@ static void cfq_remove_request(request_queue_t *q, struct request *rq)
struct cfq_rq *crq = RQ_DATA(rq);
if (crq) {
- cfq_remove_merge_hints(q, crq);
list_del_init(&rq->queuelist);
+ cfq_del_crq_rb(crq);
+ cfq_remove_merge_hints(q, crq);
- if (crq->cfq_queue)
- cfq_del_crq_rb(crq);
}
}
@@ -662,21 +729,15 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
}
__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
- if (__rq) {
- BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
- if (elv_rq_merge_ok(__rq, bio)) {
- ret = ELEVATOR_BACK_MERGE;
- goto out;
- }
+ if (__rq && elv_rq_merge_ok(__rq, bio)) {
+ ret = ELEVATOR_BACK_MERGE;
+ goto out;
}
__rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
- if (__rq) {
- if (elv_rq_merge_ok(__rq, bio)) {
- ret = ELEVATOR_FRONT_MERGE;
- goto out;
- }
+ if (__rq && elv_rq_merge_ok(__rq, bio)) {
+ ret = ELEVATOR_FRONT_MERGE;
+ goto out;
}
return ELEVATOR_NO_MERGE;
@@ -709,20 +770,220 @@ static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
struct request *next)
{
- struct cfq_rq *crq = RQ_DATA(rq);
- struct cfq_rq *cnext = RQ_DATA(next);
-
cfq_merged_request(q, rq);
- if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before(cnext->queue_start, crq->queue_start)) {
- list_move(&rq->queuelist, &next->queuelist);
- crq->queue_start = cnext->queue_start;
+ /*
+ * reposition in fifo if next is older than rq
+ */
+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
+ time_before(next->start_time, rq->start_time))
+ list_move(&rq->queuelist, &next->queuelist);
+
+ cfq_remove_request(q, next);
+}
+
+static inline void
+__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ if (cfqq) {
+ /*
+ * stop potential idle class queues waiting service
+ */
+ del_timer(&cfqd->idle_class_timer);
+
+ cfqq->slice_start = jiffies;
+ cfqq->slice_end = 0;
+ cfqq->slice_left = 0;
+ cfq_clear_cfqq_must_alloc_slice(cfqq);
+ cfq_clear_cfqq_fifo_expire(cfqq);
+ cfq_clear_cfqq_expired(cfqq);
+ }
+
+ cfqd->active_queue = cfqq;
+}
+
+/*
+ * 0
+ * 0,1
+ * 0,1,2
+ * 0,1,2,3
+ * 0,1,2,3,4
+ * 0,1,2,3,4,5
+ * 0,1,2,3,4,5,6
+ * 0,1,2,3,4,5,6,7
+ */
+static int cfq_get_next_prio_level(struct cfq_data *cfqd)
+{
+ int prio, wrap;
+
+ prio = -1;
+ wrap = 0;
+ do {
+ int p;
+
+ for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
+ if (!list_empty(&cfqd->rr_list[p])) {
+ prio = p;
+ break;
+ }
+ }
+
+ if (prio != -1)
+ break;
+ cfqd->cur_prio = 0;
+ if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
+ cfqd->cur_end_prio = 0;
+ if (wrap)
+ break;
+ wrap = 1;
}
+ } while (1);
+
+ if (unlikely(prio == -1))
+ return -1;
+
+ BUG_ON(prio >= CFQ_PRIO_LISTS);
+
+ list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
+
+ cfqd->cur_prio = prio + 1;
+ if (cfqd->cur_prio > cfqd->cur_end_prio) {
+ cfqd->cur_end_prio = cfqd->cur_prio;
+ cfqd->cur_prio = 0;
+ }
+ if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
+ cfqd->cur_prio = 0;
+ cfqd->cur_end_prio = 0;
}
- cfq_update_next_crq(cnext);
- cfq_remove_request(q, next);
+ return prio;
+}
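A minimal user-space sketch of the widening cur_prio/cur_end_prio scan implemented above, assuming a fresh start at level 0 with every rr_list level empty and CFQ_PRIO_LISTS of 8; it only illustrates which levels each pass would examine and is not part of the patch:

#include <stdio.h>

#define CFQ_PRIO_LISTS	8

int main(void)
{
	int cur_prio = 0, cur_end_prio = 0, wrap = 0;

	do {
		int p;

		printf("scan:");
		/* mirrors the inner for-loop that checks rr_list[p] */
		for (p = cur_prio; p <= cur_end_prio; p++)
			printf(" %d", p);
		printf("\n");

		/* nothing found: widen the window, wrapping at most once */
		cur_prio = 0;
		if (++cur_end_prio == CFQ_PRIO_LISTS) {
			cur_end_prio = 0;
			if (wrap)
				break;
			wrap = 1;
		}
	} while (1);

	return 0;
}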
+
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+{
+ struct cfq_queue *cfqq;
+
+ /*
+ * if current queue is expired but not done with its requests yet,
+ * wait for that to happen
+ */
+ if ((cfqq = cfqd->active_queue) != NULL) {
+ if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq))
+ return NULL;
+ }
+
+ /*
+ * if current list is non-empty, grab first entry. if it is empty,
+ * get next prio level and grab first entry then if any are spliced
+ */
+ if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
+ cfqq = list_entry_cfqq(cfqd->cur_rr.next);
+
+ /*
+ * if we have idle queues and no rt or be queues had pending
+ * requests, either allow immediate service if the grace period
+ * has passed or arm the idle grace timer
+ */
+ if (!cfqq && !list_empty(&cfqd->idle_rr)) {
+ unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+
+ if (time_after_eq(jiffies, end))
+ cfqq = list_entry_cfqq(cfqd->idle_rr.next);
+ else
+ mod_timer(&cfqd->idle_class_timer, end);
+ }
+
+ __cfq_set_active_queue(cfqd, cfqq);
+ return cfqq;
+}
+
+/*
+ * current cfqq expired its slice (or was too idle), select new one
+ */
+static void
+__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ int preempted)
+{
+ unsigned long now = jiffies;
+
+ if (cfq_cfqq_wait_request(cfqq))
+ del_timer(&cfqd->idle_slice_timer);
+
+ if (!preempted && !cfq_cfqq_dispatched(cfqq))
+ cfqq->service_last = now;
+
+ cfq_clear_cfqq_must_dispatch(cfqq);
+ cfq_clear_cfqq_wait_request(cfqq);
+
+ /*
+ * store what was left of this slice, if the queue idled out
+ * or was preempted
+ */
+ if (time_after(now, cfqq->slice_end))
+ cfqq->slice_left = now - cfqq->slice_end;
+ else
+ cfqq->slice_left = 0;
+
+ if (cfq_cfqq_on_rr(cfqq))
+ cfq_resort_rr_list(cfqq, preempted);
+
+ if (cfqq == cfqd->active_queue)
+ cfqd->active_queue = NULL;
+
+ if (cfqd->active_cic) {
+ put_io_context(cfqd->active_cic->ioc);
+ cfqd->active_cic = NULL;
+ }
+
+ cfqd->dispatch_slice = 0;
+}
+
+static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
+{
+ struct cfq_queue *cfqq = cfqd->active_queue;
+
+ if (cfqq) {
+ /*
+	 * use deferred expiry, if there are requests in progress, so as
+	 * not to disturb the slice of the next queue
+ */
+ if (cfq_cfqq_dispatched(cfqq))
+ cfq_mark_cfqq_expired(cfqq);
+ else
+ __cfq_slice_expired(cfqd, cfqq, preempted);
+ }
+}
+
+static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+
+{
+ WARN_ON(!RB_EMPTY(&cfqq->sort_list));
+ WARN_ON(cfqq != cfqd->active_queue);
+
+ /*
+ * idle is disabled, either manually or by past process history
+ */
+ if (!cfqd->cfq_slice_idle)
+ return 0;
+ if (!cfq_cfqq_idle_window(cfqq))
+ return 0;
+ /*
+ * task has exited, don't wait
+ */
+ if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
+ return 0;
+
+ cfq_mark_cfqq_must_dispatch(cfqq);
+ cfq_mark_cfqq_wait_request(cfqq);
+
+ if (!timer_pending(&cfqd->idle_slice_timer)) {
+ unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
+
+ cfqd->idle_slice_timer.expires = jiffies + slice_left;
+ add_timer(&cfqd->idle_slice_timer);
+ }
+
+ return 1;
}
/*
@@ -738,31 +999,40 @@ static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
struct request *__rq;
sector_t last;
- cfq_del_crq_rb(crq);
- cfq_remove_merge_hints(q, crq);
list_del(&crq->request->queuelist);
last = cfqd->last_sector;
- while ((entry = entry->prev) != head) {
- __rq = list_entry_rq(entry);
+ list_for_each_entry_reverse(__rq, head, queuelist) {
+ struct cfq_rq *__crq = RQ_DATA(__rq);
- if (blk_barrier_rq(crq->request))
+ if (blk_barrier_rq(__rq))
break;
- if (!blk_fs_request(crq->request))
+ if (!blk_fs_request(__rq))
+ break;
+ if (cfq_crq_requeued(__crq))
break;
- if (crq->request->sector > __rq->sector)
+ if (__rq->sector <= crq->request->sector)
break;
if (__rq->sector > last && crq->request->sector < last) {
- last = crq->request->sector;
+ last = crq->request->sector + crq->request->nr_sectors;
break;
}
+ entry = &__rq->queuelist;
}
cfqd->last_sector = last;
- crq->in_flight = 1;
- cfqq->in_flight++;
- list_add(&crq->request->queuelist, entry);
+
+ cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
+
+ cfq_del_crq_rb(crq);
+ cfq_remove_merge_hints(q, crq);
+
+ cfq_mark_crq_in_flight(crq);
+ cfq_clear_crq_requeued(crq);
+
+ cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
+ list_add_tail(&crq->request->queuelist, entry);
}
/*
@@ -771,173 +1041,225 @@ static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
{
struct cfq_data *cfqd = cfqq->cfqd;
- const int reads = !list_empty(&cfqq->fifo[0]);
- const int writes = !list_empty(&cfqq->fifo[1]);
- unsigned long now = jiffies;
+ struct request *rq;
struct cfq_rq *crq;
- if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire))
+ if (cfq_cfqq_fifo_expire(cfqq))
return NULL;
- crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist));
- if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) {
- cfqq->last_fifo_expire = now;
- return crq;
- }
+ if (!list_empty(&cfqq->fifo)) {
+ int fifo = cfq_cfqq_class_sync(cfqq);
- crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist));
- if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) {
- cfqq->last_fifo_expire = now;
- return crq;
+ crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
+ rq = crq->request;
+ if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
+ cfq_mark_cfqq_fifo_expire(cfqq);
+ return crq;
+ }
}
return NULL;
}
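cfq_check_fifo() above ages the head of the fifo with time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]); a small sketch of why that test stays correct across jiffies wraparound, using the time_after() definition from linux/jiffies.h (the 250-jiffy expiry value is an invented example):

#include <stdio.h>

/* wraparound-safe "a is after b" test, as in linux/jiffies.h */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long jiffies = (unsigned long)-100;	/* counter about to wrap */
	unsigned long deadline = jiffies + 250;		/* start_time + fifo_expire, already wrapped */

	printf("just queued : expired=%d (a raw '>' compare would claim %d)\n",
	       time_after(jiffies, deadline), jiffies > deadline);

	jiffies += 300;					/* 300 jiffies later, counter has wrapped */
	printf("300 j later : expired=%d\n", time_after(jiffies, deadline));
	return 0;
}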
/*
- * dispatch a single request from given queue
+ * Scale schedule slice based on io priority. Use the sync time slice only
+ * if a queue is marked sync and has sync io queued. A sync queue with async
+ * io only should not get full sync slice length.
*/
+static inline int
+cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
+
+ WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+ return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
+}
+
static inline void
-cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- struct cfq_rq *crq;
+ cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+}
+
+static inline int
+cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ const int base_rq = cfqd->cfq_slice_async_rq;
+
+ WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+ return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
+}
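The two scaling helpers above boil down to simple arithmetic over the 8 best-effort priority levels; a sketch evaluating them per level, where the 100-jiffy base slice, the slice_async_rq of 2 and the CFQ_SLICE_SCALE of 5 are assumptions for illustration and only the formulas themselves come from the patch:

#include <stdio.h>

#define CFQ_PRIO_LISTS	8
#define CFQ_SLICE_SCALE	5

static int prio_to_slice(int base_slice, int ioprio)
{
	/* same formula as cfq_prio_to_slice() above */
	return base_slice + (base_slice / CFQ_SLICE_SCALE * (4 - ioprio));
}

static int prio_to_maxrq(int base_rq, int ioprio)
{
	/* same formula as cfq_prio_to_maxrq() above */
	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - ioprio));
}

int main(void)
{
	int prio;

	for (prio = 0; prio < CFQ_PRIO_LISTS; prio++)
		printf("prio %d: slice %3d jiffies, async maxrq %2d\n",
		       prio, prio_to_slice(100, prio), prio_to_maxrq(2, prio));
	return 0;
}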
+
+/*
+ * get next queue for service
+ */
+static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force)
+{
+ unsigned long now = jiffies;
+ struct cfq_queue *cfqq;
+
+ cfqq = cfqd->active_queue;
+ if (!cfqq)
+ goto new_queue;
+
+ if (cfq_cfqq_expired(cfqq))
+ goto new_queue;
/*
- * follow expired path, else get first next available
+ * slice has expired
*/
- if ((crq = cfq_check_fifo(cfqq)) == NULL) {
- if (cfqd->find_best_crq)
- crq = cfqq->next_crq;
- else
- crq = rb_entry_crq(rb_first(&cfqq->sort_list));
- }
-
- cfqd->last_sector = crq->request->sector + crq->request->nr_sectors;
+ if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
+ goto expire;
/*
- * finally, insert request into driver list
+ * if queue has requests, dispatch one. if not, check if
+ * enough slice is left to wait for one
*/
- cfq_dispatch_sort(q, crq);
+ if (!RB_EMPTY(&cfqq->sort_list))
+ goto keep_queue;
+ else if (!force && cfq_cfqq_class_sync(cfqq) &&
+ time_before(now, cfqq->slice_end)) {
+ if (cfq_arm_slice_timer(cfqd, cfqq))
+ return NULL;
+ }
+
+expire:
+ cfq_slice_expired(cfqd, 0);
+new_queue:
+ cfqq = cfq_set_active_queue(cfqd);
+keep_queue:
+ return cfqq;
}
-static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch)
+static int
+__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ int max_dispatch)
{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq;
- struct list_head *entry, *tmp;
- int queued, busy_queues, first_round;
+ int dispatched = 0;
- if (list_empty(&cfqd->rr_list))
- return 0;
+ BUG_ON(RB_EMPTY(&cfqq->sort_list));
- queued = 0;
- first_round = 1;
-restart:
- busy_queues = 0;
- list_for_each_safe(entry, tmp, &cfqd->rr_list) {
- cfqq = list_entry_cfqq(entry);
+ do {
+ struct cfq_rq *crq;
- BUG_ON(RB_EMPTY(&cfqq->sort_list));
+ /*
+ * follow expired path, else get first next available
+ */
+ if ((crq = cfq_check_fifo(cfqq)) == NULL)
+ crq = cfqq->next_crq;
/*
- * first round of queueing, only select from queues that
- * don't already have io in-flight
+ * finally, insert request into driver dispatch list
*/
- if (first_round && cfqq->in_flight)
- continue;
+ cfq_dispatch_sort(cfqd->queue, crq);
- cfq_dispatch_request(q, cfqd, cfqq);
+ cfqd->dispatch_slice++;
+ dispatched++;
- if (!RB_EMPTY(&cfqq->sort_list))
- busy_queues++;
+ if (!cfqd->active_cic) {
+ atomic_inc(&crq->io_context->ioc->refcount);
+ cfqd->active_cic = crq->io_context;
+ }
- queued++;
- }
+ if (RB_EMPTY(&cfqq->sort_list))
+ break;
+
+ } while (dispatched < max_dispatch);
+
+ /*
+ * if slice end isn't set yet, set it. if at least one request was
+ * sync, use the sync time slice value
+ */
+ if (!cfqq->slice_end)
+ cfq_set_prio_slice(cfqd, cfqq);
+
+ /*
+ * expire an async queue immediately if it has used up its slice. idle
+ * queues always expire after 1 dispatch round.
+ */
+ if ((!cfq_cfqq_sync(cfqq) &&
+ cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+ cfq_class_idle(cfqq))
+ cfq_slice_expired(cfqd, 0);
+
+ return dispatched;
+}
+
+static int
+cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_queue *cfqq;
+
+ if (!cfqd->busy_queues)
+ return 0;
+
+ cfqq = cfq_select_queue(cfqd, force);
+ if (cfqq) {
+ cfq_clear_cfqq_must_dispatch(cfqq);
+ cfq_clear_cfqq_wait_request(cfqq);
+ del_timer(&cfqd->idle_slice_timer);
- if ((queued < max_dispatch) && (busy_queues || first_round)) {
- first_round = 0;
- goto restart;
+ if (cfq_class_idle(cfqq))
+ max_dispatch = 1;
+
+ return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
}
- return queued;
+ return 0;
}
static inline void cfq_account_dispatch(struct cfq_rq *crq)
{
struct cfq_queue *cfqq = crq->cfq_queue;
struct cfq_data *cfqd = cfqq->cfqd;
- unsigned long now, elapsed;
- if (!blk_fs_request(crq->request))
+ if (unlikely(!blk_fs_request(crq->request)))
return;
/*
* accounted bit is necessary since some drivers will call
* elv_next_request() many times for the same request (eg ide)
*/
- if (crq->accounted)
+ if (cfq_crq_in_driver(crq))
return;
- now = jiffies;
- if (cfqq->service_start == ~0UL)
- cfqq->service_start = now;
-
- /*
- * on drives with tagged command queueing, command turn-around time
- * doesn't necessarily reflect the time spent processing this very
- * command inside the drive. so do the accounting differently there,
- * by just sorting on the number of requests
- */
- if (cfqd->cfq_tagged) {
- if (time_after(now, cfqq->service_start + cfq_service)) {
- cfqq->service_start = now;
- cfqq->service_used /= 10;
- }
-
- cfqq->service_used++;
- cfq_sort_rr_list(cfqq, 0);
- }
-
- elapsed = now - crq->queue_start;
- if (elapsed > max_elapsed_dispatch)
- max_elapsed_dispatch = elapsed;
-
- crq->accounted = 1;
- crq->service_start = now;
-
- if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) {
- cfqq->cfqd->cfq_tagged = 1;
- printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG);
- }
+ cfq_mark_crq_in_driver(crq);
+ cfqd->rq_in_driver++;
}
static inline void
cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
struct cfq_data *cfqd = cfqq->cfqd;
+ unsigned long now;
- if (!crq->accounted)
+ if (!cfq_crq_in_driver(crq))
return;
+ now = jiffies;
+
WARN_ON(!cfqd->rq_in_driver);
cfqd->rq_in_driver--;
- if (!cfqd->cfq_tagged) {
- unsigned long now = jiffies;
- unsigned long duration = now - crq->service_start;
+ if (!cfq_class_idle(cfqq))
+ cfqd->last_end_request = now;
- if (time_after(now, cfqq->service_start + cfq_service)) {
- cfqq->service_start = now;
- cfqq->service_used >>= 3;
+ if (!cfq_cfqq_dispatched(cfqq)) {
+ if (cfq_cfqq_on_rr(cfqq)) {
+ cfqq->service_last = now;
+ cfq_resort_rr_list(cfqq, 0);
+ }
+ if (cfq_cfqq_expired(cfqq)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
}
-
- cfqq->service_used += duration;
- cfq_sort_rr_list(cfqq, 0);
-
- if (duration > max_elapsed_crq)
- max_elapsed_crq = duration;
}
+
+ if (cfq_crq_is_sync(crq))
+ crq->io_context->last_end_request = now;
}
static struct request *cfq_next_request(request_queue_t *q)
@@ -950,7 +1272,18 @@ static struct request *cfq_next_request(request_queue_t *q)
dispatch:
rq = list_entry_rq(q->queue_head.next);
- if ((crq = RQ_DATA(rq)) != NULL) {
+ crq = RQ_DATA(rq);
+ if (crq) {
+ struct cfq_queue *cfqq = crq->cfq_queue;
+
+ /*
+ * if idle window is disabled, allow queue buildup
+ */
+ if (!cfq_crq_in_driver(crq) &&
+ !cfq_cfqq_idle_window(cfqq) &&
+ cfqd->rq_in_driver >= cfqd->cfq_max_depth)
+ return NULL;
+
cfq_remove_merge_hints(q, crq);
cfq_account_dispatch(crq);
}
@@ -958,7 +1291,7 @@ dispatch:
return rq;
}
- if (cfq_dispatch_requests(q, cfqd->cfq_quantum))
+ if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
goto dispatch;
return NULL;
@@ -972,13 +1305,21 @@ dispatch:
*/
static void cfq_put_queue(struct cfq_queue *cfqq)
{
- BUG_ON(!atomic_read(&cfqq->ref));
+ struct cfq_data *cfqd = cfqq->cfqd;
+
+ BUG_ON(atomic_read(&cfqq->ref) <= 0);
if (!atomic_dec_and_test(&cfqq->ref))
return;
BUG_ON(rb_first(&cfqq->sort_list));
- BUG_ON(cfqq->on_rr);
+ BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
+ BUG_ON(cfq_cfqq_on_rr(cfqq));
+
+ if (unlikely(cfqd->active_queue == cfqq)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
cfq_put_cfqd(cfqq->cfqd);
@@ -991,15 +1332,17 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
}
static inline struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval)
+__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
+ const int hashval)
{
struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
struct hlist_node *entry, *next;
hlist_for_each_safe(entry, next, hash_list) {
struct cfq_queue *__cfqq = list_entry_qhash(entry);
+ const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio);
- if (__cfqq->key == key)
+ if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
return __cfqq;
}
@@ -1007,94 +1350,220 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval)
}
static struct cfq_queue *
-cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key)
+cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
{
- return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT));
+ return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
}
-static inline void
-cfq_rehash_cfqq(struct cfq_data *cfqd, struct cfq_queue **cfqq,
- struct cfq_io_context *cic)
+static void cfq_free_io_context(struct cfq_io_context *cic)
{
- unsigned long hashkey = cfq_hash_key(cfqd, current);
- unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
- struct cfq_queue *__cfqq;
- unsigned long flags;
-
- spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+ struct cfq_io_context *__cic;
+ struct list_head *entry, *next;
- hlist_del(&(*cfqq)->cfq_hash);
-
- __cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval);
- if (!__cfqq || __cfqq == *cfqq) {
- __cfqq = *cfqq;
- hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
- __cfqq->key_type = cfqd->key_type;
- } else {
- atomic_inc(&__cfqq->ref);
- cic->cfqq = __cfqq;
- cfq_put_queue(*cfqq);
- *cfqq = __cfqq;
+ list_for_each_safe(entry, next, &cic->list) {
+ __cic = list_entry(entry, struct cfq_io_context, list);
+ kmem_cache_free(cfq_ioc_pool, __cic);
}
- cic->cfqq = __cfqq;
- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+ kmem_cache_free(cfq_ioc_pool, cic);
}
-static void cfq_free_io_context(struct cfq_io_context *cic)
+/*
+ * Called with interrupts disabled
+ */
+static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
- kmem_cache_free(cfq_ioc_pool, cic);
+ struct cfq_data *cfqd = cic->cfqq->cfqd;
+ request_queue_t *q = cfqd->queue;
+
+ WARN_ON(!irqs_disabled());
+
+ spin_lock(q->queue_lock);
+
+ if (unlikely(cic->cfqq == cfqd->active_queue)) {
+ __cfq_slice_expired(cfqd, cic->cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
+
+ cfq_put_queue(cic->cfqq);
+ cic->cfqq = NULL;
+ spin_unlock(q->queue_lock);
}
/*
- * locking hierarchy is: io_context lock -> queue locks
+ * Another task may update the task cic list, if it is doing a queue lookup
+ * on its behalf. cfq_cic_lock excludes such concurrent updates
*/
static void cfq_exit_io_context(struct cfq_io_context *cic)
{
- struct cfq_queue *cfqq = cic->cfqq;
- struct list_head *entry = &cic->list;
- request_queue_t *q;
+ struct cfq_io_context *__cic;
+ struct list_head *entry;
unsigned long flags;
+ local_irq_save(flags);
+
/*
* put the reference this task is holding to the various queues
*/
- spin_lock_irqsave(&cic->ioc->lock, flags);
- while ((entry = cic->list.next) != &cic->list) {
- struct cfq_io_context *__cic;
-
+ list_for_each(entry, &cic->list) {
__cic = list_entry(entry, struct cfq_io_context, list);
- list_del(entry);
-
- q = __cic->cfqq->cfqd->queue;
- spin_lock(q->queue_lock);
- cfq_put_queue(__cic->cfqq);
- spin_unlock(q->queue_lock);
+ cfq_exit_single_io_context(__cic);
}
- q = cfqq->cfqd->queue;
- spin_lock(q->queue_lock);
- cfq_put_queue(cfqq);
- spin_unlock(q->queue_lock);
-
- cic->cfqq = NULL;
- spin_unlock_irqrestore(&cic->ioc->lock, flags);
+ cfq_exit_single_io_context(cic);
+ local_irq_restore(flags);
}
-static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags)
+static struct cfq_io_context *
+cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask)
{
- struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags);
+ struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
if (cic) {
- cic->dtor = cfq_free_io_context;
- cic->exit = cfq_exit_io_context;
INIT_LIST_HEAD(&cic->list);
cic->cfqq = NULL;
+ cic->key = NULL;
+ cic->last_end_request = jiffies;
+ cic->ttime_total = 0;
+ cic->ttime_samples = 0;
+ cic->ttime_mean = 0;
+ cic->dtor = cfq_free_io_context;
+ cic->exit = cfq_exit_io_context;
}
return cic;
}
+static void cfq_init_prio_data(struct cfq_queue *cfqq)
+{
+ struct task_struct *tsk = current;
+ int ioprio_class;
+
+ if (!cfq_cfqq_prio_changed(cfqq))
+ return;
+
+ ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
+ switch (ioprio_class) {
+ default:
+ printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+ case IOPRIO_CLASS_NONE:
+ /*
+ * no prio set, place us in the middle of the BE classes
+ */
+ cfqq->ioprio = task_nice_ioprio(tsk);
+ cfqq->ioprio_class = IOPRIO_CLASS_BE;
+ break;
+ case IOPRIO_CLASS_RT:
+ cfqq->ioprio = task_ioprio(tsk);
+ cfqq->ioprio_class = IOPRIO_CLASS_RT;
+ break;
+ case IOPRIO_CLASS_BE:
+ cfqq->ioprio = task_ioprio(tsk);
+ cfqq->ioprio_class = IOPRIO_CLASS_BE;
+ break;
+ case IOPRIO_CLASS_IDLE:
+ cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
+ cfqq->ioprio = 7;
+ cfq_clear_cfqq_idle_window(cfqq);
+ break;
+ }
+
+ /*
+ * keep track of original prio settings in case we have to temporarily
+ * elevate the priority of this queue
+ */
+ cfqq->org_ioprio = cfqq->ioprio;
+ cfqq->org_ioprio_class = cfqq->ioprio_class;
+
+ if (cfq_cfqq_on_rr(cfqq))
+ cfq_resort_rr_list(cfqq, 0);
+
+ cfq_clear_cfqq_prio_changed(cfqq);
+}
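cfq_init_prio_data() above splits tsk->ioprio into a class and a level, and the hash lookup earlier packs them back together with IOPRIO_PRIO_VALUE(); a sketch of that packing, assuming the 13-bit class shift used by ioprio.h in this series:

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_PRIO_CLASS(mask)		((mask) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(mask)		((mask) & ((1 << IOPRIO_CLASS_SHIFT) - 1))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

int main(void)
{
	int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);	/* best-effort, level 4 */

	printf("packed %#x -> class %d, level %d\n",
	       ioprio, IOPRIO_PRIO_CLASS(ioprio), IOPRIO_PRIO_DATA(ioprio));
	return 0;
}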
+
+static inline void changed_ioprio(struct cfq_queue *cfqq)
+{
+ if (cfqq) {
+ struct cfq_data *cfqd = cfqq->cfqd;
+
+ spin_lock(cfqd->queue->queue_lock);
+ cfq_mark_cfqq_prio_changed(cfqq);
+ cfq_init_prio_data(cfqq);
+ spin_unlock(cfqd->queue->queue_lock);
+ }
+}
+
+/*
+ * callback from sys_ioprio_set, irqs are disabled
+ */
+static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
+{
+ struct cfq_io_context *cic = ioc->cic;
+
+ changed_ioprio(cic->cfqq);
+
+ list_for_each_entry(cic, &cic->list, list)
+ changed_ioprio(cic->cfqq);
+
+ return 0;
+}
+
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
+ int gfp_mask)
+{
+ const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
+ struct cfq_queue *cfqq, *new_cfqq = NULL;
+
+retry:
+ cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
+
+ if (!cfqq) {
+ if (new_cfqq) {
+ cfqq = new_cfqq;
+ new_cfqq = NULL;
+ } else if (gfp_mask & __GFP_WAIT) {
+ spin_unlock_irq(cfqd->queue->queue_lock);
+ new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+ spin_lock_irq(cfqd->queue->queue_lock);
+ goto retry;
+ } else {
+ cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+ if (!cfqq)
+ goto out;
+ }
+
+ memset(cfqq, 0, sizeof(*cfqq));
+
+ INIT_HLIST_NODE(&cfqq->cfq_hash);
+ INIT_LIST_HEAD(&cfqq->cfq_list);
+ RB_CLEAR_ROOT(&cfqq->sort_list);
+ INIT_LIST_HEAD(&cfqq->fifo);
+
+ cfqq->key = key;
+ hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+ atomic_set(&cfqq->ref, 0);
+ cfqq->cfqd = cfqd;
+ atomic_inc(&cfqd->ref);
+ cfqq->service_last = 0;
+ /*
+ * set ->slice_left to allow preemption for a new process
+ */
+ cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
+ cfq_mark_cfqq_idle_window(cfqq);
+ cfq_mark_cfqq_prio_changed(cfqq);
+ cfq_init_prio_data(cfqq);
+ }
+
+ if (new_cfqq)
+ kmem_cache_free(cfq_pool, new_cfqq);
+
+ atomic_inc(&cfqq->ref);
+out:
+ WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
+ return cfqq;
+}
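cfq_get_queue() above drops queue_lock around a __GFP_WAIT allocation and then re-runs the hash lookup, since another context may have created the queue in the meantime; a rough user-space analogue of that idiom, where the pthread mutex, the table and lookup() are stand-ins invented for illustration:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *table[16];

static void *lookup(int key)
{
	return table[key & 15];
}

static void *get_object(int key, int may_sleep)
{
	void *obj, *new_obj = NULL;

	pthread_mutex_lock(&lock);
retry:
	obj = lookup(key);
	if (!obj) {
		if (new_obj) {
			/* use the spare allocated while the lock was dropped */
			obj = new_obj;
			new_obj = NULL;
		} else if (may_sleep) {
			/* drop the lock across a potentially sleeping allocation */
			pthread_mutex_unlock(&lock);
			new_obj = malloc(64);
			pthread_mutex_lock(&lock);
			/* someone may have installed the object meanwhile */
			goto retry;
		} else {
			obj = malloc(64);	/* atomic-style attempt, may fail */
			if (!obj)
				goto out;
		}
		table[key & 15] = obj;		/* install under the lock */
	}
out:
	if (new_obj)
		free(new_obj);			/* lost the race, discard the spare */
	pthread_mutex_unlock(&lock);
	return obj;
}

int main(void)
{
	return get_object(5, 1) ? 0 : 1;
}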
+
/*
* Setup general io context and cfq io context. There can be several cfq
* io contexts per general io context, if this process is doing io to more
@@ -1102,39 +1571,39 @@ static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags)
* cfqq, so we don't need to worry about it disappearing
*/
static struct cfq_io_context *
-cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags)
+cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask)
{
- struct cfq_data *cfqd = (*cfqq)->cfqd;
- struct cfq_queue *__cfqq = *cfqq;
+ struct io_context *ioc = NULL;
struct cfq_io_context *cic;
- struct io_context *ioc;
- might_sleep_if(gfp_flags & __GFP_WAIT);
+ might_sleep_if(gfp_mask & __GFP_WAIT);
- ioc = get_io_context(gfp_flags);
+ ioc = get_io_context(gfp_mask);
if (!ioc)
return NULL;
if ((cic = ioc->cic) == NULL) {
- cic = cfq_alloc_io_context(gfp_flags);
+ cic = cfq_alloc_io_context(cfqd, gfp_mask);
if (cic == NULL)
goto err;
+ /*
+ * manually increment generic io_context usage count, it
+ * cannot go away since we are already holding one ref to it
+ */
ioc->cic = cic;
+ ioc->set_ioprio = cfq_ioc_set_ioprio;
cic->ioc = ioc;
- cic->cfqq = __cfqq;
- atomic_inc(&__cfqq->ref);
+ cic->key = cfqd;
+ atomic_inc(&cfqd->ref);
} else {
struct cfq_io_context *__cic;
- unsigned long flags;
/*
- * since the first cic on the list is actually the head
- * itself, need to check this here or we'll duplicate an
- * cic per ioc for no reason
+ * the first cic on the list is actually the head itself
*/
- if (cic->cfqq == __cfqq)
+ if (cic->key == cfqd)
goto out;
/*
@@ -1142,152 +1611,250 @@ cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags)
* should be ok here, the list will usually not be more than
* 1 or a few entries long
*/
- spin_lock_irqsave(&ioc->lock, flags);
list_for_each_entry(__cic, &cic->list, list) {
/*
* this process is already holding a reference to
* this queue, so no need to get one more
*/
- if (__cic->cfqq == __cfqq) {
+ if (__cic->key == cfqd) {
cic = __cic;
- spin_unlock_irqrestore(&ioc->lock, flags);
goto out;
}
}
- spin_unlock_irqrestore(&ioc->lock, flags);
/*
* nope, process doesn't have a cic associated with this
* cfqq yet. get a new one and add to list
*/
- __cic = cfq_alloc_io_context(gfp_flags);
+ __cic = cfq_alloc_io_context(cfqd, gfp_mask);
if (__cic == NULL)
goto err;
__cic->ioc = ioc;
- __cic->cfqq = __cfqq;
- atomic_inc(&__cfqq->ref);
- spin_lock_irqsave(&ioc->lock, flags);
+ __cic->key = cfqd;
+ atomic_inc(&cfqd->ref);
list_add(&__cic->list, &cic->list);
- spin_unlock_irqrestore(&ioc->lock, flags);
-
cic = __cic;
- *cfqq = __cfqq;
}
out:
- /*
- * if key_type has been changed on the fly, we lazily rehash
- * each queue at lookup time
- */
- if ((*cfqq)->key_type != cfqd->key_type)
- cfq_rehash_cfqq(cfqd, cfqq, cic);
-
return cic;
err:
put_io_context(ioc);
return NULL;
}
-static struct cfq_queue *
-__cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask)
+static void
+cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
- const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
- struct cfq_queue *cfqq, *new_cfqq = NULL;
-
-retry:
- cfqq = __cfq_find_cfq_hash(cfqd, key, hashval);
+ unsigned long elapsed, ttime;
- if (!cfqq) {
- if (new_cfqq) {
- cfqq = new_cfqq;
- new_cfqq = NULL;
- } else {
- spin_unlock_irq(cfqd->queue->queue_lock);
- new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
- spin_lock_irq(cfqd->queue->queue_lock);
+ /*
+ * if this context already has stuff queued, thinktime is from
+ * last queue not last end
+ */
+#if 0
+ if (time_after(cic->last_end_request, cic->last_queue))
+ elapsed = jiffies - cic->last_end_request;
+ else
+ elapsed = jiffies - cic->last_queue;
+#else
+ elapsed = jiffies - cic->last_end_request;
+#endif
- if (!new_cfqq && !(gfp_mask & __GFP_WAIT))
- goto out;
+ ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
- goto retry;
- }
+ cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
+ cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
+ cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
+}
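The think time bookkeeping above is a fixed-point decaying average (7/8 old weight, 1/8 new, scaled by 256 to keep precision in integer math); a sketch feeding it a constant, invented 4-jiffy think time, showing ttime_samples climbing towards 256 and ttime_mean settling near the input:

#include <stdio.h>

int main(void)
{
	unsigned long samples = 0, total = 0, mean = 0;
	int i;

	for (i = 0; i < 32; i++) {
		unsigned long ttime = 4;	/* measured think time, in jiffies */

		/* same update as cfq_update_io_thinktime() above */
		samples = (7 * samples + 256) / 8;
		total = (7 * total + 256 * ttime) / 8;
		mean = (total + 128) / samples;
	}
	printf("samples=%lu mean=%lu jiffies\n", samples, mean);
	return 0;
}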
- memset(cfqq, 0, sizeof(*cfqq));
+#define sample_valid(samples) ((samples) > 80)
- INIT_HLIST_NODE(&cfqq->cfq_hash);
- INIT_LIST_HEAD(&cfqq->cfq_list);
- RB_CLEAR_ROOT(&cfqq->sort_list);
- INIT_LIST_HEAD(&cfqq->fifo[0]);
- INIT_LIST_HEAD(&cfqq->fifo[1]);
+/*
+ * Disable idle window if the process thinks too long or seeks so much that
+ * it doesn't matter
+ */
+static void
+cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct cfq_io_context *cic)
+{
+ int enable_idle = cfq_cfqq_idle_window(cfqq);
- cfqq->key = key;
- hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
- atomic_set(&cfqq->ref, 0);
- cfqq->cfqd = cfqd;
- atomic_inc(&cfqd->ref);
- cfqq->key_type = cfqd->key_type;
- cfqq->service_start = ~0UL;
+ if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+ enable_idle = 0;
+ else if (sample_valid(cic->ttime_samples)) {
+ if (cic->ttime_mean > cfqd->cfq_slice_idle)
+ enable_idle = 0;
+ else
+ enable_idle = 1;
}
- if (new_cfqq)
- kmem_cache_free(cfq_pool, new_cfqq);
+ if (enable_idle)
+ cfq_mark_cfqq_idle_window(cfqq);
+ else
+ cfq_clear_cfqq_idle_window(cfqq);
+}
- atomic_inc(&cfqq->ref);
-out:
- WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
- return cfqq;
+
+/*
+ * Check if new_cfqq should preempt the currently active queue. Return 0 for
+ * no or if we aren't sure, a 1 will cause a preempt.
+ */
+static int
+cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
+ struct cfq_rq *crq)
+{
+ struct cfq_queue *cfqq = cfqd->active_queue;
+
+ if (cfq_class_idle(new_cfqq))
+ return 0;
+
+ if (!cfqq)
+ return 1;
+
+ if (cfq_class_idle(cfqq))
+ return 1;
+ if (!cfq_cfqq_wait_request(new_cfqq))
+ return 0;
+ /*
+ * if it doesn't have slice left, forget it
+ */
+ if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
+ return 0;
+ if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * cfqq preempts the active queue. if we allowed preempt with no slice left,
+ * let it have half of its nominal slice.
+ */
+static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ struct cfq_queue *__cfqq, *next;
+
+ list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
+ cfq_resort_rr_list(__cfqq, 1);
+
+ if (!cfqq->slice_left)
+ cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
+
+ cfqq->slice_end = cfqq->slice_left + jiffies;
+ __cfq_slice_expired(cfqd, cfqq, 1);
+ __cfq_set_active_queue(cfqd, cfqq);
}
-static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
+/*
+ * should really be a ll_rw_blk.c helper
+ */
+static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ request_queue_t *q = cfqd->queue;
+
+ if (!blk_queue_plugged(q))
+ q->request_fn(q);
+ else
+ __generic_unplug_device(q);
+}
+
+/*
+ * Called when a new fs request (crq) is added (to cfqq). Check if there's
+ * something we should do about it
+ */
+static void
+cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct cfq_rq *crq)
{
- crq->is_sync = 0;
- if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE)
- crq->is_sync = 1;
+ const int sync = cfq_crq_is_sync(crq);
+
+ cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+
+ if (sync) {
+ struct cfq_io_context *cic = crq->io_context;
+
+ cfq_update_io_thinktime(cfqd, cic);
+ cfq_update_idle_window(cfqd, cfqq, cic);
+
+ cic->last_queue = jiffies;
+ }
+
+ if (cfqq == cfqd->active_queue) {
+ /*
+ * if we are waiting for a request for this queue, let it rip
+ * immediately and flag that we must not expire this queue
+ * just now
+ */
+ if (cfq_cfqq_wait_request(cfqq)) {
+ cfq_mark_cfqq_must_dispatch(cfqq);
+ del_timer(&cfqd->idle_slice_timer);
+ cfq_start_queueing(cfqd, cfqq);
+ }
+ } else if (cfq_should_preempt(cfqd, cfqq, crq)) {
+ /*
+ * not the active queue - expire current slice if it is
+		 * idle and has expired its mean thinktime or this new queue
+ * has some old slice time left and is of higher priority
+ */
+ cfq_preempt_queue(cfqd, cfqq);
+ cfq_mark_cfqq_must_dispatch(cfqq);
+ cfq_start_queueing(cfqd, cfqq);
+ }
+}
+
+static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
+{
+ struct cfq_rq *crq = RQ_DATA(rq);
+ struct cfq_queue *cfqq = crq->cfq_queue;
+
+ cfq_init_prio_data(cfqq);
cfq_add_crq_rb(crq);
- crq->queue_start = jiffies;
- list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]);
+ list_add_tail(&rq->queuelist, &cfqq->fifo);
+
+ if (rq_mergeable(rq)) {
+ cfq_add_crq_hash(cfqd, crq);
+
+ if (!cfqd->queue->last_merge)
+ cfqd->queue->last_merge = rq;
+ }
+
+ cfq_crq_enqueued(cfqd, cfqq, crq);
}
static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_rq *crq = RQ_DATA(rq);
switch (where) {
case ELEVATOR_INSERT_BACK:
- while (cfq_dispatch_requests(q, cfqd->cfq_quantum))
+ while (cfq_dispatch_requests(q, INT_MAX, 1))
;
list_add_tail(&rq->queuelist, &q->queue_head);
+ /*
+ * If we were idling with pending requests on
+ * inactive cfqqs, force dispatching will
+ * remove the idle timer and the queue won't
+ * be kicked by __make_request() afterward.
+ * Kick it here.
+ */
+ cfq_schedule_dispatch(cfqd);
break;
case ELEVATOR_INSERT_FRONT:
list_add(&rq->queuelist, &q->queue_head);
break;
case ELEVATOR_INSERT_SORT:
BUG_ON(!blk_fs_request(rq));
- cfq_enqueue(cfqd, crq);
+ cfq_enqueue(cfqd, rq);
break;
default:
printk("%s: bad insert point %d\n", __FUNCTION__,where);
return;
}
-
- if (rq_mergeable(rq)) {
- cfq_add_crq_hash(cfqd, crq);
-
- if (!q->last_merge)
- q->last_merge = rq;
- }
-}
-
-static int cfq_queue_empty(request_queue_t *q)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- return list_empty(&q->queue_head) && list_empty(&cfqd->rr_list);
}
static void cfq_completed_request(request_queue_t *q, struct request *rq)
@@ -1300,9 +1867,11 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
cfqq = crq->cfq_queue;
- if (crq->in_flight) {
- WARN_ON(!cfqq->in_flight);
- cfqq->in_flight--;
+ if (cfq_crq_in_flight(crq)) {
+ const int sync = cfq_crq_is_sync(crq);
+
+ WARN_ON(!cfqq->on_dispatch[sync]);
+ cfqq->on_dispatch[sync]--;
}
cfq_account_completion(cfqq, crq);
@@ -1332,51 +1901,136 @@ cfq_latter_request(request_queue_t *q, struct request *rq)
return NULL;
}
-static int cfq_may_queue(request_queue_t *q, int rw)
+/*
+ * we temporarily boost lower priority queues if they are holding fs exclusive
+ * resources. they are boosted to normal prio (CLASS_BE/4)
+ */
+static void cfq_prio_boost(struct cfq_queue *cfqq)
{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq;
- int ret = ELV_MQUEUE_MAY;
+ const int ioprio_class = cfqq->ioprio_class;
+ const int ioprio = cfqq->ioprio;
- if (current->flags & PF_MEMALLOC)
- return ELV_MQUEUE_MAY;
+ if (has_fs_excl()) {
+ /*
+ * boost idle prio on transactions that would lock out other
+ * users of the filesystem
+ */
+ if (cfq_class_idle(cfqq))
+ cfqq->ioprio_class = IOPRIO_CLASS_BE;
+ if (cfqq->ioprio > IOPRIO_NORM)
+ cfqq->ioprio = IOPRIO_NORM;
+ } else {
+ /*
+ * check if we need to unboost the queue
+ */
+ if (cfqq->ioprio_class != cfqq->org_ioprio_class)
+ cfqq->ioprio_class = cfqq->org_ioprio_class;
+ if (cfqq->ioprio != cfqq->org_ioprio)
+ cfqq->ioprio = cfqq->org_ioprio;
+ }
- cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current));
- if (cfqq) {
- int limit = cfqd->max_queued;
+ /*
+ * refile between round-robin lists if we moved the priority class
+ */
+ if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
+ cfq_cfqq_on_rr(cfqq))
+ cfq_resort_rr_list(cfqq, 0);
+}
- if (cfqq->allocated[rw] < cfqd->cfq_queued)
- return ELV_MQUEUE_MUST;
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+{
+ if (rw == READ || process_sync(task))
+ return task->pid;
- if (cfqd->busy_queues)
- limit = q->nr_requests / cfqd->busy_queues;
+ return CFQ_KEY_ASYNC;
+}
- if (limit < cfqd->cfq_queued)
- limit = cfqd->cfq_queued;
- else if (limit > cfqd->max_queued)
- limit = cfqd->max_queued;
+static inline int
+__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct task_struct *task, int rw)
+{
+#if 1
+ if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
+ !cfq_cfqq_must_alloc_slice(cfqq)) {
+ cfq_mark_cfqq_must_alloc_slice(cfqq);
+ return ELV_MQUEUE_MUST;
+ }
- if (cfqq->allocated[rw] >= limit) {
- if (limit > cfqq->alloc_limit[rw])
- cfqq->alloc_limit[rw] = limit;
+ return ELV_MQUEUE_MAY;
+#else
+ if (!cfqq || task->flags & PF_MEMALLOC)
+ return ELV_MQUEUE_MAY;
+ if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
+ if (cfq_cfqq_wait_request(cfqq))
+ return ELV_MQUEUE_MUST;
- ret = ELV_MQUEUE_NO;
+ /*
+ * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
+ * can quickly flood the queue with writes from a single task
+ */
+ if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
+ cfq_mark_cfqq_must_alloc_slice(cfqq);
+ return ELV_MQUEUE_MUST;
}
+
+ return ELV_MQUEUE_MAY;
}
+ if (cfq_class_idle(cfqq))
+ return ELV_MQUEUE_NO;
+ if (cfqq->allocated[rw] >= cfqd->max_queued) {
+ struct io_context *ioc = get_io_context(GFP_ATOMIC);
+ int ret = ELV_MQUEUE_NO;
- return ret;
+ if (ioc && ioc->nr_batch_requests)
+ ret = ELV_MQUEUE_MAY;
+
+ put_io_context(ioc);
+ return ret;
+ }
+
+ return ELV_MQUEUE_MAY;
+#endif
+}
+
+static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct task_struct *tsk = current;
+ struct cfq_queue *cfqq;
+
+ /*
+ * don't force setup of a queue from here, as a call to may_queue
+ * does not necessarily imply that a request actually will be queued.
+ * so just lookup a possibly existing queue, or return 'may queue'
+ * if that fails
+ */
+ cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+ if (cfqq) {
+ cfq_init_prio_data(cfqq);
+ cfq_prio_boost(cfqq);
+
+ return __cfq_may_queue(cfqd, cfqq, tsk, rw);
+ }
+
+ return ELV_MQUEUE_MAY;
}
static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct request_list *rl = &q->rq;
- const int write = waitqueue_active(&rl->wait[WRITE]);
- const int read = waitqueue_active(&rl->wait[READ]);
- if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ])
- wake_up(&rl->wait[READ]);
- if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE])
- wake_up(&rl->wait[WRITE]);
+ if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
+ smp_mb();
+ if (waitqueue_active(&rl->wait[READ]))
+ wake_up(&rl->wait[READ]);
+ }
+
+ if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
+ smp_mb();
+ if (waitqueue_active(&rl->wait[WRITE]))
+ wake_up(&rl->wait[WRITE]);
+ }
}
/*
@@ -1389,69 +2043,61 @@ static void cfq_put_request(request_queue_t *q, struct request *rq)
if (crq) {
struct cfq_queue *cfqq = crq->cfq_queue;
+ const int rw = rq_data_dir(rq);
- BUG_ON(q->last_merge == rq);
- BUG_ON(!hlist_unhashed(&crq->hash));
+ BUG_ON(!cfqq->allocated[rw]);
+ cfqq->allocated[rw]--;
- if (crq->io_context)
- put_io_context(crq->io_context->ioc);
-
- BUG_ON(!cfqq->allocated[crq->is_write]);
- cfqq->allocated[crq->is_write]--;
+ put_io_context(crq->io_context->ioc);
mempool_free(crq, cfqd->crq_pool);
rq->elevator_private = NULL;
- smp_mb();
cfq_check_waiters(q, cfqq);
cfq_put_queue(cfqq);
}
}
/*
- * Allocate cfq data structures associated with this request. A queue and
+ * Allocate cfq data structures associated with this request.
*/
-static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
+static int
+cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
+ int gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct task_struct *tsk = current;
struct cfq_io_context *cic;
const int rw = rq_data_dir(rq);
- struct cfq_queue *cfqq, *saved_cfqq;
+ pid_t key = cfq_queue_pid(tsk, rw);
+ struct cfq_queue *cfqq;
struct cfq_rq *crq;
unsigned long flags;
might_sleep_if(gfp_mask & __GFP_WAIT);
+ cic = cfq_get_io_context(cfqd, key, gfp_mask);
+
spin_lock_irqsave(q->queue_lock, flags);
- cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask);
- if (!cfqq)
- goto out_lock;
+ if (!cic)
+ goto queue_fail;
+
+ if (!cic->cfqq) {
+ cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
+ if (!cfqq)
+ goto queue_fail;
-repeat:
- if (cfqq->allocated[rw] >= cfqd->max_queued)
- goto out_lock;
+ cic->cfqq = cfqq;
+ } else
+ cfqq = cic->cfqq;
cfqq->allocated[rw]++;
+ cfq_clear_cfqq_must_alloc(cfqq);
+ cfqd->rq_starved = 0;
+ atomic_inc(&cfqq->ref);
spin_unlock_irqrestore(q->queue_lock, flags);
- /*
- * if hashing type has changed, the cfq_queue might change here.
- */
- saved_cfqq = cfqq;
- cic = cfq_get_io_context(&cfqq, gfp_mask);
- if (!cic)
- goto err;
-
- /*
- * repeat allocation checks on queue change
- */
- if (unlikely(saved_cfqq != cfqq)) {
- spin_lock_irqsave(q->queue_lock, flags);
- saved_cfqq->allocated[rw]--;
- goto repeat;
- }
-
crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
if (crq) {
RB_CLEAR(&crq->rb_node);
@@ -1460,24 +2106,141 @@ repeat:
INIT_HLIST_NODE(&crq->hash);
crq->cfq_queue = cfqq;
crq->io_context = cic;
- crq->service_start = crq->queue_start = 0;
- crq->in_flight = crq->accounted = crq->is_sync = 0;
- crq->is_write = rw;
+ cfq_clear_crq_in_flight(crq);
+ cfq_clear_crq_in_driver(crq);
+ cfq_clear_crq_requeued(crq);
+
+ if (rw == READ || process_sync(tsk))
+ cfq_mark_crq_is_sync(crq);
+ else
+ cfq_clear_crq_is_sync(crq);
+
rq->elevator_private = crq;
- cfqq->alloc_limit[rw] = 0;
return 0;
}
- put_io_context(cic->ioc);
-err:
spin_lock_irqsave(q->queue_lock, flags);
cfqq->allocated[rw]--;
+ if (!(cfqq->allocated[0] + cfqq->allocated[1]))
+ cfq_mark_cfqq_must_alloc(cfqq);
cfq_put_queue(cfqq);
-out_lock:
+queue_fail:
+ if (cic)
+ put_io_context(cic->ioc);
+ /*
+ * mark us rq allocation starved. we need to kickstart the process
+ * ourselves if there are no pending requests that can do it for us.
+ * that would be an extremely rare OOM situation
+ */
+ cfqd->rq_starved = 1;
+ cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(q->queue_lock, flags);
return 1;
}
+static void cfq_kick_queue(void *data)
+{
+ request_queue_t *q = data;
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ if (cfqd->rq_starved) {
+ struct request_list *rl = &q->rq;
+
+ /*
+ * we aren't guaranteed to get a request after this, but we
+ * have to be opportunistic
+ */
+ smp_mb();
+ if (waitqueue_active(&rl->wait[READ]))
+ wake_up(&rl->wait[READ]);
+ if (waitqueue_active(&rl->wait[WRITE]))
+ wake_up(&rl->wait[WRITE]);
+ }
+
+ blk_remove_plug(q);
+ q->request_fn(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * Timer running if the active_queue is currently idling inside its time slice
+ */
+static void cfq_idle_slice_timer(unsigned long data)
+{
+ struct cfq_data *cfqd = (struct cfq_data *) data;
+ struct cfq_queue *cfqq;
+ unsigned long flags;
+
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+ if ((cfqq = cfqd->active_queue) != NULL) {
+ unsigned long now = jiffies;
+
+ /*
+ * expired
+ */
+ if (time_after(now, cfqq->slice_end))
+ goto expire;
+
+ /*
+ * only expire and reinvoke request handler, if there are
+ * other queues with pending requests
+ */
+ if (!cfq_pending_requests(cfqd)) {
+ cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
+ add_timer(&cfqd->idle_slice_timer);
+ goto out_cont;
+ }
+
+ /*
+ * not expired and it has a request pending, let it dispatch
+ */
+ if (!RB_EMPTY(&cfqq->sort_list)) {
+ cfq_mark_cfqq_must_dispatch(cfqq);
+ goto out_kick;
+ }
+ }
+expire:
+ cfq_slice_expired(cfqd, 0);
+out_kick:
+ cfq_schedule_dispatch(cfqd);
+out_cont:
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+/*
+ * Timer running if an idle class queue is waiting for service
+ */
+static void cfq_idle_class_timer(unsigned long data)
+{
+ struct cfq_data *cfqd = (struct cfq_data *) data;
+ unsigned long flags, end;
+
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+ /*
+ * race with a non-idle queue, reset timer
+ */
+ end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+ if (!time_after_eq(jiffies, end)) {
+ cfqd->idle_class_timer.expires = end;
+ add_timer(&cfqd->idle_class_timer);
+ } else
+ cfq_schedule_dispatch(cfqd);
+
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
+{
+ del_timer_sync(&cfqd->idle_slice_timer);
+ del_timer_sync(&cfqd->idle_class_timer);
+ blk_sync_queue(cfqd->queue);
+}
+
static void cfq_put_cfqd(struct cfq_data *cfqd)
{
request_queue_t *q = cfqd->queue;
@@ -1487,6 +2250,9 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
blk_put_queue(q);
+ cfq_shutdown_timer_wq(cfqd);
+ q->elevator->elevator_data = NULL;
+
mempool_destroy(cfqd->crq_pool);
kfree(cfqd->crq_hash);
kfree(cfqd->cfq_hash);
@@ -1495,7 +2261,10 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
static void cfq_exit_queue(elevator_t *e)
{
- cfq_put_cfqd(e->elevator_data);
+ struct cfq_data *cfqd = e->elevator_data;
+
+ cfq_shutdown_timer_wq(cfqd);
+ cfq_put_cfqd(cfqd);
}
static int cfq_init_queue(request_queue_t *q, elevator_t *e)
@@ -1508,7 +2277,13 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
return -ENOMEM;
memset(cfqd, 0, sizeof(*cfqd));
- INIT_LIST_HEAD(&cfqd->rr_list);
+
+ for (i = 0; i < CFQ_PRIO_LISTS; i++)
+ INIT_LIST_HEAD(&cfqd->rr_list[i]);
+
+ INIT_LIST_HEAD(&cfqd->busy_rr);
+ INIT_LIST_HEAD(&cfqd->cur_rr);
+ INIT_LIST_HEAD(&cfqd->idle_rr);
INIT_LIST_HEAD(&cfqd->empty_list);
cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
@@ -1533,24 +2308,32 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
cfqd->queue = q;
atomic_inc(&q->refcnt);
- /*
- * just set it to some high value, we want anyone to be able to queue
- * some requests. fairness is handled differently
- */
- q->nr_requests = 1024;
- cfqd->max_queued = q->nr_requests / 16;
+ cfqd->max_queued = q->nr_requests / 4;
q->nr_batching = cfq_queued;
- cfqd->key_type = CFQ_KEY_TGID;
- cfqd->find_best_crq = 1;
+
+ init_timer(&cfqd->idle_slice_timer);
+ cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
+ cfqd->idle_slice_timer.data = (unsigned long) cfqd;
+
+ init_timer(&cfqd->idle_class_timer);
+ cfqd->idle_class_timer.function = cfq_idle_class_timer;
+ cfqd->idle_class_timer.data = (unsigned long) cfqd;
+
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+
atomic_set(&cfqd->ref, 1);
cfqd->cfq_queued = cfq_queued;
cfqd->cfq_quantum = cfq_quantum;
- cfqd->cfq_fifo_expire_r = cfq_fifo_expire_r;
- cfqd->cfq_fifo_expire_w = cfq_fifo_expire_w;
- cfqd->cfq_fifo_batch_expire = cfq_fifo_rate;
+ cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
+ cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
cfqd->cfq_back_max = cfq_back_max;
cfqd->cfq_back_penalty = cfq_back_penalty;
+ cfqd->cfq_slice[0] = cfq_slice_async;
+ cfqd->cfq_slice[1] = cfq_slice_sync;
+ cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
+ cfqd->cfq_slice_idle = cfq_slice_idle;
+ cfqd->cfq_max_depth = cfq_max_depth;
return 0;
out_crqpool:
@@ -1595,7 +2378,6 @@ fail:
return -ENOMEM;
}
-
/*
* sysfs parts below -->
*/
@@ -1620,45 +2402,6 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
return count;
}
-static ssize_t
-cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count)
-{
- max_elapsed_dispatch = max_elapsed_crq = 0;
- return count;
-}
-
-static ssize_t
-cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count)
-{
- spin_lock_irq(cfqd->queue->queue_lock);
- if (!strncmp(page, "pgid", 4))
- cfqd->key_type = CFQ_KEY_PGID;
- else if (!strncmp(page, "tgid", 4))
- cfqd->key_type = CFQ_KEY_TGID;
- else if (!strncmp(page, "uid", 3))
- cfqd->key_type = CFQ_KEY_UID;
- else if (!strncmp(page, "gid", 3))
- cfqd->key_type = CFQ_KEY_GID;
- spin_unlock_irq(cfqd->queue->queue_lock);
- return count;
-}
-
-static ssize_t
-cfq_read_key_type(struct cfq_data *cfqd, char *page)
-{
- ssize_t len = 0;
- int i;
-
- for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) {
- if (cfqd->key_type == i)
- len += sprintf(page+len, "[%s] ", cfq_key_types[i]);
- else
- len += sprintf(page+len, "%s ", cfq_key_types[i]);
- }
- len += sprintf(page+len, "\n");
- return len;
-}
-
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \
{ \
@@ -1669,12 +2412,15 @@ static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
-SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1);
-SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1);
-SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1);
-SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0);
+SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
+SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
+SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
+SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
+SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -1694,12 +2440,15 @@ static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1);
-STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0);
+STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
#undef STORE_FUNCTION
static struct cfq_fs_entry cfq_quantum_entry = {
@@ -1712,25 +2461,15 @@ static struct cfq_fs_entry cfq_queued_entry = {
.show = cfq_queued_show,
.store = cfq_queued_store,
};
-static struct cfq_fs_entry cfq_fifo_expire_r_entry = {
+static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
.attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_fifo_expire_r_show,
- .store = cfq_fifo_expire_r_store,
+ .show = cfq_fifo_expire_sync_show,
+ .store = cfq_fifo_expire_sync_store,
};
-static struct cfq_fs_entry cfq_fifo_expire_w_entry = {
+static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
.attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_fifo_expire_w_show,
- .store = cfq_fifo_expire_w_store,
-};
-static struct cfq_fs_entry cfq_fifo_batch_expire_entry = {
- .attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_fifo_batch_expire_show,
- .store = cfq_fifo_batch_expire_store,
-};
-static struct cfq_fs_entry cfq_find_best_entry = {
- .attr = {.name = "find_best_crq", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_find_best_show,
- .store = cfq_find_best_store,
+ .show = cfq_fifo_expire_async_show,
+ .store = cfq_fifo_expire_async_store,
};
static struct cfq_fs_entry cfq_back_max_entry = {
.attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
@@ -1742,27 +2481,44 @@ static struct cfq_fs_entry cfq_back_penalty_entry = {
.show = cfq_back_penalty_show,
.store = cfq_back_penalty_store,
};
-static struct cfq_fs_entry cfq_clear_elapsed_entry = {
- .attr = {.name = "clear_elapsed", .mode = S_IWUSR },
- .store = cfq_clear_elapsed,
+static struct cfq_fs_entry cfq_slice_sync_entry = {
+ .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_slice_sync_show,
+ .store = cfq_slice_sync_store,
+};
+static struct cfq_fs_entry cfq_slice_async_entry = {
+ .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_slice_async_show,
+ .store = cfq_slice_async_store,
};
-static struct cfq_fs_entry cfq_key_type_entry = {
- .attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR },
- .show = cfq_read_key_type,
- .store = cfq_set_key_type,
+static struct cfq_fs_entry cfq_slice_async_rq_entry = {
+ .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_slice_async_rq_show,
+ .store = cfq_slice_async_rq_store,
+};
+static struct cfq_fs_entry cfq_slice_idle_entry = {
+ .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_slice_idle_show,
+ .store = cfq_slice_idle_store,
+};
+static struct cfq_fs_entry cfq_max_depth_entry = {
+ .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
+ .show = cfq_max_depth_show,
+ .store = cfq_max_depth_store,
};
static struct attribute *default_attrs[] = {
&cfq_quantum_entry.attr,
&cfq_queued_entry.attr,
- &cfq_fifo_expire_r_entry.attr,
- &cfq_fifo_expire_w_entry.attr,
- &cfq_fifo_batch_expire_entry.attr,
- &cfq_key_type_entry.attr,
- &cfq_find_best_entry.attr,
+ &cfq_fifo_expire_sync_entry.attr,
+ &cfq_fifo_expire_async_entry.attr,
&cfq_back_max_entry.attr,
&cfq_back_penalty_entry.attr,
- &cfq_clear_elapsed_entry.attr,
+ &cfq_slice_sync_entry.attr,
+ &cfq_slice_async_entry.attr,
+ &cfq_slice_async_rq_entry.attr,
+ &cfq_slice_idle_entry.attr,
+ &cfq_max_depth_entry.attr,
NULL,
};
@@ -1832,21 +2588,46 @@ static int __init cfq_init(void)
{
int ret;
+ /*
+ * could be 0 on HZ < 1000 setups
+ */
+ if (!cfq_slice_async)
+ cfq_slice_async = 1;
+ if (!cfq_slice_idle)
+ cfq_slice_idle = 1;
+
if (cfq_slab_setup())
return -ENOMEM;
ret = elv_register(&iosched_cfq);
- if (!ret) {
- __module_get(THIS_MODULE);
- return 0;
- }
+ if (ret)
+ cfq_slab_kill();
- cfq_slab_kill();
return ret;
}
static void __exit cfq_exit(void)
{
+ struct task_struct *g, *p;
+ unsigned long flags;
+
+ read_lock_irqsave(&tasklist_lock, flags);
+
+ /*
+ * iterate each process in the system, removing our io_context
+ */
+ do_each_thread(g, p) {
+ struct io_context *ioc = p->io_context;
+
+ if (ioc && ioc->cic) {
+ ioc->cic->exit(ioc->cic);
+ cfq_free_io_context(ioc->cic);
+ ioc->cic = NULL;
+ }
+ } while_each_thread(g, p);
+
+ read_unlock_irqrestore(&tasklist_lock, flags);
+
cfq_slab_kill();
elv_unregister(&iosched_cfq);
}
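
The cfq-iosched hunks above retire the old key_type/find_best_crq/clear_elapsed attributes and expose the time-sliced tunables instead (fifo_expire_sync, fifo_expire_async, back_seek_max, slice_sync, slice_async, slice_async_rq, slice_idle, max_depth), all generated by the SHOW_FUNCTION/STORE_FUNCTION macro pairs and published through the elevator's sysfs directory. As a hedged illustration only — the /sys/block/hda/queue/iosched/ path and the "hda" disk are assumptions for the example, not something this patch states — a small userspace C program can read one of the new tunables and write it back:

/* Hedged sketch: assumes the CFQ attributes appear under
 * /sys/block/<disk>/queue/iosched/ and that "hda" is using CFQ. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/hda/queue/iosched/slice_idle";
	char buf[64];
	FILE *f;

	f = fopen(path, "r");			/* read the current value */
	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("slice_idle: %s", buf);

	f = fopen(path, "w");			/* store a new value */
	if (!f || fputs("8\n", f) == EOF || fclose(f) == EOF) {
		perror(path);
		return 1;
	}
	return 0;
}

The third macro argument (__CONV) visible above suggests a unit conversion for the time-based tunables, so values read this way are most likely milliseconds rather than raw jiffies; that detail is not spelled out in this diff.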
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index 4bc2fea7327..ff5201e0215 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -760,7 +760,8 @@ static void deadline_put_request(request_queue_t *q, struct request *rq)
}
static int
-deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
+deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
+ int gfp_mask)
{
struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq;
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index f831f08f839..98f0126a2de 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -486,12 +486,13 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
return NULL;
}
-int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
+int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
+ int gfp_mask)
{
elevator_t *e = q->elevator;
if (e->ops->elevator_set_req_fn)
- return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
+ return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);
rq->elevator_private = NULL;
return 0;
@@ -505,12 +506,12 @@ void elv_put_request(request_queue_t *q, struct request *rq)
e->ops->elevator_put_req_fn(q, rq);
}
-int elv_may_queue(request_queue_t *q, int rw)
+int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
elevator_t *e = q->elevator;
if (e->ops->elevator_may_queue_fn)
- return e->ops->elevator_may_queue_fn(q, rw);
+ return e->ops->elevator_may_queue_fn(q, rw, bio);
return ELV_MQUEUE_MAY;
}
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 60e64091de1..692a5fced76 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -276,6 +276,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
rq->bio = rq->biotail = NULL;
+ rq->ioprio = 0;
rq->buffer = NULL;
rq->ref_count = 1;
rq->q = q;
@@ -1442,11 +1443,7 @@ void __generic_unplug_device(request_queue_t *q)
if (!blk_remove_plug(q))
return;
- /*
- * was plugged, fire request_fn if queue has stuff to do
- */
- if (elv_next_request(q))
- q->request_fn(q);
+ q->request_fn(q);
}
EXPORT_SYMBOL(__generic_unplug_device);
@@ -1776,8 +1773,8 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
mempool_free(rq, q->rq.rq_pool);
}
-static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
- int gfp_mask)
+static inline struct request *
+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
@@ -1790,7 +1787,7 @@ static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
*/
rq->flags = rw;
- if (!elv_set_request(q, rq, gfp_mask))
+ if (!elv_set_request(q, rq, bio, gfp_mask))
return rq;
mempool_free(rq, q->rq.rq_pool);
@@ -1870,18 +1867,20 @@ static void freed_request(request_queue_t *q, int rw)
#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
/*
- * Get a free request, queue_lock must not be held
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
*/
-static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
+ int gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
- struct io_context *ioc = get_io_context(gfp_mask);
+ struct io_context *ioc = current_io_context(GFP_ATOMIC);
if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
goto out;
- spin_lock_irq(q->queue_lock);
if (rl->count[rw]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set it as
@@ -1895,7 +1894,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
}
}
- switch (elv_may_queue(q, rw)) {
+ switch (elv_may_queue(q, rw, bio)) {
case ELV_MQUEUE_NO:
goto rq_starved;
case ELV_MQUEUE_MAY:
@@ -1909,18 +1908,25 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
* The queue is full and the allocating process is not a
* "batcher", and not exempted by the IO scheduler
*/
- spin_unlock_irq(q->queue_lock);
goto out;
}
get_rq:
+ /*
+ * Only allow batching queuers to allocate up to 50% over the defined
+ * limit of requests, otherwise we could have thousands of requests
+ * allocated with any setting of ->nr_requests
+ */
+ if (rl->count[rw] >= (3 * q->nr_requests / 2))
+ goto out;
+
rl->count[rw]++;
rl->starved[rw] = 0;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
set_queue_congested(q, rw);
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, rw, gfp_mask);
+ rq = blk_alloc_request(q, rw, bio, gfp_mask);
if (!rq) {
/*
* Allocation failed presumably due to memory. Undo anything
@@ -1943,7 +1949,6 @@ rq_starved:
if (unlikely(rl->count[rw] == 0))
rl->starved[rw] = 1;
- spin_unlock_irq(q->queue_lock);
goto out;
}
@@ -1953,31 +1958,35 @@ rq_starved:
rq_init(q, rq);
rq->rl = rl;
out:
- put_io_context(ioc);
return rq;
}
/*
* No available requests for this queue, unplug the device and wait for some
* requests to become available.
+ *
+ * Called with q->queue_lock held, and returns with it unlocked.
*/
-static struct request *get_request_wait(request_queue_t *q, int rw)
+static struct request *get_request_wait(request_queue_t *q, int rw,
+ struct bio *bio)
{
- DEFINE_WAIT(wait);
struct request *rq;
- do {
+ rq = get_request(q, rw, bio, GFP_NOIO);
+ while (!rq) {
+ DEFINE_WAIT(wait);
struct request_list *rl = &q->rq;
prepare_to_wait_exclusive(&rl->wait[rw], &wait,
TASK_UNINTERRUPTIBLE);
- rq = get_request(q, rw, GFP_NOIO);
+ rq = get_request(q, rw, bio, GFP_NOIO);
if (!rq) {
struct io_context *ioc;
- generic_unplug_device(q);
+ __generic_unplug_device(q);
+ spin_unlock_irq(q->queue_lock);
io_schedule();
/*
@@ -1986,12 +1995,13 @@ static struct request *get_request_wait(request_queue_t *q, int rw)
 	 * up to a big batch of them for a small period of time.
* See ioc_batching, ioc_set_batching
*/
- ioc = get_io_context(GFP_NOIO);
+ ioc = current_io_context(GFP_NOIO);
ioc_set_batching(q, ioc);
- put_io_context(ioc);
+
+ spin_lock_irq(q->queue_lock);
}
finish_wait(&rl->wait[rw], &wait);
- } while (!rq);
+ }
return rq;
}
@@ -2002,14 +2012,18 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
BUG_ON(rw != READ && rw != WRITE);
- if (gfp_mask & __GFP_WAIT)
- rq = get_request_wait(q, rw);
- else
- rq = get_request(q, rw, gfp_mask);
+ spin_lock_irq(q->queue_lock);
+ if (gfp_mask & __GFP_WAIT) {
+ rq = get_request_wait(q, rw, NULL);
+ } else {
+ rq = get_request(q, rw, NULL, gfp_mask);
+ if (!rq)
+ spin_unlock_irq(q->queue_lock);
+ }
+ /* q->queue_lock is unlocked at this point */
return rq;
}
-
EXPORT_SYMBOL(blk_get_request);
/**
@@ -2333,7 +2347,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
return;
req->rq_status = RQ_INACTIVE;
- req->q = NULL;
req->rl = NULL;
/*
@@ -2462,6 +2475,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
req->rq_disk->in_flight--;
}
+ req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+
__blk_put_request(q, next);
return 1;
}
@@ -2512,13 +2527,15 @@ EXPORT_SYMBOL(blk_attempt_remerge);
static int __make_request(request_queue_t *q, struct bio *bio)
{
- struct request *req, *freereq = NULL;
+ struct request *req;
int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
+ unsigned short prio;
sector_t sector;
sector = bio->bi_sector;
nr_sectors = bio_sectors(bio);
cur_nr_sectors = bio_cur_sectors(bio);
+ prio = bio_prio(bio);
rw = bio_data_dir(bio);
sync = bio_sync(bio);
@@ -2538,14 +2555,9 @@ static int __make_request(request_queue_t *q, struct bio *bio)
goto end_io;
}
-again:
spin_lock_irq(q->queue_lock);
- if (elv_queue_empty(q)) {
- blk_plug_device(q);
- goto get_rq;
- }
- if (barrier)
+ if (unlikely(barrier) || elv_queue_empty(q))
goto get_rq;
el_ret = elv_merge(q, &req, bio);
@@ -2559,6 +2571,7 @@ again:
req->biotail->bi_next = bio;
req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req);
@@ -2583,45 +2596,30 @@ again:
req->hard_cur_sectors = cur_nr_sectors;
req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req);
goto out;
- /*
- * elevator says don't/can't merge. get new request
- */
- case ELEVATOR_NO_MERGE:
- break;
-
+	/* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
default:
- printk("elevator returned crap (%d)\n", el_ret);
- BUG();
+ ;
}
+get_rq:
/*
- * Grab a free request from the freelist - if that is empty, check
- * if we are doing read ahead and abort instead of blocking for
- * a free slot.
+	 * Grab a free request. This might sleep but cannot fail.
+ * Returns with the queue unlocked.
+ */
+ req = get_request_wait(q, rw, bio);
+
+ /*
+ * After dropping the lock and possibly sleeping here, our request
+ * may now be mergeable after it had proven unmergeable (above).
+ * We don't worry about that case for efficiency. It won't happen
+ * often, and the elevators are able to handle it.
*/
-get_rq:
- if (freereq) {
- req = freereq;
- freereq = NULL;
- } else {
- spin_unlock_irq(q->queue_lock);
- if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) {
- /*
- * READA bit set
- */
- err = -EWOULDBLOCK;
- if (bio_rw_ahead(bio))
- goto end_io;
-
- freereq = get_request_wait(q, rw);
- }
- goto again;
- }
req->flags |= REQ_CMD;
@@ -2646,13 +2644,15 @@ get_rq:
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
req->bio = req->biotail = bio;
+ req->ioprio = prio;
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
+ spin_lock_irq(q->queue_lock);
+ if (elv_queue_empty(q))
+ blk_plug_device(q);
add_request(q, req);
out:
- if (freereq)
- __blk_put_request(q, freereq);
if (sync)
__generic_unplug_device(q);
@@ -2674,7 +2674,7 @@ static inline void blk_partition_remap(struct bio *bio)
if (bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
- switch (bio->bi_rw) {
+ switch (bio_data_dir(bio)) {
case READ:
p->read_sectors += bio_sectors(bio);
p->reads++;
@@ -2693,6 +2693,7 @@ void blk_finish_queue_drain(request_queue_t *q)
{
struct request_list *rl = &q->rq;
struct request *rq;
+ int requeued = 0;
spin_lock_irq(q->queue_lock);
clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
@@ -2701,9 +2702,13 @@ void blk_finish_queue_drain(request_queue_t *q)
rq = list_entry_rq(q->drain_list.next);
list_del_init(&rq->queuelist);
- __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
+ elv_requeue_request(q, rq);
+ requeued++;
}
+ if (requeued)
+ q->request_fn(q);
+
spin_unlock_irq(q->queue_lock);
wake_up(&rl->wait[0]);
@@ -2900,7 +2905,7 @@ void submit_bio(int rw, struct bio *bio)
BIO_BUG_ON(!bio->bi_size);
BIO_BUG_ON(!bio->bi_io_vec);
- bio->bi_rw = rw;
+ bio->bi_rw |= rw;
if (rw & WRITE)
mod_page_state(pgpgout, count);
else
@@ -3257,8 +3262,11 @@ void exit_io_context(void)
struct io_context *ioc;
local_irq_save(flags);
+ task_lock(current);
ioc = current->io_context;
current->io_context = NULL;
+ ioc->task = NULL;
+ task_unlock(current);
local_irq_restore(flags);
if (ioc->aic && ioc->aic->exit)
@@ -3271,53 +3279,49 @@ void exit_io_context(void)
/*
* If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+ * Otherwise, return its existing IO context.
*
- * This is always called in the context of the task which submitted the I/O.
- * But weird things happen, so we disable local interrupts to ensure exclusive
- * access to *current.
+ * This returned IO context doesn't have a specifically elevated refcount,
+ * but since the current task itself holds a reference, the context can be
+ * used in general code, so long as it stays within `current` context.
*/
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *current_io_context(int gfp_flags)
{
struct task_struct *tsk = current;
- unsigned long flags;
struct io_context *ret;
- local_irq_save(flags);
ret = tsk->io_context;
- if (ret)
- goto out;
-
- local_irq_restore(flags);
+ if (likely(ret))
+ return ret;
ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
if (ret) {
atomic_set(&ret->refcount, 1);
- ret->pid = tsk->pid;
+ ret->task = current;
+ ret->set_ioprio = NULL;
ret->last_waited = jiffies; /* doesn't matter... */
ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL;
ret->cic = NULL;
- spin_lock_init(&ret->lock);
-
- local_irq_save(flags);
+ tsk->io_context = ret;
+ }
- /*
- * very unlikely, someone raced with us in setting up the task
- * io context. free new context and just grab a reference.
- */
- if (!tsk->io_context)
- tsk->io_context = ret;
- else {
- kmem_cache_free(iocontext_cachep, ret);
- ret = tsk->io_context;
- }
+ return ret;
+}
+EXPORT_SYMBOL(current_io_context);
-out:
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ */
+struct io_context *get_io_context(int gfp_flags)
+{
+ struct io_context *ret;
+ ret = current_io_context(gfp_flags);
+ if (likely(ret))
atomic_inc(&ret->refcount);
- local_irq_restore(flags);
- }
-
return ret;
}
EXPORT_SYMBOL(get_io_context);
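
The io_context rework above splits the old helper in two: current_io_context() hands back the submitting task's context without touching its refcount (it relies on the task's own reference, as the rewritten comment explains), while get_io_context() still elevates the refcount for callers that keep the pointer around and must balance it with put_io_context(). A minimal kernel-style sketch of the two call patterns, assuming the prototypes are exported via <linux/blkdev.h> (the header change sits outside this drivers/-only diffstat) and using hypothetical function names:

#include <linux/blkdev.h>
#include <linux/jiffies.h>

/* Fast path: the pointer never leaves 'current', so no extra reference
 * is taken and no put_io_context() is required (mirrors get_request()). */
static void poke_current_ioc(void)
{
	struct io_context *ioc = current_io_context(GFP_ATOMIC);

	if (ioc)
		ioc->last_waited = jiffies;
}

/* Long-lived path: the pointer may outlive the submitting context, so
 * take a reference now and drop it when done. */
static struct io_context *stash_ioc(void)
{
	return get_io_context(GFP_NOIO);	/* refcount bumped */
}

static void unstash_ioc(struct io_context *ioc)
{
	put_io_context(ioc);			/* balances get_io_context() */
}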
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 5b09cf154ac..e5f7494c00e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -253,7 +253,7 @@ static int floppy_revalidate(struct gendisk *disk);
static int swim3_add_device(struct device_node *swims);
int swim3_init(void);
-#ifndef CONFIG_PMAC_PBOOK
+#ifndef CONFIG_PMAC_MEDIABAY
#define check_media_bay(which, what) 1
#endif
@@ -297,9 +297,11 @@ static void do_fd_request(request_queue_t * q)
int i;
for(i=0;i<floppy_count;i++)
{
+#ifdef CONFIG_PMAC_MEDIABAY
if (floppy_states[i].media_bay &&
check_media_bay(floppy_states[i].media_bay, MB_FD))
continue;
+#endif /* CONFIG_PMAC_MEDIABAY */
start_request(&floppy_states[i]);
}
sti();
@@ -856,8 +858,10 @@ static int floppy_ioctl(struct inode *inode, struct file *filp,
if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
return -EPERM;
+#ifdef CONFIG_PMAC_MEDIABAY
if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
return -ENXIO;
+#endif
switch (cmd) {
case FDEJECT:
@@ -881,8 +885,10 @@ static int floppy_open(struct inode *inode, struct file *filp)
int n, err = 0;
if (fs->ref_count == 0) {
+#ifdef CONFIG_PMAC_MEDIABAY
if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
return -ENXIO;
+#endif
out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
out_8(&sw->control_bic, 0xff);
out_8(&sw->mode, 0x95);
@@ -967,8 +973,10 @@ static int floppy_revalidate(struct gendisk *disk)
struct swim3 __iomem *sw;
int ret, n;
+#ifdef CONFIG_PMAC_MEDIABAY
if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
return -ENXIO;
+#endif
sw = fs->swim3;
grab_drive(fs, revalidating, 0);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 5ed3a637945..9db0a9e3e59 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
+#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@@ -1582,9 +1583,9 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out;
#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
- rc = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
+ rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
if (!rc) {
- rc = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
+ rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
pci_name(pdev));
@@ -1593,7 +1594,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
pci_dac = 1;
} else {
#endif
- rc = pci_set_dma_mask(pdev, 0xffffffffULL);
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
pci_name(pdev));
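
The sx8 hunks above replace the hand-rolled 0xffffffff... constants with DMA_64BIT_MASK and DMA_32BIT_MASK from the newly included <linux/dma-mapping.h>; the surrounding logic is the usual probe-time negotiation of "try the 64-bit mask, fall back to the 32-bit one". A condensed sketch of that pattern follows (the setup_dma_masks() helper is hypothetical, not part of this driver):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch of the usual DMA mask negotiation done at probe time. */
static int setup_dma_masks(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		/* streaming mappings may use 64-bit addresses ... */
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc)		/* ... but coherent memory must follow suit */
			return rc;
		return 0;
	}

	/* fall back to the 32-bit masks every PCI device must support */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc)
		return rc;
	return pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
}

carm_init_one() above does the same dance, additionally recording pci_dac when the 64-bit masks succeed so later mappings can use DAC addressing.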
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index e481cc411b5..5ef9adb9fe7 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -1089,6 +1089,14 @@ static int bluecard_event(event_t event, int priority, event_callback_args_t *ar
return 0;
}
+static struct pcmcia_device_id bluecard_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("BlueCard", "LSE041", 0xbaf16fbf, 0x657cc15e),
+ PCMCIA_DEVICE_PROD_ID12("BTCFCARD", "LSE139", 0xe3987764, 0x2524b59c),
+ PCMCIA_DEVICE_PROD_ID12("WSS", "LSE039", 0x0a0736ec, 0x24e6dfab),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, bluecard_ids);
+
static struct pcmcia_driver bluecard_driver = {
.owner = THIS_MODULE,
.drv = {
@@ -1096,6 +1104,7 @@ static struct pcmcia_driver bluecard_driver = {
},
.attach = bluecard_attach,
.detach = bluecard_detach,
+ .id_table = bluecard_ids,
};
static int __init init_bluecard_cs(void)
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index f71e5c76963..9013cd759af 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -935,6 +935,12 @@ static int bt3c_event(event_t event, int priority, event_callback_args_t *args)
return 0;
}
+static struct pcmcia_device_id bt3c_ids[] = {
+ PCMCIA_DEVICE_PROD_ID13("3COM", "Bluetooth PC Card", 0xefce0a31, 0xd4ce9b02),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, bt3c_ids);
+
static struct pcmcia_driver bt3c_driver = {
.owner = THIS_MODULE,
.drv = {
@@ -942,6 +948,7 @@ static struct pcmcia_driver bt3c_driver = {
},
.attach = bt3c_attach,
.detach = bt3c_detach,
+ .id_table = bt3c_ids,
};
static int __init init_bt3c_cs(void)
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index ad8d972444a..c479484a1f7 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -855,6 +855,12 @@ static int btuart_event(event_t event, int priority, event_callback_args_t *args
return 0;
}
+static struct pcmcia_device_id btuart_ids[] = {
+ /* don't use this driver. Use serial_cs + hci_uart instead */
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, btuart_ids);
+
static struct pcmcia_driver btuart_driver = {
.owner = THIS_MODULE,
.drv = {
@@ -862,6 +868,7 @@ static struct pcmcia_driver btuart_driver = {
},
.attach = btuart_attach,
.detach = btuart_detach,
+ .id_table = btuart_ids,
};
static int __init init_btuart_cs(void)
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index fe954e5d9a1..bb12f7daeb9 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -807,6 +807,13 @@ static int dtl1_event(event_t event, int priority, event_callback_args_t *args)
return 0;
}
+static struct pcmcia_device_id dtl1_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d),
+ PCMCIA_DEVICE_PROD_ID12("Socket", "CF", 0xb38bcc2e, 0x44ebf863),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, dtl1_ids);
+
static struct pcmcia_driver dtl1_driver = {
.owner = THIS_MODULE,
.drv = {
@@ -814,6 +821,7 @@ static struct pcmcia_driver dtl1_driver = {
},
.attach = dtl1_attach,
.detach = dtl1_detach,
+ .id_table = dtl1_ids,
};
static int __init init_dtl1_cs(void)
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 1407945a589..59f589d733f 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -686,6 +686,15 @@ static struct pci_device_id agp_amd64_pci_table[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
+ /* SIS 760 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_SI,
+ .device = PCI_DEVICE_ID_SI_760,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
{ }
};
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index d9a02993467..c2b12eab67c 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -6,7 +6,7 @@
#
config DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
- depends on AGP || AGP=n
+ depends on (AGP || AGP=n) && PCI
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index 23ab26321e9..7444dec40b9 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -19,6 +19,11 @@ radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
ffb-objs := ffb_drv.o ffb_context.o
sis-objs := sis_drv.o sis_ds.o sis_mm.o
+ifeq ($(CONFIG_COMPAT),y)
+drm-objs += drm_ioc32.o
+radeon-objs += radeon_ioc32.o
+endif
+
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_GAMMA) += gamma.o
obj-$(CONFIG_DRM_TDFX) += tdfx.o
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 21f4c54e1a8..b04ddf12a0f 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -316,6 +316,9 @@ do { \
typedef int drm_ioctl_t( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
+typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
typedef struct drm_ioctl_desc {
drm_ioctl_t *func;
int auth_needed;
@@ -775,6 +778,8 @@ extern int drm_version(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
+extern long drm_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
extern int drm_takedown(drm_device_t * dev);
/* Device support (drm_fops.h) */
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 4113bcba67f..3407380b865 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -60,6 +60,15 @@ int drm_order( unsigned long size )
}
EXPORT_SYMBOL(drm_order);
+#ifdef CONFIG_COMPAT
+/*
+ * Used to allocate 32-bit handles for _DRM_SHM regions.
+ * The 0x10000000 value is chosen to be out of the way of
+ * FB/register and GART physical addresses.
+ */
+static unsigned int map32_handle = 0x10000000;
+#endif
+
/**
* Ioctl to specify a range of memory that is available for mapping by a non-root process.
*
@@ -187,16 +196,18 @@ int drm_addmap( struct inode *inode, struct file *filp,
down(&dev->struct_sem);
list_add(&list->head, &dev->maplist->head);
+#ifdef CONFIG_COMPAT
+ /* Assign a 32-bit handle for _DRM_SHM mappings */
+ /* We do it here so that dev->struct_sem protects the increment */
+ if (map->type == _DRM_SHM)
+ map->offset = map32_handle += PAGE_SIZE;
+#endif
up(&dev->struct_sem);
if ( copy_to_user( argp, map, sizeof(*map) ) )
return -EFAULT;
- if ( map->type != _DRM_SHM ) {
- if ( copy_to_user( &argp->handle,
- &map->offset,
- sizeof(map->offset) ) )
- return -EFAULT;
- }
+ if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
+ return -EFAULT;
return 0;
}
@@ -240,7 +251,7 @@ int drm_rmmap(struct inode *inode, struct file *filp,
r_list = list_entry(list, drm_map_list_t, head);
if(r_list->map &&
- r_list->map->handle == request.handle &&
+ r_list->map->offset == (unsigned long) request.handle &&
r_list->map->flags & _DRM_REMOVABLE) break;
}
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index f15c86c5787..fdf661f234e 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -225,7 +225,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
map = dev->context_sareas[request.ctx_id];
up(&dev->struct_sem);
- request.handle = map->handle;
+ request.handle = (void *) map->offset;
if (copy_to_user(argp, &request, sizeof(request)))
return -EFAULT;
return 0;
@@ -261,8 +261,8 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
down(&dev->struct_sem);
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
- if(r_list->map &&
- r_list->map->handle == request.handle)
+ if (r_list->map
+ && r_list->map->offset == (unsigned long) request.handle)
goto found;
}
bad:
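
The new drm_ioc32.c added below applies one thunking pattern to every ioctl it handles: copy the packed 32-bit argument in, rebuild the native-width structure on the compat user stack with compat_alloc_user_space(), forward it to the regular drm_ioctl() entry point, then copy the interesting result fields back into the 32-bit layout. A condensed sketch of that shape, using hypothetical foo/foo32 types and a hypothetical DRM_IOCTL_FOO command — the real per-ioctl code is in the file below:

#include <linux/compat.h>
#include "drmP.h"

typedef struct foo32 {
	u32 len;
	u32 buf;		/* 32-bit user pointer */
} foo32_t;

typedef struct foo {
	unsigned long len;
	void __user *buf;
} foo_t;

static int compat_drm_foo(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	foo32_t f32;
	foo_t __user *f;
	int err;

	/* 1: pull the packed 32-bit layout in from userspace */
	if (copy_from_user(&f32, (void __user *) arg, sizeof(f32)))
		return -EFAULT;

	/* 2: build the native layout in compat user space */
	f = compat_alloc_user_space(sizeof(*f));
	if (!access_ok(VERIFY_WRITE, f, sizeof(*f))
	    || __put_user(f32.len, &f->len)
	    || __put_user((void __user *)(unsigned long) f32.buf, &f->buf))
		return -EFAULT;

	/* 3: hand it to the regular 64-bit ioctl path */
	err = drm_ioctl(file->f_dentry->d_inode, file,
			DRM_IOCTL_FOO, (unsigned long) f);
	if (err)
		return err;

	/* 4: copy the fields userspace cares about back into 32-bit form */
	if (__get_user(f32.len, &f->len)
	    || copy_to_user((void __user *) arg, &f32, sizeof(f32)))
		return -EFAULT;
	return 0;
}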
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/char/drm/drm_ioc32.c
new file mode 100644
index 00000000000..8087a963639
--- /dev/null
+++ b/drivers/char/drm/drm_ioc32.c
@@ -0,0 +1,1069 @@
+/**
+ * \file drm_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the DRM.
+ *
+ * \author Paul Mackerras <paulus@samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+#include <linux/ioctl32.h>
+
+#include "drmP.h"
+#include "drm_core.h"
+
+#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
+#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
+#define DRM_IOCTL_GET_MAP32 DRM_IOWR(0x04, drm_map32_t)
+#define DRM_IOCTL_GET_CLIENT32 DRM_IOWR(0x05, drm_client32_t)
+#define DRM_IOCTL_GET_STATS32 DRM_IOR( 0x06, drm_stats32_t)
+
+#define DRM_IOCTL_SET_UNIQUE32 DRM_IOW( 0x10, drm_unique32_t)
+#define DRM_IOCTL_ADD_MAP32 DRM_IOWR(0x15, drm_map32_t)
+#define DRM_IOCTL_ADD_BUFS32 DRM_IOWR(0x16, drm_buf_desc32_t)
+#define DRM_IOCTL_MARK_BUFS32 DRM_IOW( 0x17, drm_buf_desc32_t)
+#define DRM_IOCTL_INFO_BUFS32 DRM_IOWR(0x18, drm_buf_info32_t)
+#define DRM_IOCTL_MAP_BUFS32 DRM_IOWR(0x19, drm_buf_map32_t)
+#define DRM_IOCTL_FREE_BUFS32 DRM_IOW( 0x1a, drm_buf_free32_t)
+
+#define DRM_IOCTL_RM_MAP32 DRM_IOW( 0x1b, drm_map32_t)
+
+#define DRM_IOCTL_SET_SAREA_CTX32 DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
+#define DRM_IOCTL_GET_SAREA_CTX32 DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
+
+#define DRM_IOCTL_RES_CTX32 DRM_IOWR(0x26, drm_ctx_res32_t)
+#define DRM_IOCTL_DMA32 DRM_IOWR(0x29, drm_dma32_t)
+
+#define DRM_IOCTL_AGP_ENABLE32 DRM_IOW( 0x32, drm_agp_mode32_t)
+#define DRM_IOCTL_AGP_INFO32 DRM_IOR( 0x33, drm_agp_info32_t)
+#define DRM_IOCTL_AGP_ALLOC32 DRM_IOWR(0x34, drm_agp_buffer32_t)
+#define DRM_IOCTL_AGP_FREE32 DRM_IOW( 0x35, drm_agp_buffer32_t)
+#define DRM_IOCTL_AGP_BIND32 DRM_IOW( 0x36, drm_agp_binding32_t)
+#define DRM_IOCTL_AGP_UNBIND32 DRM_IOW( 0x37, drm_agp_binding32_t)
+
+#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t)
+#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t)
+
+#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
+
+typedef struct drm_version_32 {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel;/**< Patch level */
+ u32 name_len; /**< Length of name buffer */
+ u32 name; /**< Name of driver */
+ u32 date_len; /**< Length of date buffer */
+ u32 date; /**< User-space buffer to hold date */
+ u32 desc_len; /**< Length of desc buffer */
+ u32 desc; /**< User-space buffer to hold desc */
+} drm_version32_t;
+
+static int compat_drm_version(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version32_t v32;
+ drm_version_t __user *version;
+ int err;
+
+ if (copy_from_user(&v32, (void __user *) arg, sizeof(v32)))
+ return -EFAULT;
+
+ version = compat_alloc_user_space(sizeof(*version));
+ if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
+ return -EFAULT;
+ if (__put_user(v32.name_len, &version->name_len)
+ || __put_user((void __user *)(unsigned long)v32.name,
+ &version->name)
+ || __put_user(v32.date_len, &version->date_len)
+ || __put_user((void __user *)(unsigned long)v32.date,
+ &version->date)
+ || __put_user(v32.desc_len, &version->desc_len)
+ || __put_user((void __user *)(unsigned long)v32.desc,
+ &version->desc))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_VERSION, (unsigned long) version);
+ if (err)
+ return err;
+
+ if (__get_user(v32.version_major, &version->version_major)
+ || __get_user(v32.version_minor, &version->version_minor)
+ || __get_user(v32.version_patchlevel, &version->version_patchlevel)
+ || __get_user(v32.name_len, &version->name_len)
+ || __get_user(v32.date_len, &version->date_len)
+ || __get_user(v32.desc_len, &version->desc_len))
+ return -EFAULT;
+
+ if (copy_to_user((void __user *) arg, &v32, sizeof(v32)))
+ return -EFAULT;
+ return 0;
+}
+
+typedef struct drm_unique32 {
+ u32 unique_len; /**< Length of unique */
+ u32 unique; /**< Unique name for driver instantiation */
+} drm_unique32_t;
+
+static int compat_drm_getunique(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_unique32_t uq32;
+ drm_unique_t __user *u;
+ int err;
+
+ if (copy_from_user(&uq32, (void __user *) arg, sizeof(uq32)))
+ return -EFAULT;
+
+ u = compat_alloc_user_space(sizeof(*u));
+ if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+ return -EFAULT;
+ if (__put_user(uq32.unique_len, &u->unique_len)
+ || __put_user((void __user *)(unsigned long) uq32.unique,
+ &u->unique))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_GET_UNIQUE, (unsigned long) u);
+ if (err)
+ return err;
+
+ if (__get_user(uq32.unique_len, &u->unique_len))
+ return -EFAULT;
+ if (copy_to_user((void __user *) arg, &uq32, sizeof(uq32)))
+ return -EFAULT;
+ return 0;
+}
+
+static int compat_drm_setunique(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_unique32_t uq32;
+ drm_unique_t __user *u;
+
+ if (copy_from_user(&uq32, (void __user *) arg, sizeof(uq32)))
+ return -EFAULT;
+
+ u = compat_alloc_user_space(sizeof(*u));
+ if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+ return -EFAULT;
+ if (__put_user(uq32.unique_len, &u->unique_len)
+ || __put_user((void __user *)(unsigned long) uq32.unique,
+ &u->unique))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_SET_UNIQUE, (unsigned long) u);
+}
+
+typedef struct drm_map32 {
+ u32 offset; /**< Requested physical address (0 for SAREA)*/
+ u32 size; /**< Requested physical size (bytes) */
+ drm_map_type_t type; /**< Type of memory to map */
+ drm_map_flags_t flags; /**< Flags */
+ u32 handle; /**< User-space: "Handle" to pass to mmap() */
+ int mtrr; /**< MTRR slot used */
+} drm_map32_t;
+
+static int compat_drm_getmap(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_map32_t __user *argp = (void __user *)arg;
+ drm_map32_t m32;
+ drm_map_t __user *map;
+ int idx, err;
+ void *handle;
+
+ if (get_user(idx, &argp->offset))
+ return -EFAULT;
+
+ map = compat_alloc_user_space(sizeof(*map));
+ if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+ return -EFAULT;
+ if (__put_user(idx, &map->offset))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_GET_MAP, (unsigned long) map);
+ if (err)
+ return err;
+
+ if (__get_user(m32.offset, &map->offset)
+ || __get_user(m32.size, &map->size)
+ || __get_user(m32.type, &map->type)
+ || __get_user(m32.flags, &map->flags)
+ || __get_user(handle, &map->handle)
+ || __get_user(m32.mtrr, &map->mtrr))
+ return -EFAULT;
+
+ m32.handle = (unsigned long) handle;
+ if (copy_to_user(argp, &m32, sizeof(m32)))
+ return -EFAULT;
+ return 0;
+
+}
+
+static int compat_drm_addmap(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_map32_t __user *argp = (void __user *)arg;
+ drm_map32_t m32;
+ drm_map_t __user *map;
+ int err;
+ void *handle;
+
+ if (copy_from_user(&m32, argp, sizeof(m32)))
+ return -EFAULT;
+
+ map = compat_alloc_user_space(sizeof(*map));
+ if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+ return -EFAULT;
+ if (__put_user(m32.offset, &map->offset)
+ || __put_user(m32.size, &map->size)
+ || __put_user(m32.type, &map->type)
+ || __put_user(m32.flags, &map->flags))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_ADD_MAP, (unsigned long) map);
+ if (err)
+ return err;
+
+ if (__get_user(m32.offset, &map->offset)
+ || __get_user(m32.mtrr, &map->mtrr)
+ || __get_user(handle, &map->handle))
+ return -EFAULT;
+
+ m32.handle = (unsigned long) handle;
+ if (m32.handle != (unsigned long) handle && printk_ratelimit())
+ printk(KERN_ERR "compat_drm_addmap truncated handle"
+ " %p for type %d offset %x\n",
+ handle, m32.type, m32.offset);
+
+ if (copy_to_user(argp, &m32, sizeof(m32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int compat_drm_rmmap(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_map32_t __user *argp = (void __user *)arg;
+ drm_map_t __user *map;
+ u32 handle;
+
+ if (get_user(handle, &argp->handle))
+ return -EFAULT;
+
+ map = compat_alloc_user_space(sizeof(*map));
+ if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+ return -EFAULT;
+ if (__put_user((void *)(unsigned long) handle, &map->handle))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RM_MAP, (unsigned long) map);
+}
+
+typedef struct drm_client32 {
+ int idx; /**< Which client desired? */
+ int auth; /**< Is client authenticated? */
+ u32 pid; /**< Process ID */
+ u32 uid; /**< User ID */
+ u32 magic; /**< Magic */
+ u32 iocs; /**< Ioctl count */
+} drm_client32_t;
+
+static int compat_drm_getclient(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_client32_t c32;
+ drm_client32_t __user *argp = (void __user *)arg;
+ drm_client_t __user *client;
+ int idx, err;
+
+ if (get_user(idx, &argp->idx))
+ return -EFAULT;
+
+ client = compat_alloc_user_space(sizeof(*client));
+ if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
+ return -EFAULT;
+ if (__put_user(idx, &client->idx))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_GET_CLIENT, (unsigned long) client);
+ if (err)
+ return err;
+
+ if (__get_user(c32.auth, &client->auth)
+ || __get_user(c32.pid, &client->pid)
+ || __get_user(c32.uid, &client->uid)
+ || __get_user(c32.magic, &client->magic)
+ || __get_user(c32.iocs, &client->iocs))
+ return -EFAULT;
+
+ if (copy_to_user(argp, &c32, sizeof(c32)))
+ return -EFAULT;
+ return 0;
+}
+
+typedef struct drm_stats32 {
+ u32 count;
+ struct {
+ u32 value;
+ drm_stat_type_t type;
+ } data[15];
+} drm_stats32_t;
+
+static int compat_drm_getstats(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_stats32_t s32;
+ drm_stats32_t __user *argp = (void __user *)arg;
+ drm_stats_t __user *stats;
+ int i, err;
+
+ stats = compat_alloc_user_space(sizeof(*stats));
+ if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_GET_STATS, (unsigned long) stats);
+ if (err)
+ return err;
+
+ if (__get_user(s32.count, &stats->count))
+ return -EFAULT;
+ for (i = 0; i < 15; ++i)
+ if (__get_user(s32.data[i].value, &stats->data[i].value)
+ || __get_user(s32.data[i].type, &stats->data[i].type))
+ return -EFAULT;
+
+ if (copy_to_user(argp, &s32, sizeof(s32)))
+ return -EFAULT;
+ return 0;
+}
+
+typedef struct drm_buf_desc32 {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+ int flags;
+ u32 agp_start; /**< Start address in the AGP aperture */
+} drm_buf_desc32_t;
+
+static int compat_drm_addbufs(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_buf_desc32_t __user *argp = (void __user *)arg;
+ drm_buf_desc_t __user *buf;
+ int err;
+ unsigned long agp_start;
+
+ buf = compat_alloc_user_space(sizeof(*buf));
+ if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
+ || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+ return -EFAULT;
+
+ if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
+ || __get_user(agp_start, &argp->agp_start)
+ || __put_user(agp_start, &buf->agp_start))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_ADD_BUFS, (unsigned long) buf);
+ if (err)
+ return err;
+
+ if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
+ || __get_user(agp_start, &buf->agp_start)
+ || __put_user(agp_start, &argp->agp_start))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int compat_drm_markbufs(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_buf_desc32_t b32;
+ drm_buf_desc32_t __user *argp = (void __user *)arg;
+ drm_buf_desc_t __user *buf;
+
+ if (copy_from_user(&b32, argp, sizeof(b32)))
+ return -EFAULT;
+
+ buf = compat_alloc_user_space(sizeof(*buf));
+ if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+ return -EFAULT;
+
+ if (__put_user(b32.size, &buf->size)
+ || __put_user(b32.low_mark, &buf->low_mark)
+ || __put_user(b32.high_mark, &buf->high_mark))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_MARK_BUFS, (unsigned long) buf);
+}
+
+typedef struct drm_buf_info32 {
+ int count; /**< Entries in list */
+ u32 list;
+} drm_buf_info32_t;
+
+static int compat_drm_infobufs(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_buf_info32_t req32;
+ drm_buf_info32_t __user *argp = (void __user *)arg;
+ drm_buf_desc32_t __user *to;
+ drm_buf_info_t __user *request;
+ drm_buf_desc_t __user *list;
+ size_t nbytes;
+ int i, err;
+ int count, actual;
+
+ if (copy_from_user(&req32, argp, sizeof(req32)))
+ return -EFAULT;
+
+ count = req32.count;
+ to = (drm_buf_desc32_t __user *)(unsigned long) req32.list;
+ if (count < 0)
+ count = 0;
+ if (count > 0
+ && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
+ return -EFAULT;
+
+ nbytes = sizeof(*request) + count * sizeof(drm_buf_desc_t);
+ request = compat_alloc_user_space(nbytes);
+ if (!access_ok(VERIFY_WRITE, request, nbytes))
+ return -EFAULT;
+ list = (drm_buf_desc_t *) (request + 1);
+
+ if (__put_user(count, &request->count)
+ || __put_user(list, &request->list))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_INFO_BUFS, (unsigned long) request);
+ if (err)
+ return err;
+
+ if (__get_user(actual, &request->count))
+ return -EFAULT;
+ if (count >= actual)
+ for (i = 0; i < actual; ++i)
+ if (__copy_in_user(&to[i], &list[i],
+ offsetof(drm_buf_desc_t, flags)))
+ return -EFAULT;
+
+ if (__put_user(actual, &argp->count))
+ return -EFAULT;
+
+ return 0;
+}
+
+typedef struct drm_buf_pub32 {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ u32 address; /**< Address of buffer */
+} drm_buf_pub32_t;
+
+typedef struct drm_buf_map32 {
+ int count; /**< Length of the buffer list */
+ u32 virtual; /**< Mmap'd area in user-virtual */
+ u32 list; /**< Buffer information */
+} drm_buf_map32_t;
+
+static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_buf_map32_t __user *argp = (void __user *)arg;
+ drm_buf_map32_t req32;
+ drm_buf_pub32_t __user *list32;
+ drm_buf_map_t __user *request;
+ drm_buf_pub_t __user *list;
+ int i, err;
+ int count, actual;
+ size_t nbytes;
+ void __user *addr;
+
+ if (copy_from_user(&req32, argp, sizeof(req32)))
+ return -EFAULT;
+ count = req32.count;
+ list32 = (void __user *)(unsigned long)req32.list;
+
+ if (count < 0)
+ return -EINVAL;
+ nbytes = sizeof(*request) + count * sizeof(drm_buf_pub_t);
+ request = compat_alloc_user_space(nbytes);
+ if (!access_ok(VERIFY_WRITE, request, nbytes))
+ return -EFAULT;
+ list = (drm_buf_pub_t *) (request + 1);
+
+ if (__put_user(count, &request->count)
+ || __put_user(list, &request->list))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_MAP_BUFS, (unsigned long) request);
+ if (err)
+ return err;
+
+ if (__get_user(actual, &request->count))
+ return -EFAULT;
+ if (count >= actual)
+ for (i = 0; i < actual; ++i)
+ if (__copy_in_user(&list32[i], &list[i],
+ offsetof(drm_buf_pub_t, address))
+ || __get_user(addr, &list[i].address)
+ || __put_user((unsigned long) addr,
+ &list32[i].address))
+ return -EFAULT;
+
+ if (__put_user(actual, &argp->count)
+ || __get_user(addr, &request->virtual)
+ || __put_user((unsigned long) addr, &argp->virtual))
+ return -EFAULT;
+
+ return 0;
+}
+
+typedef struct drm_buf_free32 {
+ int count;
+ u32 list;
+} drm_buf_free32_t;
+
+static int compat_drm_freebufs(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_buf_free32_t req32;
+ drm_buf_free_t __user *request;
+ drm_buf_free32_t __user *argp = (void __user *)arg;
+
+ if (copy_from_user(&req32, argp, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+ return -EFAULT;
+ if (__put_user(req32.count, &request->count)
+ || __put_user((int __user *)(unsigned long) req32.list,
+ &request->list))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_FREE_BUFS, (unsigned long) request);
+}
+
+typedef struct drm_ctx_priv_map32 {
+ unsigned int ctx_id; /**< Context requesting private mapping */
+ u32 handle; /**< Handle of map */
+} drm_ctx_priv_map32_t;
+
+static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_priv_map32_t req32;
+ drm_ctx_priv_map_t __user *request;
+ drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
+
+ if (copy_from_user(&req32, argp, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+ return -EFAULT;
+ if (__put_user(req32.ctx_id, &request->ctx_id)
+ || __put_user((void *)(unsigned long) req32.handle,
+ &request->handle))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_SET_SAREA_CTX, (unsigned long) request);
+}
+
+static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_priv_map_t __user *request;
+ drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
+ int err;
+ unsigned int ctx_id;
+ void *handle;
+
+ if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+ || __get_user(ctx_id, &argp->ctx_id))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+ return -EFAULT;
+ if (__put_user(ctx_id, &request->ctx_id))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_GET_SAREA_CTX, (unsigned long) request);
+ if (err)
+ return err;
+
+ if (__get_user(handle, &request->handle)
+ || __put_user((unsigned long) handle, &argp->handle))
+ return -EFAULT;
+
+ return 0;
+}
+
+typedef struct drm_ctx_res32 {
+ int count;
+ u32 contexts;
+} drm_ctx_res32_t;
+
+static int compat_drm_resctx(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_res32_t __user *argp = (void __user *)arg;
+ drm_ctx_res32_t res32;
+ drm_ctx_res_t __user *res;
+ int err;
+
+ if (copy_from_user(&res32, argp, sizeof(res32)))
+ return -EFAULT;
+
+ res = compat_alloc_user_space(sizeof(*res));
+ if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+ return -EFAULT;
+ if (__put_user(res32.count, &res->count)
+ || __put_user((drm_ctx_t __user *)(unsigned long) res32.contexts,
+ &res->contexts))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RES_CTX, (unsigned long) res);
+ if (err)
+ return err;
+
+ if (__get_user(res32.count, &res->count)
+ || __put_user(res32.count, &argp->count))
+ return -EFAULT;
+
+ return 0;
+}
+
+typedef struct drm_dma32 {
+ int context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ u32 send_indices; /**< List of handles to buffers */
+ u32 send_sizes; /**< Lengths of data to send */
+ drm_dma_flags_t flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size for buffers */
+ u32 request_indices; /**< Buffer information */
+ u32 request_sizes;
+ int granted_count; /**< Number of buffers granted */
+} drm_dma32_t;
+
+static int compat_drm_dma(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_dma32_t d32;
+ drm_dma32_t __user *argp = (void __user *) arg;
+ drm_dma_t __user *d;
+ int err;
+
+ if (copy_from_user(&d32, argp, sizeof(d32)))
+ return -EFAULT;
+
+ d = compat_alloc_user_space(sizeof(*d));
+ if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+ return -EFAULT;
+
+ if (__put_user(d32.context, &d->context)
+ || __put_user(d32.send_count, &d->send_count)
+ || __put_user((int __user *)(unsigned long) d32.send_indices,
+ &d->send_indices)
+ || __put_user((int __user *)(unsigned long) d32.send_sizes,
+ &d->send_sizes)
+ || __put_user(d32.flags, &d->flags)
+ || __put_user(d32.request_count, &d->request_count)
+ || __put_user((int __user *)(unsigned long) d32.request_indices,
+ &d->request_indices)
+ || __put_user((int __user *)(unsigned long) d32.request_sizes,
+ &d->request_sizes))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_DMA, (unsigned long) d);
+ if (err)
+ return err;
+
+ if (__get_user(d32.request_size, &d->request_size)
+ || __get_user(d32.granted_count, &d->granted_count)
+ || __put_user(d32.request_size, &argp->request_size)
+ || __put_user(d32.granted_count, &argp->granted_count))
+ return -EFAULT;
+
+ return 0;
+}
+
+#if __OS_HAS_AGP
+typedef struct drm_agp_mode32 {
+ u32 mode; /**< AGP mode */
+} drm_agp_mode32_t;
+
+static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_agp_mode32_t __user *argp = (void __user *)arg;
+ drm_agp_mode32_t m32;
+ drm_agp_mode_t __user *mode;
+
+ if (get_user(m32.mode, &argp->mode))
+ return -EFAULT;
+
+ mode = compat_alloc_user_space(sizeof(*mode));
+ if (put_user(m32.mode, &mode->mode))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_AGP_ENABLE, (unsigned long) mode);
+}
+
+typedef struct drm_agp_info32 {
+ int agp_version_major;
+ int agp_version_minor;
+ u32 mode;
+ u32 aperture_base; /* physical address */
+ u32 aperture_size; /* bytes */
+ u32 memory_allowed; /* bytes */
+ u32 memory_used;
+
+ /* PCI information */
+ unsigned short id_vendor;
+ unsigned short id_device;
+} drm_agp_info32_t;
+
+static int compat_drm_agp_info(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_agp_info32_t __user *argp = (void __user *)arg;
+ drm_agp_info32_t i32;
+ drm_agp_info_t __user *info;
+ int err;
+
+ info = compat_alloc_user_space(sizeof(*info));
+ if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_AGP_INFO, (unsigned long) info);
+ if (err)
+ return err;
+
+ if (__get_user(i32.agp_version_major, &info->agp_version_major)
+ || __get_user(i32.agp_version_minor, &info->agp_version_minor)
+ || __get_user(i32.mode, &info->mode)
+ || __get_user(i32.aperture_base, &info->aperture_base)
+ || __get_user(i32.aperture_size, &info->aperture_size)
+ || __get_user(i32.memory_allowed, &info->memory_allowed)
+ || __get_user(i32.memory_used, &info->memory_used)
+ || __get_user(i32.id_vendor, &info->id_vendor)
+ || __get_user(i32.id_device, &info->id_device))
+ return -EFAULT;
+
+ if (copy_to_user(argp, &i32, sizeof(i32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+typedef struct drm_agp_buffer32 {
+ u32 size; /**< In bytes -- will round to page boundary */
+ u32 handle; /**< Used for binding / unbinding */
+ u32 type; /**< Type of memory to allocate */
+ u32 physical; /**< Physical used by i810 */
+} drm_agp_buffer32_t;
+
+static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_agp_buffer32_t __user *argp = (void __user *)arg;
+ drm_agp_buffer32_t req32;
+ drm_agp_buffer_t __user *request;
+ int err;
+
+ if (copy_from_user(&req32, argp, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.size, &request->size)
+ || __put_user(req32.type, &request->type))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_AGP_ALLOC, (unsigned long) request);
+ if (err)
+ return err;
+
+ if (__get_user(req32.handle, &request->handle)
+ || __get_user(req32.physical, &request->physical)
+ || copy_to_user(argp, &req32, sizeof(req32))) {
+ drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_AGP_FREE, (unsigned long) request);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int compat_drm_agp_free(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_agp_buffer32_t __user *argp = (void __user *)arg;
+ drm_agp_buffer_t __user *request;
+ u32 handle;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || get_user(handle, &argp->handle)
+ || __put_user(handle, &request->handle))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_AGP_FREE, (unsigned long) request);
+}
+
+typedef struct drm_agp_binding32 {
+ u32 handle; /**< From drm_agp_buffer */
+ u32 offset; /**< In bytes -- will round to page boundary */
+} drm_agp_binding32_t;
+
+static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_agp_binding32_t __user *argp = (void __user *)arg;
+ drm_agp_binding32_t req32;
+ drm_agp_binding_t __user *request;
+
+ if (copy_from_user(&req32, argp, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.handle, &request->handle)
+ || __put_user(req32.offset, &request->offset))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_AGP_BIND, (unsigned long) request);
+}
+
+static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_agp_binding32_t __user *argp = (void __user *)arg;
+ drm_agp_binding_t __user *request;
+ u32 handle;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || get_user(handle, &argp->handle)
+ || __put_user(handle, &request->handle))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_AGP_UNBIND, (unsigned long) request);
+}
+#endif /* __OS_HAS_AGP */
+
+typedef struct drm_scatter_gather32 {
+ u32 size; /**< In bytes -- will round to page boundary */
+ u32 handle; /**< Used for mapping / unmapping */
+} drm_scatter_gather32_t;
+
+static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_scatter_gather32_t __user *argp = (void __user *)arg;
+ drm_scatter_gather_t __user *request;
+ int err;
+ unsigned long x;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+ || __get_user(x, &argp->size)
+ || __put_user(x, &request->size))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_SG_ALLOC, (unsigned long) request);
+ if (err)
+ return err;
+
+ /* XXX not sure about the handle conversion here... */
+ if (__get_user(x, &request->handle)
+ || __put_user(x >> PAGE_SHIFT, &argp->handle))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int compat_drm_sg_free(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_scatter_gather32_t __user *argp = (void __user *)arg;
+ drm_scatter_gather_t __user *request;
+ unsigned long x;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+ || __get_user(x, &argp->handle)
+ || __put_user(x << PAGE_SHIFT, &request->handle))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_SG_FREE, (unsigned long) request);
+}
+
+struct drm_wait_vblank_request32 {
+ drm_vblank_seq_type_t type;
+ unsigned int sequence;
+ u32 signal;
+};
+
+struct drm_wait_vblank_reply32 {
+ drm_vblank_seq_type_t type;
+ unsigned int sequence;
+ s32 tval_sec;
+ s32 tval_usec;
+};
+
+typedef union drm_wait_vblank32 {
+ struct drm_wait_vblank_request32 request;
+ struct drm_wait_vblank_reply32 reply;
+} drm_wait_vblank32_t;
+
+static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_wait_vblank32_t __user *argp = (void __user *)arg;
+ drm_wait_vblank32_t req32;
+ drm_wait_vblank_t __user *request;
+ int err;
+
+ if (copy_from_user(&req32, argp, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.request.type, &request->request.type)
+ || __put_user(req32.request.sequence, &request->request.sequence)
+ || __put_user(req32.request.signal, &request->request.signal))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_WAIT_VBLANK, (unsigned long) request);
+ if (err)
+ return err;
+
+ if (__get_user(req32.reply.type, &request->reply.type)
+ || __get_user(req32.reply.sequence, &request->reply.sequence)
+ || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
+ || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
+ return -EFAULT;
+
+ if (copy_to_user(argp, &req32, sizeof(req32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
+#if __OS_HAS_AGP
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
+#endif
+ [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
+ [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
+ [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/drm.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ drm_ioctl_compat_t *fn;
+ int ret;
+
+ if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls))
+ return -ENOTTY;
+
+ fn = drm_compat_ioctls[nr];
+
+ lock_kernel(); /* XXX for now */
+ if (fn != NULL)
+ ret = (*fn)(filp, cmd, arg);
+ else
+ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_compat_ioctl);
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index 7300a09dbd5..b5903f9f142 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -1,10 +1,30 @@
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
*/
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
**************************************************************************/
#include "drmP.h"
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 7e55edf45c4..23e027d2908 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -1,3 +1,30 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
#ifndef _I915_DRM_H_
#define _I915_DRM_H_
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 002b7082e21..e6a9e1d1d28 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -1,11 +1,30 @@
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
*/
-
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
**************************************************************************/
#include "drmP.h"
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index f6ca92a565d..fa940d64b85 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -1,10 +1,30 @@
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
*/
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
**************************************************************************/
#ifndef _I915_DRV_H_
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index b0239262a84..a101cc9cfd7 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -1,10 +1,30 @@
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
*/
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
**************************************************************************/
#include "drmP.h"
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c
index d54a3005946..9b1698f521b 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/char/drm/i915_mem.c
@@ -1,10 +1,30 @@
/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
*/
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
**************************************************************************/
#include "drmP.h"
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index 7b983d96e53..18e4e5b0952 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -101,6 +101,9 @@ static struct drm_driver driver = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_compat_ioctl,
+#endif
},
.pci_driver = {
.name = DRIVER_NAME,
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 5837098afae..771aa80a5e8 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -317,6 +317,9 @@ extern int radeon_preinit( struct drm_device *dev, unsigned long flags );
extern int radeon_postinit( struct drm_device *dev, unsigned long flags );
extern int radeon_postcleanup( struct drm_device *dev );
+extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
diff --git a/drivers/char/drm/radeon_ioc32.c b/drivers/char/drm/radeon_ioc32.c
new file mode 100644
index 00000000000..bfe612215fb
--- /dev/null
+++ b/drivers/char/drm/radeon_ioc32.c
@@ -0,0 +1,395 @@
+/**
+ * \file radeon_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the Radeon DRM.
+ *
+ * \author Paul Mackerras <paulus@samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+#include <linux/ioctl32.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+
+typedef struct drm_radeon_init32 {
+ int func;
+ u32 sarea_priv_offset;
+ int is_pci;
+ int cp_mode;
+ int gart_size;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ u32 fb_offset;
+ u32 mmio_offset;
+ u32 ring_offset;
+ u32 ring_rptr_offset;
+ u32 buffers_offset;
+ u32 gart_textures_offset;
+} drm_radeon_init32_t;
+
+static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_init32_t init32;
+ drm_radeon_init_t __user *init;
+
+ if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
+ return -EFAULT;
+
+ init = compat_alloc_user_space(sizeof(*init));
+ if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
+ || __put_user(init32.func, &init->func)
+ || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
+ || __put_user(init32.is_pci, &init->is_pci)
+ || __put_user(init32.cp_mode, &init->cp_mode)
+ || __put_user(init32.gart_size, &init->gart_size)
+ || __put_user(init32.ring_size, &init->ring_size)
+ || __put_user(init32.usec_timeout, &init->usec_timeout)
+ || __put_user(init32.fb_bpp, &init->fb_bpp)
+ || __put_user(init32.front_offset, &init->front_offset)
+ || __put_user(init32.front_pitch, &init->front_pitch)
+ || __put_user(init32.back_offset, &init->back_offset)
+ || __put_user(init32.back_pitch, &init->back_pitch)
+ || __put_user(init32.depth_bpp, &init->depth_bpp)
+ || __put_user(init32.depth_offset, &init->depth_offset)
+ || __put_user(init32.depth_pitch, &init->depth_pitch)
+ || __put_user(init32.fb_offset, &init->fb_offset)
+ || __put_user(init32.mmio_offset, &init->mmio_offset)
+ || __put_user(init32.ring_offset, &init->ring_offset)
+ || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
+ || __put_user(init32.buffers_offset, &init->buffers_offset)
+ || __put_user(init32.gart_textures_offset,
+ &init->gart_textures_offset))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RADEON_CP_INIT, (unsigned long) init);
+}
+
+typedef struct drm_radeon_clear32 {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask; /* misnamed field: should be stencil */
+ u32 depth_boxes;
+} drm_radeon_clear32_t;
+
+static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_clear32_t clr32;
+ drm_radeon_clear_t __user *clr;
+
+ if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
+ return -EFAULT;
+
+ clr = compat_alloc_user_space(sizeof(*clr));
+ if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
+ || __put_user(clr32.flags, &clr->flags)
+ || __put_user(clr32.clear_color, &clr->clear_color)
+ || __put_user(clr32.clear_depth, &clr->clear_depth)
+ || __put_user(clr32.color_mask, &clr->color_mask)
+ || __put_user(clr32.depth_mask, &clr->depth_mask)
+ || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
+ &clr->depth_boxes))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RADEON_CLEAR, (unsigned long) clr);
+}
+
+typedef struct drm_radeon_stipple32 {
+ u32 mask;
+} drm_radeon_stipple32_t;
+
+static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_stipple32_t __user *argp = (void __user *) arg;
+ drm_radeon_stipple_t __user *request;
+ u32 mask;
+
+ if (get_user(mask, &argp->mask))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user((unsigned int __user *)(unsigned long) mask,
+ &request->mask))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RADEON_STIPPLE, (unsigned long) request);
+}
+
+typedef struct drm_radeon_tex_image32 {
+ unsigned int x, y; /* Blit coordinates */
+ unsigned int width, height;
+ u32 data;
+} drm_radeon_tex_image32_t;
+
+typedef struct drm_radeon_texture32 {
+ unsigned int offset;
+ int pitch;
+ int format;
+ int width; /* Texture image coordinates */
+ int height;
+ u32 image;
+} drm_radeon_texture32_t;
+
+static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_texture32_t req32;
+ drm_radeon_texture_t __user *request;
+ drm_radeon_tex_image32_t img32;
+ drm_radeon_tex_image_t __user *image;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+ if (req32.image == 0)
+ return -EINVAL;
+ if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
+ sizeof(img32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
+ if (!access_ok(VERIFY_WRITE, request,
+ sizeof(*request) + sizeof(*image)))
+ return -EFAULT;
+ image = (drm_radeon_tex_image_t __user *) (request + 1);
+
+ if (__put_user(req32.offset, &request->offset)
+ || __put_user(req32.pitch, &request->pitch)
+ || __put_user(req32.format, &request->format)
+ || __put_user(req32.width, &request->width)
+ || __put_user(req32.height, &request->height)
+ || __put_user(image, &request->image)
+ || __put_user(img32.x, &image->x)
+ || __put_user(img32.y, &image->y)
+ || __put_user(img32.width, &image->width)
+ || __put_user(img32.height, &image->height)
+ || __put_user((const void __user *)(unsigned long)img32.data,
+ &image->data))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RADEON_TEXTURE, (unsigned long) request);
+}
+
+typedef struct drm_radeon_vertex2_32 {
+ int idx; /* Index of vertex buffer */
+ int discard; /* Client finished with buffer? */
+ int nr_states;
+ u32 state;
+ int nr_prims;
+ u32 prim;
+} drm_radeon_vertex2_32_t;
+
+static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_vertex2_32_t req32;
+ drm_radeon_vertex2_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.idx, &request->idx)
+ || __put_user(req32.discard, &request->discard)
+ || __put_user(req32.nr_states, &request->nr_states)
+ || __put_user((void __user *)(unsigned long)req32.state,
+ &request->state)
+ || __put_user(req32.nr_prims, &request->nr_prims)
+ || __put_user((void __user *)(unsigned long)req32.prim,
+ &request->prim))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RADEON_VERTEX2, (unsigned long) request);
+}
+
+typedef struct drm_radeon_cmd_buffer32 {
+ int bufsz;
+ u32 buf;
+ int nbox;
+ u32 boxes;
+} drm_radeon_cmd_buffer32_t;
+
+static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_cmd_buffer32_t req32;
+ drm_radeon_cmd_buffer_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.bufsz, &request->bufsz)
+ || __put_user((void __user *)(unsigned long)req32.buf,
+ &request->buf)
+ || __put_user(req32.nbox, &request->nbox)
+ || __put_user((void __user *)(unsigned long)req32.boxes,
+ &request->boxes))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RADEON_CMDBUF, (unsigned long) request);
+}
+
+typedef struct drm_radeon_getparam32 {
+ int param;
+ u32 value;
+} drm_radeon_getparam32_t;
+
+static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_getparam32_t req32;
+ drm_radeon_getparam_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.param, &request->param)
+ || __put_user((void __user *)(unsigned long)req32.value,
+ &request->value))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RADEON_GETPARAM, (unsigned long) request);
+}
+
+typedef struct drm_radeon_mem_alloc32 {
+ int region;
+ int alignment;
+ int size;
+ u32 region_offset; /* offset from start of fb or GART */
+} drm_radeon_mem_alloc32_t;
+
+static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_mem_alloc32_t req32;
+ drm_radeon_mem_alloc_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.region, &request->region)
+ || __put_user(req32.alignment, &request->alignment)
+ || __put_user(req32.size, &request->size)
+ || __put_user((int __user *)(unsigned long)req32.region_offset,
+ &request->region_offset))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RADEON_ALLOC, (unsigned long) request);
+}
+
+typedef struct drm_radeon_irq_emit32 {
+ u32 irq_seq;
+} drm_radeon_irq_emit32_t;
+
+static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_irq_emit32_t req32;
+ drm_radeon_irq_emit_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user((int __user *)(unsigned long)req32.irq_seq,
+ &request->irq_seq))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request);
+}
+
+drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+ [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
+ [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
+ [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
+ [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
+ [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
+ [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
+ [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
+ [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
+ [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
+ fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+ lock_kernel(); /* XXX for now */
+ if (fn != NULL)
+ ret = (*fn)(filp, cmd, arg);
+ else
+ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c
index cd25f28e26a..40474a65f56 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/char/drm/radeon_irq.c
@@ -35,6 +35,14 @@
#include "radeon_drm.h"
#include "radeon_drv.h"
+static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 mask)
+{
+ u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
+ if (irqs)
+ RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
+ return irqs;
+}
+
/* Interrupts - Used for device synchronization and flushing in the
* following circumstances:
*
@@ -63,8 +71,8 @@ irqreturn_t radeon_driver_irq_handler( DRM_IRQ_ARGS )
/* Only consider the bits we're interested in - others could be used
* outside the DRM
*/
- stat = RADEON_READ(RADEON_GEN_INT_STATUS)
- & (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT);
+ stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
+ RADEON_CRTC_VBLANK_STAT));
if (!stat)
return IRQ_NONE;
@@ -80,19 +88,9 @@ irqreturn_t radeon_driver_irq_handler( DRM_IRQ_ARGS )
drm_vbl_send_signals( dev );
}
- /* Acknowledge interrupts we handle */
- RADEON_WRITE(RADEON_GEN_INT_STATUS, stat);
return IRQ_HANDLED;
}
-static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv)
-{
- u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS )
- & (RADEON_SW_INT_TEST_ACK | RADEON_CRTC_VBLANK_STAT);
- if (tmp)
- RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp );
-}
-
static int radeon_emit_irq(drm_device_t *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -141,7 +139,7 @@ int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
return DRM_ERR(EINVAL);
}
- radeon_acknowledge_irqs( dev_priv );
+ radeon_acknowledge_irqs(dev_priv, RADEON_CRTC_VBLANK_STAT);
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
@@ -219,7 +217,8 @@ void radeon_driver_irq_preinstall( drm_device_t *dev ) {
RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
/* Clear bits if they're already high */
- radeon_acknowledge_irqs( dev_priv );
+ radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
+ RADEON_CRTC_VBLANK_STAT));
}
void radeon_driver_irq_postinstall( drm_device_t *dev ) {
diff --git a/drivers/char/hw_random.c b/drivers/char/hw_random.c
index 7e6ac14c245..3480535a09c 100644
--- a/drivers/char/hw_random.c
+++ b/drivers/char/hw_random.c
@@ -579,7 +579,7 @@ static int __init rng_init (void)
/* Probe for Intel, AMD RNGs */
for_each_pci_dev(pdev) {
- ent = pci_match_device (rng_pci_tbl, pdev);
+ ent = pci_match_id(rng_pci_tbl, pdev);
if (ent) {
rng_ops = &rng_vendor_ops[ent->driver_data];
goto match;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 1813d0d198f..e16c13fe698 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1088,8 +1088,8 @@ static inline int i_ipmi_request(ipmi_user_t user,
long seqid;
int broadcast = 0;
- if (addr->channel > IPMI_NUM_CHANNELS) {
- spin_lock_irqsave(&intf->counter_lock, flags);
+ if (addr->channel >= IPMI_MAX_CHANNELS) {
+ spin_lock_irqsave(&intf->counter_lock, flags);
intf->sent_invalid_commands++;
spin_unlock_irqrestore(&intf->counter_lock, flags);
rv = -EINVAL;
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 31cf84d6902..931efd58f87 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -309,9 +309,6 @@ static int __init misc_init(void)
#ifdef CONFIG_BVME6000
rtc_DP8570A_init();
#endif
-#ifdef CONFIG_PMAC_PBOOK
- pmu_device_init();
-#endif
if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) {
printk("unable to get major %d for misc devices\n",
MISC_MAJOR);
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 7c24fbe831f..95f7046ff05 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -451,7 +451,7 @@ static int __init moxa_init(void)
int n = (sizeof(moxa_pcibrds) / sizeof(moxa_pcibrds[0])) - 1;
i = 0;
while (i < n) {
- while ((p = pci_find_device(moxa_pcibrds[i].vendor, moxa_pcibrds[i].device, p))!=NULL)
+ while ((p = pci_get_device(moxa_pcibrds[i].vendor, moxa_pcibrds[i].device, p))!=NULL)
{
if (pci_enable_device(p))
continue;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 1c8d866a49d..8f36b1758eb 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -581,7 +581,7 @@ static dev_link_t *mgslpc_attach(void)
/* Interrupt setup */
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
+ link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->irq.Handler = NULL;
link->conf.Attributes = 0;
@@ -3081,6 +3081,12 @@ void mgslpc_remove_device(MGSLPC_INFO *remove_info)
}
}
+static struct pcmcia_device_id mgslpc_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x02c5, 0x0050),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, mgslpc_ids);
+
static struct pcmcia_driver mgslpc_driver = {
.owner = THIS_MODULE,
.drv = {
@@ -3088,6 +3094,7 @@ static struct pcmcia_driver mgslpc_driver = {
},
.attach = mgslpc_attach,
.detach = mgslpc_detach,
+ .id_table = mgslpc_ids,
};
static struct tty_operations mgslpc_ops = {
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 7db3370f497..d7d484024e2 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -1095,7 +1095,7 @@ static int __init rio_init(void)
#ifdef CONFIG_PCI
/* First look for the JET devices: */
- while ((pdev = pci_find_device (PCI_VENDOR_ID_SPECIALIX,
+ while ((pdev = pci_get_device (PCI_VENDOR_ID_SPECIALIX,
PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8,
pdev))) {
if (pci_enable_device(pdev)) continue;
@@ -1169,7 +1169,7 @@ static int __init rio_init(void)
*/
/* Then look for the older RIO/PCI devices: */
- while ((pdev = pci_find_device (PCI_VENDOR_ID_SPECIALIX,
+ while ((pdev = pci_get_device (PCI_VENDOR_ID_SPECIALIX,
PCI_DEVICE_ID_SPECIALIX_RIO,
pdev))) {
if (pci_enable_device(pdev)) continue;
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index ff4f0980486..d8f9e94ae47 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -78,6 +78,7 @@
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>
+#include <linux/delay.h>
#include <asm/current.h>
#include <asm/uaccess.h>
@@ -894,7 +895,6 @@ static int __init rtc_init(void)
struct proc_dir_entry *ent;
#if defined(__alpha__) || defined(__mips__)
unsigned int year, ctrl;
- unsigned long uip_watchdog;
char *guess = NULL;
#endif
#ifdef __sparc__
@@ -1000,12 +1000,8 @@ no_irq:
/* Each operating system on an Alpha uses its own epoch.
Let's try to guess which one we are using now. */
- uip_watchdog = jiffies;
if (rtc_is_updating() != 0)
- while (jiffies - uip_watchdog < 2*HZ/100) {
- barrier();
- cpu_relax();
- }
+ msleep(20);
spin_lock_irq(&rtc_lock);
year = CMOS_READ(RTC_YEAR);
@@ -1213,7 +1209,6 @@ static int rtc_proc_open(struct inode *inode, struct file *file)
void rtc_get_rtc_time(struct rtc_time *rtc_tm)
{
- unsigned long uip_watchdog = jiffies;
unsigned char ctrl;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
@@ -1221,7 +1216,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
/*
* read RTC once any update in progress is done. The update
- * can take just over 2ms. We wait 10 to 20ms. There is no need to
+ * can take just over 2ms. We wait 20ms. There is no need to
* poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
* If you need to know *exactly* when a second has started, enable
* periodic update complete interrupts, (via ioctl) and then
@@ -1230,10 +1225,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
*/
if (rtc_is_updating() != 0)
- while (jiffies - uip_watchdog < 2*HZ/100) {
- barrier();
- cpu_relax();
- }
+ msleep(20);
/*
* Only the values that we read from the RTC are set. We leave
diff --git a/drivers/char/tipar.c b/drivers/char/tipar.c
index 659335d80ee..ec78d2f161f 100644
--- a/drivers/char/tipar.c
+++ b/drivers/char/tipar.c
@@ -396,7 +396,7 @@ static struct file_operations tipar_fops = {
static int __init
tipar_setup(char *str)
{
- int ints[2];
+ int ints[3];
str = get_options(str, ARRAY_SIZE(ints), ints);
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 854475c54f0..049d128ae7f 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -464,7 +464,7 @@ void __devexit tpm_remove(struct pci_dev *pci_dev)
pci_set_drvdata(pci_dev, NULL);
misc_deregister(&chip->vendor->miscdev);
- kfree(&chip->vendor->miscdev.name);
+ kfree(chip->vendor->miscdev.name);
sysfs_remove_group(&pci_dev->dev.kobj, chip->vendor->attr_group);
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index 58597993954..f19cf9d7792 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty, struct file * file,
ld = tty_ldisc_ref(tty);
switch (arg) {
case TCIFLUSH:
- if (ld->flush_buffer)
+ if (ld && ld->flush_buffer)
ld->flush_buffer(tty);
break;
case TCIOFLUSH:
- if (ld->flush_buffer)
+ if (ld && ld->flush_buffer)
ld->flush_buffer(tty);
/* fall through */
case TCOFLUSH:
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 8971484b956..1d44f69e1fd 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -25,6 +25,7 @@
#include <linux/fs.h>
#include <linux/console.h>
#include <linux/signal.h>
+#include <linux/timex.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -386,7 +387,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
if (!perm)
return -EPERM;
if (arg)
- arg = 1193182 / arg;
+ arg = CLOCK_TICK_RATE / arg;
kd_mksound(arg, 0);
return 0;
@@ -403,7 +404,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
count = ticks ? (arg & 0xffff) : 0;
if (count)
- count = 1193182 / count;
+ count = CLOCK_TICK_RATE / count;
kd_mksound(count, ticks);
return 0;
}
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c
index b14d642439e..5d07ee59679 100644
--- a/drivers/char/watchdog/i8xx_tco.c
+++ b/drivers/char/watchdog/i8xx_tco.c
@@ -401,7 +401,7 @@ static unsigned char __init i8xx_tco_getdevice (void)
*/
while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- if (pci_match_device(i8xx_tco_pci_tbl, dev)) {
+ if (pci_match_id(i8xx_tco_pci_tbl, dev)) {
i8xx_tco_pci = dev;
break;
}
diff --git a/drivers/char/watchdog/ixp2000_wdt.c b/drivers/char/watchdog/ixp2000_wdt.c
index 4e98c215e5b..4b039516cc8 100644
--- a/drivers/char/watchdog/ixp2000_wdt.c
+++ b/drivers/char/watchdog/ixp2000_wdt.c
@@ -162,7 +162,7 @@ ixp2000_wdt_release(struct inode *inode, struct file *file)
if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
wdt_disable();
} else {
- printk(KERN_CRIT "WATCHDOG: Device closed unexpectdly - "
+ printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
"timer will not stop\n");
}
diff --git a/drivers/char/watchdog/ixp4xx_wdt.c b/drivers/char/watchdog/ixp4xx_wdt.c
index 82396e06c8a..83df369113a 100644
--- a/drivers/char/watchdog/ixp4xx_wdt.c
+++ b/drivers/char/watchdog/ixp4xx_wdt.c
@@ -156,7 +156,7 @@ ixp4xx_wdt_release(struct inode *inode, struct file *file)
if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
wdt_disable();
} else {
- printk(KERN_CRIT "WATCHDOG: Device closed unexpectdly - "
+ printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
"timer will not stop\n");
}
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 839b44a7e08..53c95c0bbf4 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -16,6 +16,7 @@
#include <linux/console.h>
#include <linux/efi.h>
#include <linux/serial.h>
+#include <asm/vga.h>
#include "pcdp.h"
static int __init
@@ -40,10 +41,27 @@ setup_serial_console(struct pcdp_uart *uart)
}
static int __init
-setup_vga_console(struct pcdp_vga *vga)
+setup_vga_console(struct pcdp_device *dev)
{
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
- if (efi_mem_type(0xA0000) == EFI_CONVENTIONAL_MEMORY) {
+ u8 *if_ptr;
+
+ if_ptr = ((u8 *)dev + sizeof(struct pcdp_device));
+ if (if_ptr[0] == PCDP_IF_PCI) {
+ struct pcdp_if_pci if_pci;
+
+ /* struct copy since ifptr might not be correctly aligned */
+
+ memcpy(&if_pci, if_ptr, sizeof(if_pci));
+
+ if (if_pci.trans & PCDP_PCI_TRANS_IOPORT)
+ vga_console_iobase = if_pci.ioport_tra;
+
+ if (if_pci.trans & PCDP_PCI_TRANS_MMIO)
+ vga_console_membase = if_pci.mmio_tra;
+ }
+
+ if (efi_mem_type(vga_console_membase + 0xA0000) == EFI_CONVENTIONAL_MEMORY) {
printk(KERN_ERR "PCDP: VGA selected, but frame buffer is not MMIO!\n");
return -ENODEV;
}
@@ -95,7 +113,7 @@ efi_setup_pcdp_console(char *cmdline)
dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
if (dev->flags & PCDP_PRIMARY_CONSOLE) {
if (dev->type == PCDP_CONSOLE_VGA) {
- return setup_vga_console((struct pcdp_vga *) dev);
+ return setup_vga_console(dev);
}
}
}
diff --git a/drivers/firmware/pcdp.h b/drivers/firmware/pcdp.h
index 1dc7c88b7b4..e72cc47de33 100644
--- a/drivers/firmware/pcdp.h
+++ b/drivers/firmware/pcdp.h
@@ -52,11 +52,34 @@ struct pcdp_uart {
u32 clock_rate;
u8 pci_prog_intfc;
u8 flags;
-};
+} __attribute__((packed));
+
+#define PCDP_IF_PCI 1
+
+/* pcdp_if_pci.trans */
+#define PCDP_PCI_TRANS_IOPORT 0x02
+#define PCDP_PCI_TRANS_MMIO 0x01
+
+struct pcdp_if_pci {
+ u8 interconnect;
+ u8 reserved;
+ u16 length;
+ u8 segment;
+ u8 bus;
+ u8 dev;
+ u8 fun;
+ u16 dev_id;
+ u16 vendor_id;
+ u32 acpi_interrupt;
+ u64 mmio_tra;
+ u64 ioport_tra;
+ u8 flags;
+ u8 trans;
+} __attribute__((packed));
struct pcdp_vga {
u8 count; /* address space descriptors */
-};
+} __attribute__((packed));
/* pcdp_device.flags */
#define PCDP_PRIMARY_CONSOLE 1
@@ -66,7 +89,9 @@ struct pcdp_device {
u8 flags;
u16 length;
u16 efi_index;
-};
+ /* next data is pcdp_if_pci or pcdp_if_acpi (not yet supported) */
+ /* next data is device specific type (currently only pcdp_vga) */
+} __attribute__((packed));
struct pcdp {
u8 signature[4];
@@ -81,4 +106,4 @@ struct pcdp {
u32 num_uarts;
struct pcdp_uart uart[0]; /* actual size is num_uarts */
/* remainder of table is pcdp_device structures */
-};
+} __attribute__((packed));
diff --git a/drivers/i2c/chips/atxp1.c b/drivers/i2c/chips/atxp1.c
index 5c6597aa2c7..0bcf82b4c07 100644
--- a/drivers/i2c/chips/atxp1.c
+++ b/drivers/i2c/chips/atxp1.c
@@ -144,7 +144,7 @@ static ssize_t atxp1_storevcore(struct device *dev, struct device_attribute *att
if (vid == cvid)
return count;
- dev_info(dev, "Setting VCore to %d mV (0x%02x)\n", vcore, vid);
+ dev_dbg(dev, "Setting VCore to %d mV (0x%02x)\n", vcore, vid);
/* Write every 25 mV step to increase stability */
if (cvid > vid) {
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 0273f124a4f..5f33df47aa7 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -606,6 +606,12 @@ config BLK_DEV_IT8172
<http://www.ite.com.tw/ia/brief_it8172bsp.htm>; picture of the
board at <http://www.mvista.com/partners/semiconductor/ite.html>.
+config BLK_DEV_IT821X
+ tristate "IT821X IDE support"
+ help
+ This driver adds support for the ITE 8211 IDE controller and the
+ IT 8212 IDE RAID controller in both RAID and pass-through mode.
+
config BLK_DEV_NS87415
tristate "NS87415 chipset support"
help
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 5be8ad6dc9e..cca9c075966 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -20,7 +20,6 @@ ide-core-$(CONFIG_BLK_DEV_CMD640) += pci/cmd640.o
# Core IDE code - must come before legacy
ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o
ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
-ide-core-$(CONFIG_BLK_DEV_IDE_TCQ) += ide-tcq.o
ide-core-$(CONFIG_PROC_FS) += ide-proc.o
ide-core-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index d6f934886b0..f9c1acb4ed6 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -119,6 +119,10 @@ static int lba_capacity_is_ok (struct hd_driveid *id)
{
unsigned long lba_sects, chs_sects, head, tail;
+ /* No non-LBA info .. so valid! */
+ if (id->cyls == 0)
+ return 1;
+
/*
* The ATA spec tells large drives to return
* C/H/S = 16383/16/63 independent of their size.
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 2d2eefb610d..1e1531334c2 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -132,7 +132,6 @@ static const struct drive_list_entry drive_blacklist [] = {
{ "SAMSUNG CD-ROM SC-148C", "ALL" },
{ "SAMSUNG CD-ROM SC", "ALL" },
{ "SanDisk SDP3B-64" , "ALL" },
- { "SAMSUNG CD-ROM SN-124", "ALL" },
{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", "ALL" },
{ "_NEC DV5800A", "ALL" },
{ NULL , NULL }
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 53024942a7e..b443b04a4c5 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -1181,7 +1181,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
pre_reset(drive);
SELECT_DRIVE(drive);
udelay (20);
- hwif->OUTB(WIN_SRST, IDE_COMMAND_REG);
+ hwif->OUTBSYNC(drive, WIN_SRST, IDE_COMMAND_REG);
+ ndelay(400);
hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
hwgroup->polling = 1;
__ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 6806d407e9c..b09a6537c7a 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -487,8 +487,7 @@ static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat)
u8 err = 0;
local_irq_set(flags);
- printk("%s: %s: status=0x%02x", drive->name, msg, stat);
- printk(" { ");
+ printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
if (stat & BUSY_STAT)
printk("Busy ");
else {
@@ -500,15 +499,13 @@ static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat)
if (stat & INDEX_STAT) printk("Index ");
if (stat & ERR_STAT) printk("Error ");
}
- printk("}");
- printk("\n");
+ printk("}\n");
if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
err = hwif->INB(IDE_ERROR_REG);
- printk("%s: %s: error=0x%02x", drive->name, msg, err);
- printk(" { ");
+ printk("%s: %s: error=0x%02x { ", drive->name, msg, err);
if (err & ABRT_ERR) printk("DriveStatusError ");
if (err & ICRC_ERR)
- printk("Bad%s ", (err & ABRT_ERR) ? "CRC" : "Sector");
+ printk((err & ABRT_ERR) ? "BadCRC " : "BadSector ");
if (err & ECC_ERR) printk("UncorrectableError ");
if (err & ID_ERR) printk("SectorIdNotFound ");
if (err & TRK0_ERR) printk("TrackZeroNotFound ");
@@ -546,8 +543,8 @@ static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat)
printk(", sector=%llu",
(unsigned long long)HWGROUP(drive)->rq->sector);
}
+ printk("\n");
}
- printk("\n");
ide_dump_opcode(drive);
local_irq_restore(flags);
return err;
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index e884cd4b22f..242029c9c0c 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -156,11 +156,13 @@ else \
#if (HD_DELAY > 0)
+
+#include <asm/i8253.h>
+
unsigned long last_req;
unsigned long read_timer(void)
{
- extern spinlock_t i8253_lock;
unsigned long t, flags;
int i;
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index e20327e54b1..978d27d6452 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -457,6 +457,40 @@ int ide_event(event_t event, int priority,
return 0;
} /* ide_event */
+static struct pcmcia_device_id ide_ids[] = {
+ PCMCIA_DEVICE_FUNC_ID(4),
+ PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
+ PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
+ PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
+ PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401),
+ PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
+ PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
+ PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
+ PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
+ PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
+ PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
+ PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
+ PCMCIA_DEVICE_PROD_ID12("EXP", "CD", 0x6f58c983, 0xaae5994f),
+ PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591),
+ PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+ PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
+ PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
+ PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
+ PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
+ PCMCIA_DEVICE_PROD_ID12(" ", "NinjaATA-", 0x3b6e20c8, 0xebe0bd79),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
+ PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
+ PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
+ PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, ide_ids);
+
static struct pcmcia_driver ide_cs_driver = {
.owner = THIS_MODULE,
.drv = {
@@ -464,6 +498,7 @@ static struct pcmcia_driver ide_cs_driver = {
},
.attach = ide_attach,
.detach = ide_detach,
+ .id_table = ide_ids,
};
static int __init init_ide_cs(void)
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
index 55e6e553e49..af46226c179 100644
--- a/drivers/ide/pci/Makefile
+++ b/drivers/ide/pci/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_BLK_DEV_HPT34X) += hpt34x.o
obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
#obj-$(CONFIG_BLK_DEV_HPT37X) += hpt37x.o
obj-$(CONFIG_BLK_DEV_IT8172) += it8172.o
+obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o
obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 67efb38a9f6..6cf49394a80 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -583,7 +583,7 @@ static int ali15x3_dma_setup(ide_drive_t *drive)
* appropriate also sets up the 1533 southbridge.
*/
-static unsigned int __init init_chipset_ali15x3 (struct pci_dev *dev, const char *name)
+static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const char *name)
{
unsigned long flags;
u8 tmpbyte;
@@ -677,7 +677,7 @@ static unsigned int __init init_chipset_ali15x3 (struct pci_dev *dev, const char
* FIXME: frobs bits that are not defined on newer ALi devices
*/
-static unsigned int __init ata66_ali15x3 (ide_hwif_t *hwif)
+static unsigned int __devinit ata66_ali15x3 (ide_hwif_t *hwif)
{
struct pci_dev *dev = hwif->pci_dev;
unsigned int ata66 = 0;
@@ -748,7 +748,7 @@ static unsigned int __init ata66_ali15x3 (ide_hwif_t *hwif)
* Initialize the IDE structure side of the ALi 15x3 driver.
*/
-static void __init init_hwif_common_ali15x3 (ide_hwif_t *hwif)
+static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
{
hwif->autodma = 0;
hwif->tuneproc = &ali15x3_tune_drive;
@@ -794,7 +794,7 @@ static void __init init_hwif_common_ali15x3 (ide_hwif_t *hwif)
* Sparc systems
*/
-static void __init init_hwif_ali15x3 (ide_hwif_t *hwif)
+static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
{
u8 ideic, inmir;
s8 irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
@@ -847,7 +847,7 @@ static void __init init_hwif_ali15x3 (ide_hwif_t *hwif)
* the actual work.
*/
-static void __init init_dma_ali15x3 (ide_hwif_t *hwif, unsigned long dmabase)
+static void __devinit init_dma_ali15x3 (ide_hwif_t *hwif, unsigned long dmabase)
{
if (m5229_revision < 0x20)
return;
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 4e0f13d1d06..844a6c9fb94 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -73,6 +73,7 @@ static struct amd_ide_chip {
{ PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 },
+ { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, AMD_UDMA_133 },
{ 0 }
};
@@ -309,7 +310,7 @@ static int amd74xx_ide_dma_check(ide_drive_t *drive)
* and initialize its drive independent registers.
*/
-static unsigned int __init init_chipset_amd74xx(struct pci_dev *dev, const char *name)
+static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const char *name)
{
unsigned char t;
unsigned int u;
@@ -413,7 +414,7 @@ static unsigned int __init init_chipset_amd74xx(struct pci_dev *dev, const char
return dev->irq;
}
-static void __init init_hwif_amd74xx(ide_hwif_t *hwif)
+static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
{
int i;
@@ -489,6 +490,7 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
/* 13 */ DECLARE_NV_DEV("NFORCE-CK804"),
/* 14 */ DECLARE_NV_DEV("NFORCE-MCP04"),
/* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
+ /* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"),
};
static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -524,6 +526,7 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
index 0381961db26..09269e574b3 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/pci/cs5530.c
@@ -217,7 +217,7 @@ static int cs5530_config_dma (ide_drive_t *drive)
* Initialize the cs5530 bridge for reliable IDE DMA operation.
*/
-static unsigned int __init init_chipset_cs5530 (struct pci_dev *dev, const char *name)
+static unsigned int __devinit init_chipset_cs5530 (struct pci_dev *dev, const char *name)
{
struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
unsigned long flags;
@@ -308,7 +308,7 @@ static unsigned int __init init_chipset_cs5530 (struct pci_dev *dev, const char
* performs channel-specific pre-initialization before drive probing.
*/
-static void __init init_hwif_cs5530 (ide_hwif_t *hwif)
+static void __devinit init_hwif_cs5530 (ide_hwif_t *hwif)
{
unsigned long basereg;
u32 d0_timings;
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index 80d67e99ccb..5a33513f3dd 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -391,7 +391,7 @@ static void cy82c693_tune_drive (ide_drive_t *drive, u8 pio)
/*
* this function is called during init and is used to setup the cy82c693 chip
*/
-static unsigned int __init init_chipset_cy82c693(struct pci_dev *dev, const char *name)
+static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const char *name)
{
if (PCI_FUNC(dev->devfn) != 1)
return 0;
@@ -443,7 +443,7 @@ static unsigned int __init init_chipset_cy82c693(struct pci_dev *dev, const char
/*
* the init function - called for each ide channel once
*/
-static void __init init_hwif_cy82c693(ide_hwif_t *hwif)
+static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif)
{
hwif->autodma = 0;
@@ -467,9 +467,9 @@ static void __init init_hwif_cy82c693(ide_hwif_t *hwif)
hwif->drives[1].autodma = hwif->autodma;
}
-static __initdata ide_hwif_t *primary;
+static __devinitdata ide_hwif_t *primary;
-void __init init_iops_cy82c693(ide_hwif_t *hwif)
+void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
{
if (PCI_FUNC(hwif->pci_dev->devfn) == 1)
primary = hwif;
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index 4565cc311ff..da46577380f 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -39,6 +39,17 @@
#include <asm/io.h>
+static int ide_generic_all; /* Set to claim all devices */
+
+static int __init ide_generic_all_on(char *unused)
+{
+ ide_generic_all = 1;
+ printk(KERN_INFO "IDE generic will claim all unknown PCI IDE storage controllers.\n");
+ return 1;
+}
+
+__setup("all-generic-ide", ide_generic_all_on);
+
static void __devinit init_hwif_generic (ide_hwif_t *hwif)
{
switch(hwif->pci_dev->device) {
@@ -78,79 +89,85 @@ static void __devinit init_hwif_generic (ide_hwif_t *hwif)
static ide_pci_device_t generic_chipsets[] __devinitdata = {
{ /* 0 */
+ .name = "Unknown",
+ .init_hwif = init_hwif_generic,
+ .channels = 2,
+ .autodma = AUTODMA,
+ .bootable = ON_BOARD,
+ },{ /* 1 */
.name = "NS87410",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x43,0x08,0x08}, {0x47,0x08,0x08}},
.bootable = ON_BOARD,
- },{ /* 1 */
+ },{ /* 2 */
.name = "SAMURAI",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
- },{ /* 2 */
+ },{ /* 3 */
.name = "HT6565",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
- },{ /* 3 */
+ },{ /* 4 */
.name = "UM8673F",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = NODMA,
.bootable = ON_BOARD,
- },{ /* 4 */
+ },{ /* 5 */
.name = "UM8886A",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = NODMA,
.bootable = ON_BOARD,
- },{ /* 5 */
+ },{ /* 6 */
.name = "UM8886BF",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = NODMA,
.bootable = ON_BOARD,
- },{ /* 6 */
+ },{ /* 7 */
.name = "HINT_IDE",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
- },{ /* 7 */
+ },{ /* 8 */
.name = "VIA_IDE",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
- },{ /* 8 */
+ },{ /* 9 */
.name = "OPTI621V",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
- },{ /* 9 */
+ },{ /* 10 */
.name = "VIA8237SATA",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
- },{ /* 10 */
+ },{ /* 11 */
.name = "Piccolo0102",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
- },{ /* 11 */
+ },{ /* 12 */
.name = "Piccolo0103",
.init_hwif = init_hwif_generic,
.channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
- },{ /* 12 */
+ },{ /* 13 */
.name = "Piccolo0105",
.init_hwif = init_hwif_generic,
.channels = 2,
@@ -174,6 +191,10 @@ static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_devi
u16 command;
int ret = -ENODEV;
+ /* Don't use the generic entry unless instructed to do so */
+ if (id->driver_data == 0 && ide_generic_all == 0)
+ goto out;
+
if (dev->vendor == PCI_VENDOR_ID_UMC &&
dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
(!(PCI_FUNC(dev->devfn) & 1)))
@@ -195,21 +216,23 @@ out:
}
static struct pci_device_id generic_pci_tbl[] = {
- { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
- { PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
- { PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
- { PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
- { PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
- { PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6},
- { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7},
- { PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8},
+ { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ { PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
+ { PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
+ { PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
+ { PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
+ { PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6},
+ { PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7},
+ { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8},
+ { PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9},
#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237_SATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9},
+ { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237_SATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10},
#endif
- { PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10},
- { PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11},
- { PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12},
+ { PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11},
+ { PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12},
+ { PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13},
+ /* Must come last. If you add entries, adjust this table and the init_one code accordingly */
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 0},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, generic_pci_tbl);
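
The catch-all entry added at the end of generic_pci_tbl matches on PCI class code rather than vendor and device ID: dev->class holds base class << 16 | subclass << 8 | prog-if, so PCI_CLASS_STORAGE_IDE << 8 with a 0xFFFFFF00 mask matches any IDE-class controller regardless of its programming-interface byte, and generic_init_one() only honours that entry (driver_data 0) when the machine was booted with all-generic-ide. A small standalone sketch of that class comparison, not driver code, assuming the usual pci_ids.h constants:

#include <linux/pci.h>

/* Sketch: the class test implied by the catch-all table entry above. */
static int is_ide_class_device(unsigned int dev_class)
{
	const unsigned int wanted = PCI_CLASS_STORAGE_IDE << 8;	/* 0x010100 */
	const unsigned int mask   = 0xFFFFFF00UL;

	/* base class and subclass must say "IDE"; the prog-if byte is ignored */
	return (dev_class & mask) == (wanted & mask);
}
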
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index c8ee0b8c029..7b64db10d1b 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -10,6 +10,11 @@
* donation of an ABit BP6 mainboard, processor, and memory accelerated
* development and support.
*
+ *
+ * Highpoint have their own driver (source except for the raid part)
+ * available from http://www.highpoint-tech.com/hpt3xx-opensource-v131.tgz
+ * This may be useful to anyone wanting to work on the mainstream hpt IDE.
+ *
* Note that final HPT370 support was done by force extraction of GPL.
*
* - add function for getting/setting power status of drive
@@ -446,44 +451,29 @@ static struct chipset_bus_clock_list_entry sixty_six_base_hpt374[] = {
#define F_LOW_PCI_50 0x2d
#define F_LOW_PCI_66 0x42
-/* FIXME: compare with driver's code before removing */
-#if 0
- if (hpt_minimum_revision(dev, 3)) {
- u8 cbl;
- cbl = inb(iobase + 0x7b);
- outb(cbl | 1, iobase + 0x7b);
- outb(cbl & ~1, iobase + 0x7b);
- cbl = inb(iobase + 0x7a);
- p += sprintf(p, "Cable: ATA-%d"
- " ATA-%d\n",
- (cbl & 0x02) ? 33 : 66,
- (cbl & 0x01) ? 33 : 66);
- p += sprintf(p, "\n");
- }
- {
- u8 c2, c3;
- /* older revs don't have these registers mapped
- * into io space */
- pci_read_config_byte(dev, 0x43, &c0);
- pci_read_config_byte(dev, 0x47, &c1);
- pci_read_config_byte(dev, 0x4b, &c2);
- pci_read_config_byte(dev, 0x4f, &c3);
-
- p += sprintf(p, "Mode: %s %s"
- " %s %s\n",
- (c0 & 0x10) ? "UDMA" : (c0 & 0x20) ? "DMA " :
- (c0 & 0x80) ? "PIO " : "off ",
- (c1 & 0x10) ? "UDMA" : (c1 & 0x20) ? "DMA " :
- (c1 & 0x80) ? "PIO " : "off ",
- (c2 & 0x10) ? "UDMA" : (c2 & 0x20) ? "DMA " :
- (c2 & 0x80) ? "PIO " : "off ",
- (c3 & 0x10) ? "UDMA" : (c3 & 0x20) ? "DMA " :
- (c3 & 0x80) ? "PIO " : "off ");
- }
- }
-#endif
+/*
+ * Hold all the highpoint quirks and revision information in one
+ * place.
+ */
-static u32 hpt_revision (struct pci_dev *dev)
+struct hpt_info
+{
+ u8 max_mode; /* Speeds allowed */
+ int revision; /* Chipset revision */
+ int flags; /* Chipset properties */
+#define PLL_MODE 1
+#define IS_372N 2
+ /* Speed table */
+ struct chipset_bus_clock_list_entry *speed;
+};
+
+/*
+ * This wants fixing so that we do everything not by classrev
+ * (which breaks on the newest chips) but by creating an
+ * enumeration of chip variants and using that
+ */
+
+static __devinit u32 hpt_revision (struct pci_dev *dev)
{
u32 class_rev;
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
@@ -507,37 +497,33 @@ static u32 hpt_revision (struct pci_dev *dev)
return class_rev;
}
-static u32 hpt_minimum_revision (struct pci_dev *dev, int revision)
-{
- unsigned int class_rev = hpt_revision(dev);
- revision--;
- return ((int) (class_rev > revision) ? 1 : 0);
-}
-
static int check_in_drive_lists(ide_drive_t *drive, const char **list);
static u8 hpt3xx_ratemask (ide_drive_t *drive)
{
- struct pci_dev *dev = HWIF(drive)->pci_dev;
+ ide_hwif_t *hwif = drive->hwif;
+ struct hpt_info *info = ide_get_hwifdata(hwif);
u8 mode = 0;
- if (hpt_minimum_revision(dev, 8)) { /* HPT374 */
+ /* FIXME: TODO - move this to set info->mode once at boot */
+
+ if (info->revision >= 8) { /* HPT374 */
mode = (HPT374_ALLOW_ATA133_6) ? 4 : 3;
- } else if (hpt_minimum_revision(dev, 7)) { /* HPT371 */
+ } else if (info->revision >= 7) { /* HPT371 */
mode = (HPT371_ALLOW_ATA133_6) ? 4 : 3;
- } else if (hpt_minimum_revision(dev, 6)) { /* HPT302 */
+ } else if (info->revision >= 6) { /* HPT302 */
mode = (HPT302_ALLOW_ATA133_6) ? 4 : 3;
- } else if (hpt_minimum_revision(dev, 5)) { /* HPT372 */
+ } else if (info->revision >= 5) { /* HPT372 */
mode = (HPT372_ALLOW_ATA133_6) ? 4 : 3;
- } else if (hpt_minimum_revision(dev, 4)) { /* HPT370A */
+ } else if (info->revision >= 4) { /* HPT370A */
mode = (HPT370_ALLOW_ATA100_5) ? 3 : 2;
- } else if (hpt_minimum_revision(dev, 3)) { /* HPT370 */
+ } else if (info->revision >= 3) { /* HPT370 */
mode = (HPT370_ALLOW_ATA100_5) ? 3 : 2;
mode = (check_in_drive_lists(drive, bad_ata33)) ? 0 : mode;
} else { /* HPT366 and HPT368 */
mode = (check_in_drive_lists(drive, bad_ata33)) ? 0 : 2;
}
- if (!eighty_ninty_three(drive) && (mode))
+ if (!eighty_ninty_three(drive) && mode)
mode = min(mode, (u8)1);
return mode;
}
@@ -549,7 +535,8 @@ static u8 hpt3xx_ratemask (ide_drive_t *drive)
static u8 hpt3xx_ratefilter (ide_drive_t *drive, u8 speed)
{
- struct pci_dev *dev = HWIF(drive)->pci_dev;
+ ide_hwif_t *hwif = drive->hwif;
+ struct hpt_info *info = ide_get_hwifdata(hwif);
u8 mode = hpt3xx_ratemask(drive);
if (drive->media != ide_disk)
@@ -561,7 +548,7 @@ static u8 hpt3xx_ratefilter (ide_drive_t *drive, u8 speed)
break;
case 0x03:
speed = min(speed, (u8)XFER_UDMA_5);
- if (hpt_minimum_revision(dev, 5))
+ if (info->revision >= 5)
break;
if (check_in_drive_lists(drive, bad_ata100_5))
speed = min(speed, (u8)XFER_UDMA_4);
@@ -571,7 +558,7 @@ static u8 hpt3xx_ratefilter (ide_drive_t *drive, u8 speed)
/*
* CHECK ME, Does this need to be set to 5 ??
*/
- if (hpt_minimum_revision(dev, 3))
+ if (info->revision >= 3)
break;
if ((check_in_drive_lists(drive, bad_ata66_4)) ||
(!(HPT366_ALLOW_ATA66_4)))
@@ -585,7 +572,7 @@ static u8 hpt3xx_ratefilter (ide_drive_t *drive, u8 speed)
/*
* CHECK ME, Does this need to be set to 5 ??
*/
- if (hpt_minimum_revision(dev, 3))
+ if (info->revision >= 3)
break;
if (check_in_drive_lists(drive, bad_ata33))
speed = min(speed, (u8)XFER_MW_DMA_2);
@@ -624,11 +611,12 @@ static unsigned int pci_bus_clock_list (u8 speed, struct chipset_bus_clock_list_
static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
{
- struct pci_dev *dev = HWIF(drive)->pci_dev;
+ ide_hwif_t *hwif = drive->hwif;
+ struct pci_dev *dev = hwif->pci_dev;
+ struct hpt_info *info = ide_get_hwifdata(hwif);
u8 speed = hpt3xx_ratefilter(drive, xferspeed);
-// u8 speed = ide_rate_filter(hpt3xx_ratemask(drive), xferspeed);
u8 regtime = (drive->select.b.unit & 0x01) ? 0x44 : 0x40;
- u8 regfast = (HWIF(drive)->channel) ? 0x55 : 0x51;
+ u8 regfast = (hwif->channel) ? 0x55 : 0x51;
u8 drive_fast = 0;
u32 reg1 = 0, reg2 = 0;
@@ -636,16 +624,11 @@ static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
* Disable the "fast interrupt" prediction.
*/
pci_read_config_byte(dev, regfast, &drive_fast);
-#if 0
- if (drive_fast & 0x02)
- pci_write_config_byte(dev, regfast, drive_fast & ~0x20);
-#else
if (drive_fast & 0x80)
pci_write_config_byte(dev, regfast, drive_fast & ~0x80);
-#endif
- reg2 = pci_bus_clock_list(speed,
- (struct chipset_bus_clock_list_entry *) pci_get_drvdata(dev));
+ reg2 = pci_bus_clock_list(speed, info->speed);
+
/*
* Disable on-chip PIO FIFO/buffer
* (to avoid problems handling I/O errors later)
@@ -665,10 +648,11 @@ static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
static int hpt370_tune_chipset(ide_drive_t *drive, u8 xferspeed)
{
- struct pci_dev *dev = HWIF(drive)->pci_dev;
+ ide_hwif_t *hwif = drive->hwif;
+ struct pci_dev *dev = hwif->pci_dev;
+ struct hpt_info *info = ide_get_hwifdata(hwif);
u8 speed = hpt3xx_ratefilter(drive, xferspeed);
-// u8 speed = ide_rate_filter(hpt3xx_ratemask(drive), xferspeed);
- u8 regfast = (HWIF(drive)->channel) ? 0x55 : 0x51;
+ u8 regfast = (drive->hwif->channel) ? 0x55 : 0x51;
u8 drive_pci = 0x40 + (drive->dn * 4);
u8 new_fast = 0, drive_fast = 0;
u32 list_conf = 0, drive_conf = 0;
@@ -693,17 +677,13 @@ static int hpt370_tune_chipset(ide_drive_t *drive, u8 xferspeed)
if (new_fast != drive_fast)
pci_write_config_byte(dev, regfast, new_fast);
- list_conf = pci_bus_clock_list(speed,
- (struct chipset_bus_clock_list_entry *)
- pci_get_drvdata(dev));
+ list_conf = pci_bus_clock_list(speed, info->speed);
pci_read_config_dword(dev, drive_pci, &drive_conf);
list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);
- if (speed < XFER_MW_DMA_0) {
+ if (speed < XFER_MW_DMA_0)
list_conf &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */
- }
-
pci_write_config_dword(dev, drive_pci, list_conf);
return ide_config_drive_speed(drive, speed);
@@ -711,10 +691,11 @@ static int hpt370_tune_chipset(ide_drive_t *drive, u8 xferspeed)
static int hpt372_tune_chipset(ide_drive_t *drive, u8 xferspeed)
{
- struct pci_dev *dev = HWIF(drive)->pci_dev;
+ ide_hwif_t *hwif = drive->hwif;
+ struct pci_dev *dev = hwif->pci_dev;
+ struct hpt_info *info = ide_get_hwifdata(hwif);
u8 speed = hpt3xx_ratefilter(drive, xferspeed);
-// u8 speed = ide_rate_filter(hpt3xx_ratemask(drive), xferspeed);
- u8 regfast = (HWIF(drive)->channel) ? 0x55 : 0x51;
+ u8 regfast = (drive->hwif->channel) ? 0x55 : 0x51;
u8 drive_fast = 0, drive_pci = 0x40 + (drive->dn * 4);
u32 list_conf = 0, drive_conf = 0;
u32 conf_mask = (speed >= XFER_MW_DMA_0) ? 0xc0000000 : 0x30070000;
@@ -726,10 +707,8 @@ static int hpt372_tune_chipset(ide_drive_t *drive, u8 xferspeed)
pci_read_config_byte(dev, regfast, &drive_fast);
drive_fast &= ~0x07;
pci_write_config_byte(dev, regfast, drive_fast);
-
- list_conf = pci_bus_clock_list(speed,
- (struct chipset_bus_clock_list_entry *)
- pci_get_drvdata(dev));
+
+ list_conf = pci_bus_clock_list(speed, info->speed);
pci_read_config_dword(dev, drive_pci, &drive_conf);
list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);
if (speed < XFER_MW_DMA_0)
@@ -741,19 +720,14 @@ static int hpt372_tune_chipset(ide_drive_t *drive, u8 xferspeed)
static int hpt3xx_tune_chipset (ide_drive_t *drive, u8 speed)
{
- struct pci_dev *dev = HWIF(drive)->pci_dev;
+ ide_hwif_t *hwif = drive->hwif;
+ struct hpt_info *info = ide_get_hwifdata(hwif);
- if (hpt_minimum_revision(dev, 8))
+ if (info->revision >= 8)
return hpt372_tune_chipset(drive, speed); /* not a typo */
-#if 0
- else if (hpt_minimum_revision(dev, 7))
- hpt371_tune_chipset(drive, speed);
- else if (hpt_minimum_revision(dev, 6))
- hpt302_tune_chipset(drive, speed);
-#endif
- else if (hpt_minimum_revision(dev, 5))
+ else if (info->revision >= 5)
return hpt372_tune_chipset(drive, speed);
- else if (hpt_minimum_revision(dev, 3))
+ else if (info->revision >= 3)
return hpt370_tune_chipset(drive, speed);
else /* hpt368: hpt_minimum_revision(dev, 2) */
return hpt36x_tune_chipset(drive, speed);
@@ -779,8 +753,14 @@ static void hpt3xx_tune_drive (ide_drive_t *drive, u8 pio)
static int config_chipset_for_dma (ide_drive_t *drive)
{
u8 speed = ide_dma_speed(drive, hpt3xx_ratemask(drive));
+ ide_hwif_t *hwif = drive->hwif;
+ struct hpt_info *info = ide_get_hwifdata(hwif);
- if (!(speed))
+ if (!speed)
+ return 0;
+
+ /* If we don't have any timings we can't do a lot */
+ if (info->speed == NULL)
return 0;
(void) hpt3xx_tune_chipset(drive, speed);
@@ -794,7 +774,7 @@ static int hpt3xx_quirkproc (ide_drive_t *drive)
static void hpt3xx_intrproc (ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
+ ide_hwif_t *hwif = drive->hwif;
if (drive->quirk_list)
return;
@@ -804,24 +784,26 @@ static void hpt3xx_intrproc (ide_drive_t *drive)
static void hpt3xx_maskproc (ide_drive_t *drive, int mask)
{
- struct pci_dev *dev = HWIF(drive)->pci_dev;
+ ide_hwif_t *hwif = drive->hwif;
+ struct hpt_info *info = ide_get_hwifdata(hwif);
+ struct pci_dev *dev = hwif->pci_dev;
if (drive->quirk_list) {
- if (hpt_minimum_revision(dev,3)) {
+ if (info->revision >= 3) {
u8 reg5a = 0;
pci_read_config_byte(dev, 0x5a, &reg5a);
if (((reg5a & 0x10) >> 4) != mask)
pci_write_config_byte(dev, 0x5a, mask ? (reg5a | 0x10) : (reg5a & ~0x10));
} else {
if (mask) {
- disable_irq(HWIF(drive)->irq);
+ disable_irq(hwif->irq);
} else {
- enable_irq(HWIF(drive)->irq);
+ enable_irq(hwif->irq);
}
}
} else {
if (IDE_CONTROL_REG)
- HWIF(drive)->OUTB(mask ? (drive->ctl | 2) :
+ hwif->OUTB(mask ? (drive->ctl | 2) :
(drive->ctl & ~2),
IDE_CONTROL_REG);
}
@@ -829,12 +811,12 @@ static void hpt3xx_maskproc (ide_drive_t *drive, int mask)
static int hpt366_config_drive_xfer_rate (ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
+ ide_hwif_t *hwif = drive->hwif;
struct hd_driveid *id = drive->id;
drive->init_speed = 0;
- if (id && (id->capability & 1) && drive->autodma) {
+ if ((id->capability & 1) && drive->autodma) {
if (ide_use_dma(drive)) {
if (config_chipset_for_dma(drive))
@@ -868,15 +850,6 @@ static int hpt366_ide_dma_lostirq (ide_drive_t *drive)
drive->name, __FUNCTION__, reg50h, reg52h, reg5ah);
if (reg5ah & 0x10)
pci_write_config_byte(dev, 0x5a, reg5ah & ~0x10);
-#if 0
- /* how about we flush and reset, mmmkay? */
- pci_write_config_byte(dev, 0x51, 0x1F);
- /* fall through to a reset */
- case dma_start:
- case ide_dma_end:
- /* reset the chips state over and over.. */
- pci_write_config_byte(dev, 0x51, 0x13);
-#endif
return __ide_dma_lostirq(drive);
}
@@ -919,7 +892,7 @@ static void hpt370_lostirq_timeout (ide_drive_t *drive)
u8 dma_stat = 0, dma_cmd = 0;
pci_read_config_byte(HWIF(drive)->pci_dev, reginfo, &bfifo);
- printk("%s: %d bytes in FIFO\n", drive->name, bfifo);
+ printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo);
hpt370_clear_engine(drive);
/* get dma command mode */
dma_cmd = hwif->INB(hwif->dma_command);
@@ -1047,15 +1020,6 @@ static void hpt372n_rw_disk(ide_drive_t *drive, struct request *rq)
static void hpt3xx_reset (ide_drive_t *drive)
{
-#if 0
- unsigned long high_16 = pci_resource_start(HWIF(drive)->pci_dev, 4);
- u8 reset = (HWIF(drive)->channel) ? 0x80 : 0x40;
- u8 reg59h = 0;
-
- pci_read_config_byte(HWIF(drive)->pci_dev, 0x59, &reg59h);
- pci_write_config_byte(HWIF(drive)->pci_dev, 0x59, reg59h|reset);
- pci_write_config_byte(HWIF(drive)->pci_dev, 0x59, reg59h);
-#endif
}
static int hpt3xx_tristate (ide_drive_t * drive, int state)
@@ -1065,8 +1029,6 @@ static int hpt3xx_tristate (ide_drive_t * drive, int state)
u8 reg59h = 0, reset = (hwif->channel) ? 0x80 : 0x40;
u8 regXXh = 0, state_reg= (hwif->channel) ? 0x57 : 0x53;
-// hwif->bus_state = state;
-
pci_read_config_byte(dev, 0x59, &reg59h);
pci_read_config_byte(dev, state_reg, &regXXh);
@@ -1093,7 +1055,7 @@ static int hpt3xx_tristate (ide_drive_t * drive, int state)
#define TRISTATE_BIT 0x8000
static int hpt370_busproc(ide_drive_t * drive, int state)
{
- ide_hwif_t *hwif = HWIF(drive);
+ ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = hwif->pci_dev;
u8 tristate = 0, resetmask = 0, bus_reg = 0;
u16 tri_reg;
@@ -1148,33 +1110,44 @@ static int hpt370_busproc(ide_drive_t * drive, int state)
return 0;
}
-static int __devinit init_hpt37x(struct pci_dev *dev)
+static void __devinit hpt366_clocking(ide_hwif_t *hwif)
{
+ u32 reg1 = 0;
+ struct hpt_info *info = ide_get_hwifdata(hwif);
+
+ pci_read_config_dword(hwif->pci_dev, 0x40, &reg1);
+
+ /* detect bus speed by looking at control reg timing: */
+ switch((reg1 >> 8) & 7) {
+ case 5:
+ info->speed = forty_base_hpt366;
+ break;
+ case 9:
+ info->speed = twenty_five_base_hpt366;
+ break;
+ case 7:
+ default:
+ info->speed = thirty_three_base_hpt366;
+ break;
+ }
+}
+
+static void __devinit hpt37x_clocking(ide_hwif_t *hwif)
+{
+ struct hpt_info *info = ide_get_hwifdata(hwif);
+ struct pci_dev *dev = hwif->pci_dev;
int adjust, i;
u16 freq;
u32 pll;
u8 reg5bh;
- u8 reg5ah = 0;
- unsigned long dmabase = pci_resource_start(dev, 4);
- u8 did, rid;
- int is_372n = 0;
- pci_read_config_byte(dev, 0x5a, &reg5ah);
- /* interrupt force enable */
- pci_write_config_byte(dev, 0x5a, (reg5ah & ~0x10));
-
- if(dmabase)
- {
- did = inb(dmabase + 0x22);
- rid = inb(dmabase + 0x28);
-
- if((did == 4 && rid == 6) || (did == 5 && rid > 1))
- is_372n = 1;
- }
-
/*
* default to pci clock. make sure MA15/16 are set to output
- * to prevent drives having problems with 40-pin cables.
+ * to prevent drives having problems with 40-pin cables. Needed
+ * for some drives such as IBM-DTLA which will not enter ready
+ * state on reset when PDIAG is an input.
+ *
+ * ToDo: should we set 0x21 when using PLL mode ?
*/
pci_write_config_byte(dev, 0x5b, 0x23);
@@ -1197,9 +1170,7 @@ static int __devinit init_hpt37x(struct pci_dev *dev)
* Currently we always set up the PLL for the 372N
*/
- pci_set_drvdata(dev, NULL);
-
- if(is_372n)
+ if(info->flags & IS_372N)
{
printk(KERN_INFO "hpt: HPT372N detected, using 372N timing.\n");
if(freq < 0x55)
@@ -1227,39 +1198,38 @@ static int __devinit init_hpt37x(struct pci_dev *dev)
pll = F_LOW_PCI_66;
if (pll == F_LOW_PCI_33) {
- if (hpt_minimum_revision(dev,8))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt374);
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt370a);
+ if (info->revision >= 8)
+ info->speed = thirty_three_base_hpt374;
+ else if (info->revision >= 5)
+ info->speed = thirty_three_base_hpt372;
+ else if (info->revision >= 4)
+ info->speed = thirty_three_base_hpt370a;
else
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt370);
- printk("HPT37X: using 33MHz PCI clock\n");
+ info->speed = thirty_three_base_hpt370;
+ printk(KERN_DEBUG "HPT37X: using 33MHz PCI clock\n");
} else if (pll == F_LOW_PCI_40) {
/* Unsupported */
} else if (pll == F_LOW_PCI_50) {
- if (hpt_minimum_revision(dev,8))
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) fifty_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
+ if (info->revision >= 8)
+ info->speed = fifty_base_hpt370a;
+ else if (info->revision >= 5)
+ info->speed = fifty_base_hpt372;
+ else if (info->revision >= 4)
+ info->speed = fifty_base_hpt370a;
else
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
- printk("HPT37X: using 50MHz PCI clock\n");
+ info->speed = fifty_base_hpt370a;
+ printk(KERN_DEBUG "HPT37X: using 50MHz PCI clock\n");
} else {
- if (hpt_minimum_revision(dev,8))
- {
+ if (info->revision >= 8) {
printk(KERN_ERR "HPT37x: 66MHz timings are not supported.\n");
}
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt370a);
+ else if (info->revision >= 5)
+ info->speed = sixty_six_base_hpt372;
+ else if (info->revision >= 4)
+ info->speed = sixty_six_base_hpt370a;
else
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt370);
- printk("HPT37X: using 66MHz PCI clock\n");
+ info->speed = sixty_six_base_hpt370;
+ printk(KERN_DEBUG "HPT37X: using 66MHz PCI clock\n");
}
}
@@ -1269,11 +1239,19 @@ static int __devinit init_hpt37x(struct pci_dev *dev)
* result in slow reads when using a 33MHz PCI clock. we also
* don't like to use the PLL because it will cause glitches
* on PRST/SRST when the HPT state engine gets reset.
+ *
+ * ToDo: Use 66MHz PLL when ATA133 devices are present on a
+ * 372 device so we can get ATA133 support
*/
- if (pci_get_drvdata(dev))
+ if (info->speed)
goto init_hpt37X_done;
+
+ info->flags |= PLL_MODE;
/*
+ * FIXME: make this work correctly, esp with 372N as per
+ * reference driver code.
+ *
* adjust PLL based upon PCI clock, enable it, and wait for
* stabilization.
*/
@@ -1298,14 +1276,14 @@ static int __devinit init_hpt37x(struct pci_dev *dev)
pci_write_config_dword(dev, 0x5c,
pll & ~0x100);
pci_write_config_byte(dev, 0x5b, 0x21);
- if (hpt_minimum_revision(dev,8))
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) fifty_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
+ if (info->revision >= 8)
+ info->speed = fifty_base_hpt370a;
+ else if (info->revision >= 5)
+ info->speed = fifty_base_hpt372;
+ else if (info->revision >= 4)
+ info->speed = fifty_base_hpt370a;
else
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
+ info->speed = fifty_base_hpt370a;
printk("HPT37X: using 50MHz internal PLL\n");
goto init_hpt37X_done;
}
@@ -1318,10 +1296,22 @@ pll_recal:
}
init_hpt37X_done:
+ if (!info->speed)
+ printk(KERN_ERR "HPT37X%s: unknown bus timing [%d %d].\n",
+ (info->flags & IS_372N)?"N":"", pll, freq);
/* reset state engine */
pci_write_config_byte(dev, 0x50, 0x37);
pci_write_config_byte(dev, 0x54, 0x37);
udelay(100);
+}
+
+static int __devinit init_hpt37x(struct pci_dev *dev)
+{
+ u8 reg5ah;
+
+ pci_read_config_byte(dev, 0x5a, &reg5ah);
+ /* interrupt force enable */
+ pci_write_config_byte(dev, 0x5a, (reg5ah & ~0x10));
return 0;
}
@@ -1338,59 +1328,27 @@ static int __devinit init_hpt366(struct pci_dev *dev)
pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
pci_read_config_dword(dev, 0x40, &reg1);
- /* detect bus speed by looking at control reg timing: */
- switch((reg1 >> 8) & 7) {
- case 5:
- pci_set_drvdata(dev, (void *) forty_base_hpt366);
- break;
- case 9:
- pci_set_drvdata(dev, (void *) twenty_five_base_hpt366);
- break;
- case 7:
- default:
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt366);
- break;
- }
-
- if (!pci_get_drvdata(dev))
- {
- printk(KERN_ERR "hpt366: unknown bus timing.\n");
- pci_set_drvdata(dev, NULL);
- }
return 0;
}
static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const char *name)
{
int ret = 0;
- u8 test = 0;
-
+ /* FIXME: Not portable */
if (dev->resource[PCI_ROM_RESOURCE].start)
pci_write_config_byte(dev, PCI_ROM_ADDRESS,
dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
- pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &test);
- if (test != (L1_CACHE_BYTES / 4))
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
- (L1_CACHE_BYTES / 4));
-
- pci_read_config_byte(dev, PCI_LATENCY_TIMER, &test);
- if (test != 0x78)
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+ pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+ pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
- pci_read_config_byte(dev, PCI_MIN_GNT, &test);
- if (test != 0x08)
- pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
-
- pci_read_config_byte(dev, PCI_MAX_LAT, &test);
- if (test != 0x08)
- pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
-
- if (hpt_minimum_revision(dev, 3)) {
+ if (hpt_revision(dev) >= 3)
ret = init_hpt37x(dev);
- } else {
- ret =init_hpt366(dev);
- }
+ else
+ ret = init_hpt366(dev);
+
if (ret)
return ret;
@@ -1400,27 +1358,16 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
{
struct pci_dev *dev = hwif->pci_dev;
+ struct hpt_info *info = ide_get_hwifdata(hwif);
u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02;
- u8 did, rid;
- unsigned long dmabase = hwif->dma_base;
- int is_372n = 0;
- if(dmabase)
- {
- did = inb(dmabase + 0x22);
- rid = inb(dmabase + 0x28);
-
- if((did == 4 && rid == 6) || (did == 5 && rid > 1))
- is_372n = 1;
- }
-
hwif->tuneproc = &hpt3xx_tune_drive;
hwif->speedproc = &hpt3xx_tune_chipset;
hwif->quirkproc = &hpt3xx_quirkproc;
hwif->intrproc = &hpt3xx_intrproc;
hwif->maskproc = &hpt3xx_maskproc;
- if(is_372n)
+ if(info->flags & IS_372N)
hwif->rw_disk = &hpt372n_rw_disk;
/*
@@ -1428,7 +1375,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
* address lines to access an external eeprom. To read valid
* cable detect state the pins must be enabled as inputs.
*/
- if (hpt_minimum_revision(dev, 8) && PCI_FUNC(dev->devfn) & 1) {
+ if (info->revision >= 8 && (PCI_FUNC(dev->devfn) & 1)) {
/*
* HPT374 PCI function 1
* - set bit 15 of reg 0x52 to enable TCBLID as input
@@ -1443,7 +1390,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
pci_read_config_byte(dev, 0x5a, &ata66);
pci_write_config_word(dev, 0x52, mcr3);
pci_write_config_word(dev, 0x56, mcr6);
- } else if (hpt_minimum_revision(dev, 3)) {
+ } else if (info->revision >= 3) {
/*
* HPT370/372 and 374 pcifn 0
* - clear bit 0 of 0x5b to enable P/SCBLID as inputs
@@ -1470,7 +1417,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
hwif->serialized = hwif->mate->serialized = 1;
#endif
- if (hpt_minimum_revision(dev,3)) {
+ if (info->revision >= 3) {
u8 reg5ah = 0;
pci_write_config_byte(dev, 0x5a, reg5ah & ~0x10);
/*
@@ -1480,8 +1427,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
*/
hwif->resetproc = &hpt3xx_reset;
hwif->busproc = &hpt370_busproc;
-// hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
- } else if (hpt_minimum_revision(dev,2)) {
+ } else if (info->revision >= 2) {
hwif->resetproc = &hpt3xx_reset;
hwif->busproc = &hpt3xx_tristate;
} else {
@@ -1502,18 +1448,18 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
hwif->udma_four = ((ata66 & regmask) ? 0 : 1);
hwif->ide_dma_check = &hpt366_config_drive_xfer_rate;
- if (hpt_minimum_revision(dev,8)) {
+ if (info->revision >= 8) {
hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
hwif->ide_dma_end = &hpt374_ide_dma_end;
- } else if (hpt_minimum_revision(dev,5)) {
+ } else if (info->revision >= 5) {
hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
hwif->ide_dma_end = &hpt374_ide_dma_end;
- } else if (hpt_minimum_revision(dev,3)) {
+ } else if (info->revision >= 3) {
hwif->dma_start = &hpt370_ide_dma_start;
hwif->ide_dma_end = &hpt370_ide_dma_end;
hwif->ide_dma_timeout = &hpt370_ide_dma_timeout;
hwif->ide_dma_lostirq = &hpt370_ide_dma_lostirq;
- } else if (hpt_minimum_revision(dev,2))
+ } else if (info->revision >= 2)
hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq;
else
hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq;
@@ -1526,6 +1472,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
{
+ struct hpt_info *info = ide_get_hwifdata(hwif);
u8 masterdma = 0, slavedma = 0;
u8 dma_new = 0, dma_old = 0;
u8 primary = hwif->channel ? 0x4b : 0x43;
@@ -1535,8 +1482,7 @@ static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
if (!dmabase)
return;
- if(pci_get_drvdata(hwif->pci_dev) == NULL)
- {
+ if(info->speed == NULL) {
printk(KERN_WARNING "hpt: no known IDE timings, disabling DMA.\n");
return;
}
@@ -1559,6 +1505,40 @@ static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
ide_setup_dma(hwif, dmabase, 8);
}
+/*
+ * We "borrow" this hook in order to set the data structures
+ * up early enough before dma or init_hwif calls are made.
+ */
+
+static void __devinit init_iops_hpt366(ide_hwif_t *hwif)
+{
+ struct hpt_info *info = kmalloc(sizeof(struct hpt_info), GFP_KERNEL);
+ unsigned long dmabase = pci_resource_start(hwif->pci_dev, 4);
+ u8 did, rid;
+
+ if(info == NULL) {
+ printk(KERN_WARNING "hpt366: out of memory.\n");
+ return;
+ }
+ memset(info, 0, sizeof(struct hpt_info));
+ ide_set_hwifdata(hwif, info);
+
+ if(dmabase) {
+ did = inb(dmabase + 0x22);
+ rid = inb(dmabase + 0x28);
+
+ if((did == 4 && rid == 6) || (did == 5 && rid > 1))
+ info->flags |= IS_372N;
+ }
+
+ info->revision = hpt_revision(hwif->pci_dev);
+
+ if (info->revision >= 3)
+ hpt37x_clocking(hwif);
+ else
+ hpt366_clocking(hwif);
+}
+
static int __devinit init_setup_hpt374(struct pci_dev *dev, ide_pci_device_t *d)
{
struct pci_dev *findev = NULL;
@@ -1646,6 +1626,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
.name = "HPT366",
.init_setup = init_setup_hpt366,
.init_chipset = init_chipset_hpt366,
+ .init_iops = init_iops_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
.channels = 2,
@@ -1656,6 +1637,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
.name = "HPT372A",
.init_setup = init_setup_hpt37x,
.init_chipset = init_chipset_hpt366,
+ .init_iops = init_iops_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
.channels = 2,
@@ -1665,6 +1647,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
.name = "HPT302",
.init_setup = init_setup_hpt37x,
.init_chipset = init_chipset_hpt366,
+ .init_iops = init_iops_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
.channels = 2,
@@ -1674,6 +1657,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
.name = "HPT371",
.init_setup = init_setup_hpt37x,
.init_chipset = init_chipset_hpt366,
+ .init_iops = init_iops_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
.channels = 2,
@@ -1683,6 +1667,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
.name = "HPT374",
.init_setup = init_setup_hpt374,
.init_chipset = init_chipset_hpt366,
+ .init_iops = init_iops_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
.channels = 2, /* 4 */
@@ -1692,6 +1677,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
.name = "HPT372N",
.init_setup = init_setup_hpt37x,
.init_chipset = init_chipset_hpt366,
+ .init_iops = init_iops_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
.channels = 2, /* 4 */
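
The structural change running through the hpt366 hunks is that per-chip state (revision, 372N detection, the chosen timing table) now lives in a struct hpt_info attached to the interface with ide_set_hwifdata() from the new init_iops hook, instead of being re-derived from the PCI device or stashed via pci_set_drvdata() in every callback. A minimal sketch of that lifecycle, using shortened hypothetical names rather than the driver's own:

#include <linux/ide.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_info {
	int revision;			/* chip revision, filled in at probe */
	int flags;
};

/* init_iops runs before init_hwif and init_dma, so the data is ready early. */
static void __devinit init_iops_example(ide_hwif_t *hwif)
{
	struct example_info *info = kmalloc(sizeof(*info), GFP_KERNEL);

	if (info == NULL)
		return;
	memset(info, 0, sizeof(*info));
	ide_set_hwifdata(hwif, info);
}

static void __devinit init_hwif_example(ide_hwif_t *hwif)
{
	struct example_info *info = ide_get_hwifdata(hwif);

	/* later hooks simply fetch the same structure back */
	if (info && info->revision >= 3)
		hwif->autodma = 1;
}
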
diff --git a/drivers/ide/pci/it8172.c b/drivers/ide/pci/it8172.c
index 631927cf17d..93462926b9d 100644
--- a/drivers/ide/pci/it8172.c
+++ b/drivers/ide/pci/it8172.c
@@ -216,7 +216,7 @@ fast_ata_pio:
return 0;
}
-static unsigned int __init init_chipset_it8172 (struct pci_dev *dev, const char *name)
+static unsigned int __devinit init_chipset_it8172 (struct pci_dev *dev, const char *name)
{
unsigned char progif;
@@ -230,7 +230,7 @@ static unsigned int __init init_chipset_it8172 (struct pci_dev *dev, const char
}
-static void __init init_hwif_it8172 (ide_hwif_t *hwif)
+static void __devinit init_hwif_it8172 (ide_hwif_t *hwif)
{
struct pci_dev* dev = hwif->pci_dev;
unsigned long cmdBase, ctrlBase;
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
new file mode 100644
index 00000000000..e440036e651
--- /dev/null
+++ b/drivers/ide/pci/it821x.c
@@ -0,0 +1,812 @@
+
+/*
+ * linux/drivers/ide/pci/it821x.c Version 0.09 December 2004
+ *
+ * Copyright (C) 2004 Red Hat <alan@redhat.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ * Based in part on the ITE vendor provided SCSI driver.
+ *
+ * Documentation available from
+ * http://www.ite.com.tw/pc/IT8212F_V04.pdf
+ * Some other documents are NDA.
+ *
+ * The ITE8212 isn't exactly a standard IDE controller. It has two
+ * modes. In pass through mode it is an IDE controller. In its smart
+ * mode it's actually quite a capable hardware raid controller disguised
+ * as an IDE controller. Smart mode only understands DMA read/write and
+ * identify, none of the fancier commands apply. The IT8211 is identical
+ * in other respects but lacks the raid mode.
+ *
+ * Errata:
+ * o Rev 0x10 also requires master/slave hold the same DMA timings and
+ * cannot do ATAPI MWDMA.
+ * o The identify data for raid volumes lacks CHS info (technically ok)
+ * but also fails to set the LBA28 and other bits. We fix these in
+ * the IDE probe quirk code.
+ * o If you write LBA48-sized I/Os (i.e. > 256 sectors) in smart mode
+ * raid then the controller firmware dies
+ * o Smart mode without RAID doesn't clear all the necessary identify
+ * bits to reduce the command set to the one used
+ *
+ * This has a few impacts on the driver
+ * - In pass through mode we do all the work you would expect
+ * - In smart mode the clocking set up is done by the controller generally
+ * but we must watch the other limits and filter.
+ * - There are a few extra vendor commands that actually talk to the
+ * controller but only work PIO with no IRQ.
+ *
+ * Vendor areas of the identify block in smart mode are used for the
+ * timing and policy set up. Each HDD in raid mode also has a serial
+ * block on the disk. The hardware extra commands are get/set chip status,
+ * rebuild, get rebuild status.
+ *
+ * In Linux the driver supports pass through mode as if the device was
+ * just another IDE controller. If the smart mode is running then
+ * volumes are managed by the controller firmware and each IDE "disk"
+ * is a raid volume. Even more cute - the controller can do automated
+ * hotplug and rebuild.
+ *
+ * The pass through controller itself is a little demented. It has a
+ * flaw that it has a single set of PIO/MWDMA timings per channel so
+ * non-UDMA devices restrict each other's performance. It also has a
+ * single clock source per channel so mixed UDMA100/133 performance
+ * isn't perfect and we have to pick a clock. Thankfully none of this
+ * matters in smart mode. ATAPI DMA is not currently supported.
+ *
+ * It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
+ *
+ * TODO
+ * - ATAPI UDMA is ok but not MWDMA it seems
+ * - RAID configuration ioctls
+ * - Move to libata once it grows up
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/hdreg.h>
+#include <linux/ide.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+
+struct it821x_dev
+{
+ unsigned int smart:1, /* Are we in smart raid mode */
+ timing10:1; /* Rev 0x10 */
+ u8 clock_mode; /* 0, ATA_50 or ATA_66 */
+ u8 want[2][2]; /* Mode/Pri log for master slave */
+ /* We need these for switching the clock when DMA goes on/off
+ The high byte is the 66MHz timing */
+ u16 pio[2]; /* Cached PIO values */
+ u16 mwdma[2]; /* Cached MWDMA values */
+ u16 udma[2]; /* Cached UDMA values (per drive) */
+};
+
+#define ATA_66 0
+#define ATA_50 1
+#define ATA_ANY 2
+
+#define UDMA_OFF 0
+#define MWDMA_OFF 0
+
+/*
+ * We allow users to force the card into non-raid mode without
+ * flashing the alternative BIOS. This is also necessary right now
+ * for embedded platforms that cannot run a PC BIOS but are using this
+ * device.
+ */
+
+static int it8212_noraid;
+
+/**
+ * it821x_program - program the PIO/MWDMA registers
+ * @drive: drive to tune
+ *
+ * Program the PIO/MWDMA timing for this channel according to the
+ * current clock.
+ */
+
+static void it821x_program(ide_drive_t *drive, u16 timing)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+ int channel = hwif->channel;
+ u8 conf;
+
+ /* Program PIO/MWDMA timing bits */
+ if(itdev->clock_mode == ATA_66)
+ conf = timing >> 8;
+ else
+ conf = timing & 0xFF;
+ pci_write_config_byte(hwif->pci_dev, 0x54 + 4 * channel, conf);
+}
+
+/**
+ * it821x_program_udma - program the UDMA registers
+ * @drive: drive to tune
+ *
+ * Program the UDMA timing for this drive according to the
+ * current clock.
+ */
+
+static void it821x_program_udma(ide_drive_t *drive, u16 timing)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+ int channel = hwif->channel;
+ int unit = drive->select.b.unit;
+ u8 conf;
+
+ /* Program UDMA timing bits */
+ if(itdev->clock_mode == ATA_66)
+ conf = timing >> 8;
+ else
+ conf = timing & 0xFF;
+ if(itdev->timing10 == 0)
+ pci_write_config_byte(hwif->pci_dev, 0x56 + 4 * channel + unit, conf);
+ else {
+ pci_write_config_byte(hwif->pci_dev, 0x56 + 4 * channel, conf);
+ pci_write_config_byte(hwif->pci_dev, 0x56 + 4 * channel + 1, conf);
+ }
+}
+
+
+/**
+ * it821x_clock_strategy - select the channel base clock
+ * @drive: drive to set up
+ *
+ * Select between the 50 and 66MHz base clocks to get the best
+ * results for this interface.
+ */
+
+static void it821x_clock_strategy(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+
+ u8 unit = drive->select.b.unit;
+ ide_drive_t *pair = &hwif->drives[1-unit];
+
+ int clock, altclock;
+ u8 v;
+ int sel = 0;
+
+ if(itdev->want[0][0] > itdev->want[1][0]) {
+ clock = itdev->want[0][1];
+ altclock = itdev->want[1][1];
+ } else {
+ clock = itdev->want[1][1];
+ altclock = itdev->want[0][1];
+ }
+
+ /* Master doesn't care; does the slave? */
+ if(clock == ATA_ANY)
+ clock = altclock;
+
+ /* Nobody cares - keep the same clock */
+ if(clock == ATA_ANY)
+ return;
+ /* No change */
+ if(clock == itdev->clock_mode)
+ return;
+
+ /* Load this into the controller ? */
+ if(clock == ATA_66)
+ itdev->clock_mode = ATA_66;
+ else {
+ itdev->clock_mode = ATA_50;
+ sel = 1;
+ }
+ pci_read_config_byte(hwif->pci_dev, 0x50, &v);
+ v &= ~(1 << (1 + hwif->channel));
+ v |= sel << (1 + hwif->channel);
+ pci_write_config_byte(hwif->pci_dev, 0x50, v);
+
+ /*
+ * Reprogram the UDMA/PIO of the pair drive for the switch
+ * MWDMA will be dealt with by the dma switcher
+ */
+ if(pair && itdev->udma[1-unit] != UDMA_OFF) {
+ it821x_program_udma(pair, itdev->udma[1-unit]);
+ it821x_program(pair, itdev->pio[1-unit]);
+ }
+ /*
+ * Reprogram the UDMA/PIO of our drive for the switch.
+ * MWDMA will be dealt with by the dma switcher
+ */
+ if(itdev->udma[unit] != UDMA_OFF) {
+ it821x_program_udma(drive, itdev->udma[unit]);
+ it821x_program(drive, itdev->pio[unit]);
+ }
+}
+
+/**
+ * it821x_ratemask - Compute available modes
+ * @drive: IDE drive
+ *
+ * Compute the available speeds for the devices on the interface. This
+ * is all modes to ATA133 clipped by drive cable setup.
+ */
+
+static u8 it821x_ratemask (ide_drive_t *drive)
+{
+ u8 mode = 4;
+ if (!eighty_ninty_three(drive))
+ mode = min(mode, (u8)1);
+ return mode;
+}
+
+/**
+ * it821x_tuneproc - tune a drive
+ * @drive: drive to tune
+ * @mode_wanted: the target operating mode
+ *
+ * Load the timing settings for this device mode into the
+ * controller. By the time we are called the mode has been
+ * modified as necessary to handle the absence of separate
+ * master/slave timers for MWDMA/PIO.
+ *
+ * This code is only used in pass through mode.
+ */
+
+static void it821x_tuneproc (ide_drive_t *drive, byte mode_wanted)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+ int unit = drive->select.b.unit;
+
+ /* Spec says 89 ref driver uses 88 */
+ static u16 pio[] = { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
+ static u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
+
+ if(itdev->smart)
+ return;
+
+ /* We prefer 66MHz clock for PIO 0-3, don't care for PIO4 */
+ itdev->want[unit][1] = pio_want[mode_wanted];
+ itdev->want[unit][0] = 1; /* PIO is lowest priority */
+ itdev->pio[unit] = pio[mode_wanted];
+ it821x_clock_strategy(drive);
+ it821x_program(drive, itdev->pio[unit]);
+}
+
+/**
+ * it821x_tune_mwdma - tune a channel for MWDMA
+ * @drive: drive to set up
+ * @mode_wanted: the target operating mode
+ *
+ * Load the timing settings for this device mode into the
+ * controller when doing MWDMA in pass through mode. The caller
+ * must manage the whole lack of per device MWDMA/PIO timings and
+ * the shared MWDMA/PIO timing register.
+ */
+
+static void it821x_tune_mwdma (ide_drive_t *drive, byte mode_wanted)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct it821x_dev *itdev = (void *)ide_get_hwifdata(hwif);
+ int unit = drive->select.b.unit;
+ int channel = hwif->channel;
+ u8 conf;
+
+ static u16 dma[] = { 0x8866, 0x3222, 0x3121 };
+ static u8 mwdma_want[] = { ATA_ANY, ATA_66, ATA_ANY };
+
+ itdev->want[unit][1] = mwdma_want[mode_wanted];
+ itdev->want[unit][0] = 2; /* MWDMA is low priority */
+ itdev->mwdma[unit] = dma[mode_wanted];
+ itdev->udma[unit] = UDMA_OFF;
+
+ /* UDMA bits off - Revision 0x10 do them in pairs */
+ pci_read_config_byte(hwif->pci_dev, 0x50, &conf);
+ if(itdev->timing10)
+ conf |= channel ? 0x60: 0x18;
+ else
+ conf |= 1 << (3 + 2 * channel + unit);
+ pci_write_config_byte(hwif->pci_dev, 0x50, conf);
+
+ it821x_clock_strategy(drive);
+ /* FIXME: do we need to program this ? */
+ /* it821x_program(drive, itdev->mwdma[unit]); */
+}
+
+/**
+ * it821x_tune_udma - tune a channel for UDMA
+ * @drive: drive to set up
+ * @mode_wanted: the target operating mode
+ *
+ * Load the timing settings for this device mode into the
+ * controller when doing UDMA modes in pass through.
+ */
+
+static void it821x_tune_udma (ide_drive_t *drive, byte mode_wanted)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+ int unit = drive->select.b.unit;
+ int channel = hwif->channel;
+ u8 conf;
+
+ static u16 udma[] = { 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
+ static u8 udma_want[] = { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };
+
+ itdev->want[unit][1] = udma_want[mode_wanted];
+ itdev->want[unit][0] = 3; /* UDMA is high priority */
+ itdev->mwdma[unit] = MWDMA_OFF;
+ itdev->udma[unit] = udma[mode_wanted];
+ if(mode_wanted >= 5)
+ itdev->udma[unit] |= 0x8080; /* UDMA 5/6 select on */
+
+ /* UDMA on. Again revision 0x10 must do the pair */
+ pci_read_config_byte(hwif->pci_dev, 0x50, &conf);
+ if(itdev->timing10)
+ conf &= channel ? 0x9F: 0xE7;
+ else
+ conf &= ~ (1 << (3 + 2 * channel + unit));
+ pci_write_config_byte(hwif->pci_dev, 0x50, conf);
+
+ it821x_clock_strategy(drive);
+ it821x_program_udma(drive, itdev->udma[unit]);
+
+}
+
+/**
+ * config_it821x_chipset_for_pio - set drive timings
+ * @drive: drive to tune
+ * @set_speed: whether to also program the drive's transfer rate
+ *
+ * Compute the best pio mode we can for a given device. We must
+ * pick a speed that does not cause problems with the other device
+ * on the cable.
+ */
+
+static void config_it821x_chipset_for_pio (ide_drive_t *drive, byte set_speed)
+{
+ u8 unit = drive->select.b.unit;
+ ide_hwif_t *hwif = drive->hwif;
+ ide_drive_t *pair = &hwif->drives[1-unit];
+ u8 speed = 0, set_pio = ide_get_best_pio_mode(drive, 255, 5, NULL);
+ u8 pair_pio;
+
+ /* We have to deal with this mess in pairs */
+ if(pair != NULL) {
+ pair_pio = ide_get_best_pio_mode(pair, 255, 5, NULL);
+ /* Trim PIO to the slowest of the master/slave */
+ if(pair_pio < set_pio)
+ set_pio = pair_pio;
+ }
+ it821x_tuneproc(drive, set_pio);
+ speed = XFER_PIO_0 + set_pio;
+ /* XXX - We trim to the lowest of the pair so the other drive
+ will always be fine at this point until we do hotplug passthru */
+
+ if (set_speed)
+ (void) ide_config_drive_speed(drive, speed);
+}
+
+/**
+ * it821x_dma_start - DMA start hook
+ * @drive: drive for DMA
+ *
+ * The IT821x has a single timing register for MWDMA and for PIO
+ * operations. As we flip back and forth we have to reload the
+ * clock. In addition the rev 0x10 device only works if the same
+ * timing value is loaded into the master and slave UDMA clock
+ * so we must also reload that.
+ *
+ * FIXME: we could figure out in advance if we need to do reloads
+ */
+
+static void it821x_dma_start(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+ int unit = drive->select.b.unit;
+ if(itdev->mwdma[unit] != MWDMA_OFF)
+ it821x_program(drive, itdev->mwdma[unit]);
+ else if(itdev->udma[unit] != UDMA_OFF && itdev->timing10)
+ it821x_program_udma(drive, itdev->udma[unit]);
+ ide_dma_start(drive);
+}
+
+/**
+ * it821x_dma_end - DMA stop hook
+ * @drive: drive for DMA stop
+ *
+ * The IT821x has a single timing register for MWDMA and for PIO
+ * operations. As we flip back and forth we have to reload the
+ * clock.
+ */
+
+static int it821x_dma_end(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ int unit = drive->select.b.unit;
+ struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+ int ret = __ide_dma_end(drive);
+ if(itdev->mwdma[unit] != MWDMA_OFF)
+ it821x_program(drive, itdev->pio[unit]);
+ return ret;
+}
+
+
+/**
+ * it821x_tune_chipset - set controller timings
+ * @drive: Drive to set up
+ * @xferspeed: speed we want to achieve
+ *
+ * Tune the ITE chipset for the desired mode. If we can't achieve
+ * the desired mode then tune for a lower one, but ultimately
+ * make the thing work.
+ */
+
+static int it821x_tune_chipset (ide_drive_t *drive, byte xferspeed)
+{
+
+ ide_hwif_t *hwif = drive->hwif;
+ struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+ u8 speed = ide_rate_filter(it821x_ratemask(drive), xferspeed);
+
+ if(!itdev->smart) {
+ switch(speed) {
+ case XFER_PIO_4:
+ case XFER_PIO_3:
+ case XFER_PIO_2:
+ case XFER_PIO_1:
+ case XFER_PIO_0:
+ it821x_tuneproc(drive, (speed - XFER_PIO_0));
+ break;
+ /* MWDMA tuning is really hard because our MWDMA and PIO
+ timings are kept in the same place. We can switch in the
+ host dma on/off callbacks */
+ case XFER_MW_DMA_2:
+ case XFER_MW_DMA_1:
+ case XFER_MW_DMA_0:
+ it821x_tune_mwdma(drive, (speed - XFER_MW_DMA_0));
+ break;
+ case XFER_UDMA_6:
+ case XFER_UDMA_5:
+ case XFER_UDMA_4:
+ case XFER_UDMA_3:
+ case XFER_UDMA_2:
+ case XFER_UDMA_1:
+ case XFER_UDMA_0:
+ it821x_tune_udma(drive, (speed - XFER_UDMA_0));
+ break;
+ default:
+ return 1;
+ }
+ }
+ /*
+ * In smart mode the clocking is done by the host controller
+ * snooping the mode we picked. The rest of it is not our problem
+ */
+ return ide_config_drive_speed(drive, speed);
+}
+
+/**
+ * config_chipset_for_dma - configure for DMA
+ * @drive: drive to configure
+ *
+ * Called by the IDE layer when it wants the timings set up.
+ */
+
+static int config_chipset_for_dma (ide_drive_t *drive)
+{
+ u8 speed = ide_dma_speed(drive, it821x_ratemask(drive));
+
+ config_it821x_chipset_for_pio(drive, !speed);
+ it821x_tune_chipset(drive, speed);
+ return ide_dma_enable(drive);
+}
+
+/**
+ * it821x_configure_drive_for_dma - set up for DMA transfers
+ * @drive: drive we are going to set up
+ *
+ * Set up the drive for DMA, tune the controller and drive as
+ * required. If the drive isn't suitable for DMA or we hit
+ * other problems then we will drop down to PIO and set up
+ * PIO appropriately
+ */
+
+static int it821x_config_drive_for_dma (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ if (ide_use_dma(drive)) {
+ if (config_chipset_for_dma(drive))
+ return hwif->ide_dma_on(drive);
+ }
+ config_it821x_chipset_for_pio(drive, 1);
+ return hwif->ide_dma_off_quietly(drive);
+}
+
+/**
+ * ata66_it821x - check for 80 pin cable
+ * @hwif: interface to check
+ *
+ * Check for the presence of an ATA66 capable cable on the
+ * interface. Problematic as it seems some cards don't have
+ * the needed logic onboard.
+ */
+
+static unsigned int __devinit ata66_it821x(ide_hwif_t *hwif)
+{
+ /* The reference driver also only does disk side */
+ return 1;
+}
+
+/**
+ * it821x_fixups - post init callback
+ * @hwif: interface
+ *
+ * This callback is run after the drives have been probed but
+ * before anything gets attached. It allows drivers to do any
+ * final tuning that is needed, or fixups to work around bugs.
+ */
+
+static void __devinit it821x_fixups(ide_hwif_t *hwif)
+{
+ struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+ int i;
+
+ if(!itdev->smart) {
+ /*
+ * If we are in pass through mode then not much
+ * needs to be done, but we do bother to clear the
+ * IRQ mask as we may well be in PIO (eg rev 0x10)
+ * for now and we know unmasking is safe on this chipset.
+ */
+ for (i = 0; i < 2; i++) {
+ ide_drive_t *drive = &hwif->drives[i];
+ if(drive->present)
+ drive->unmask = 1;
+ }
+ return;
+ }
+ /*
+ * Perform fixups on smart mode. We need to "lose" some
+ * capabilities the firmware lacks but does not filter, and
+ * also patch up some capability bits that it forgets to set
+ * in RAID mode.
+ */
+
+ for(i = 0; i < 2; i++) {
+ ide_drive_t *drive = &hwif->drives[i];
+ struct hd_driveid *id;
+ u16 *idbits;
+
+ if(!drive->present)
+ continue;
+ id = drive->id;
+ idbits = (u16 *)drive->id;
+
+ /* Check for RAID v native */
+ if(strstr(id->model, "Integrated Technology Express")) {
+ /* In raid mode the ident block is slightly buggy.
+ We need to set the bits so that the IDE layer knows
+ LBA28, LBA48 and DMA are valid */
+ id->capability |= 3; /* LBA28, DMA */
+ id->command_set_2 |= 0x0400; /* LBA48 valid */
+ id->cfs_enable_2 |= 0x0400; /* LBA48 on */
+ /* Reporting logic */
+ printk(KERN_INFO "%s: IT8212 %sRAID %d volume",
+ drive->name,
+ idbits[147] ? "Bootable ":"",
+ idbits[129]);
+ if(idbits[129] != 1)
+ printk("(%dK stripe)", idbits[146]);
+ printk(".\n");
+ /* Now the core code will have wrongly decided no DMA
+ so we need to fix this */
+ hwif->ide_dma_off_quietly(drive);
+#ifdef CONFIG_IDEDMA_ONLYDISK
+ if (drive->media == ide_disk)
+#endif
+ hwif->ide_dma_check(drive);
+ } else {
+ /* Non RAID volume. Fixups to stop the core code
+ doing unsupported things */
+ id->field_valid &= 1;
+ id->queue_depth = 0;
+ id->command_set_1 = 0;
+ id->command_set_2 &= 0xC400;
+ id->cfsse &= 0xC000;
+ id->cfs_enable_1 = 0;
+ id->cfs_enable_2 &= 0xC400;
+ id->csf_default &= 0xC000;
+ id->word127 = 0;
+ id->dlf = 0;
+ id->csfo = 0;
+ id->cfa_power = 0;
+ printk(KERN_INFO "%s: Performing identify fixups.\n",
+ drive->name);
+ }
+ }
+
+}
+
+/**
+ * init_hwif_it821x - set up hwif structs
+ * @hwif: interface to set up
+ *
+ * We do the basic set up of the interface structure. The IT8212
+ * requires several custom handlers so we override the default
+ * ide DMA handlers appropriately
+ */
+
+static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
+{
+ struct it821x_dev *idev = kmalloc(sizeof(struct it821x_dev), GFP_KERNEL);
+ u8 conf;
+
+ if(idev == NULL) {
+ printk(KERN_ERR "it821x: out of memory, falling back to legacy behaviour.\n");
+ goto fallback;
+ }
+ memset(idev, 0, sizeof(struct it821x_dev));
+ ide_set_hwifdata(hwif, idev);
+
+ pci_read_config_byte(hwif->pci_dev, 0x50, &conf);
+ if(conf & 1) {
+ idev->smart = 1;
+ hwif->atapi_dma = 0;
+ /* Long I/Os, although allowed in LBA48 space, cause the
+ onboard firmware to enter the twilight zone */
+ hwif->rqsize = 256;
+ }
+
+ /* Pull the current clocks from 0x50 also */
+ if (conf & (1 << (1 + hwif->channel)))
+ idev->clock_mode = ATA_50;
+ else
+ idev->clock_mode = ATA_66;
+
+ idev->want[0][1] = ATA_ANY;
+ idev->want[1][1] = ATA_ANY;
+
+ /*
+ * Not in the docs but according to the reference driver
+ * this is necessary.
+ */
+
+ pci_read_config_byte(hwif->pci_dev, 0x08, &conf);
+ if(conf == 0x10) {
+ idev->timing10 = 1;
+ hwif->atapi_dma = 0;
+ if(!idev->smart)
+ printk(KERN_WARNING "it821x: Revision 0x10, workarounds activated.\n");
+ }
+
+ hwif->speedproc = &it821x_tune_chipset;
+ hwif->tuneproc = &it821x_tuneproc;
+
+ /* MWDMA/PIO clock switching for pass through mode */
+ if(!idev->smart) {
+ hwif->dma_start = &it821x_dma_start;
+ hwif->ide_dma_end = &it821x_dma_end;
+ }
+
+ hwif->drives[0].autotune = 1;
+ hwif->drives[1].autotune = 1;
+
+ if (!hwif->dma_base)
+ goto fallback;
+
+ hwif->ultra_mask = 0x7f;
+ hwif->mwdma_mask = 0x07;
+ hwif->swdma_mask = 0x07;
+
+ hwif->ide_dma_check = &it821x_config_drive_for_dma;
+ if (!(hwif->udma_four))
+ hwif->udma_four = ata66_it821x(hwif);
+
+ /*
+ * The BIOS often doesn't set up DMA on this controller
+ * so we always do it.
+ */
+
+ hwif->autodma = 1;
+ hwif->drives[0].autodma = hwif->autodma;
+ hwif->drives[1].autodma = hwif->autodma;
+ return;
+fallback:
+ hwif->autodma = 0;
+ return;
+}
+
+static void __devinit it8212_disable_raid(struct pci_dev *dev)
+{
+ /* Reset local CPU, and set BIOS not ready */
+ pci_write_config_byte(dev, 0x5E, 0x01);
+
+ /* Set to bypass mode, and reset PCI bus */
+ pci_write_config_byte(dev, 0x50, 0x00);
+ pci_write_config_word(dev, PCI_COMMAND,
+ PCI_COMMAND_PARITY | PCI_COMMAND_IO |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ pci_write_config_word(dev, 0x40, 0xA0F3);
+
+ pci_write_config_dword(dev,0x4C, 0x02040204);
+ pci_write_config_byte(dev, 0x42, 0x36);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0);
+}
+
+static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const char *name)
+{
+ u8 conf;
+ static char *mode[2] = { "pass through", "smart" };
+
+ /* Force the card into bypass mode if so requested */
+ if (it8212_noraid) {
+ printk(KERN_INFO "it8212: forcing bypass mode.\n");
+ it8212_disable_raid(dev);
+ }
+ pci_read_config_byte(dev, 0x50, &conf);
+ printk(KERN_INFO "it821x: controller in %s mode.\n", mode[conf & 1]);
+ return 0;
+}
+
+
+#define DECLARE_ITE_DEV(name_str) \
+ { \
+ .name = name_str, \
+ .init_chipset = init_chipset_it821x, \
+ .init_hwif = init_hwif_it821x, \
+ .channels = 2, \
+ .autodma = AUTODMA, \
+ .bootable = ON_BOARD, \
+ .fixup = it821x_fixups \
+ }
+
+static ide_pci_device_t it821x_chipsets[] __devinitdata = {
+ /* 0 */ DECLARE_ITE_DEV("IT8212"),
+};
+
+/**
+ * it821x_init_one - pci layer discovery entry
+ * @dev: PCI device
+ * @id: ident table entry
+ *
+ * Called by the PCI code when it finds an ITE821x controller.
+ * We then use the IDE PCI generic helper to do most of the work.
+ */
+
+static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]);
+ return 0;
+}
+
+static struct pci_device_id it821x_pci_tbl[] = {
+ { PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8212, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, it821x_pci_tbl);
+
+static struct pci_driver driver = {
+ .name = "ITE821x IDE",
+ .id_table = it821x_pci_tbl,
+ .probe = it821x_init_one,
+};
+
+static int __init it821x_ide_init(void)
+{
+ return ide_pci_register_driver(&driver);
+}
+
+module_init(it821x_ide_init);
+
+module_param_named(noraid, it8212_noraid, int, S_IRUGO);
+MODULE_PARM_DESC(noraid, "Force card into bypass mode");
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("PCI driver module for the ITE 821x");
+MODULE_LICENSE("GPL");
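
For reference, a minimal sketch (not part of the patch) of how the mode reported by init_chipset_it821x() is derived: bit 0 of PCI config register 0x50 selects between pass-through (0) and smart/RAID firmware (1), which is why the message above indexes mode[] with conf & 1 and why it8212_disable_raid() clears that register when the noraid parameter is set. The helper name below is hypothetical.

#include <linux/pci.h>

static int it821x_is_smart_sketch(struct pci_dev *dev)
{
        u8 conf;

        pci_read_config_byte(dev, 0x50, &conf);
        return conf & 1;        /* 1 = smart (RAID firmware), 0 = pass through */
}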
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index 205a32fbc2f..fcd5142f5cf 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -195,7 +195,7 @@ static int ns87415_ide_dma_check (ide_drive_t *drive)
return __ide_dma_check(drive);
}
-static void __init init_hwif_ns87415 (ide_hwif_t *hwif)
+static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
{
struct pci_dev *dev = hwif->pci_dev;
unsigned int ctrl, using_inta;
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/pci/opti621.c
index cf4fd91d396..7a7c2ef78ac 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/pci/opti621.c
@@ -326,7 +326,7 @@ static void opti621_tune_drive (ide_drive_t *drive, u8 pio)
/*
* init_hwif_opti621() is called once for each hwif found at boot.
*/
-static void __init init_hwif_opti621 (ide_hwif_t *hwif)
+static void __devinit init_hwif_opti621 (ide_hwif_t *hwif)
{
hwif->autodma = 0;
hwif->drives[0].drive_data = PIO_DONT_KNOW;
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 3bc3bf1be49..10592cec6c4 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -459,7 +459,7 @@ printk("%s: SC1200: resume\n", hwif->name);
* This gets invoked by the IDE driver once for each channel,
* and performs channel-specific pre-initialization before drive probing.
*/
-static void __init init_hwif_sc1200 (ide_hwif_t *hwif)
+static void __devinit init_hwif_sc1200 (ide_hwif_t *hwif)
{
if (hwif->mate)
hwif->serialized = hwif->mate->serialized = 1;
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index 82a1103b241..c6f5fa4b4ca 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -442,7 +442,7 @@ static unsigned int __devinit init_chipset_svwks (struct pci_dev *dev, const cha
return (dev->irq) ? dev->irq : 0;
}
-static unsigned int __init ata66_svwks_svwks (ide_hwif_t *hwif)
+static unsigned int __devinit ata66_svwks_svwks (ide_hwif_t *hwif)
{
return 1;
}
@@ -454,7 +454,7 @@ static unsigned int __init ata66_svwks_svwks (ide_hwif_t *hwif)
* Bit 14 clear = primary IDE channel does not have 80-pin cable.
* Bit 14 set = primary IDE channel has 80-pin cable.
*/
-static unsigned int __init ata66_svwks_dell (ide_hwif_t *hwif)
+static unsigned int __devinit ata66_svwks_dell (ide_hwif_t *hwif)
{
struct pci_dev *dev = hwif->pci_dev;
if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
@@ -472,7 +472,7 @@ static unsigned int __init ata66_svwks_dell (ide_hwif_t *hwif)
*
* WARNING: this only works on Alpine hardware!
*/
-static unsigned int __init ata66_svwks_cobalt (ide_hwif_t *hwif)
+static unsigned int __devinit ata66_svwks_cobalt (ide_hwif_t *hwif)
{
struct pci_dev *dev = hwif->pci_dev;
if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN &&
@@ -483,7 +483,7 @@ static unsigned int __init ata66_svwks_cobalt (ide_hwif_t *hwif)
return 0;
}
-static unsigned int __init ata66_svwks (ide_hwif_t *hwif)
+static unsigned int __devinit ata66_svwks (ide_hwif_t *hwif)
{
struct pci_dev *dev = hwif->pci_dev;
@@ -573,7 +573,7 @@ static int __devinit init_setup_svwks (struct pci_dev *dev, ide_pci_device_t *d)
return ide_setup_pci_device(dev, d);
}
-static int __init init_setup_csb6 (struct pci_dev *dev, ide_pci_device_t *d)
+static int __devinit init_setup_csb6 (struct pci_dev *dev, ide_pci_device_t *d)
{
if (!(PCI_FUNC(dev->devfn) & 1)) {
d->bootable = NEVER_BOARD;
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 1d970a0de21..ea0806c82be 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -386,7 +386,7 @@ static unsigned int sl82c105_bridge_revision(struct pci_dev *dev)
* channel 0 here at least, but channel 1 has to be enabled by
* firmware or arch code. We still set both to 16 bits mode.
*/
-static unsigned int __init init_chipset_sl82c105(struct pci_dev *dev, const char *msg)
+static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const char *msg)
{
u32 val;
@@ -399,7 +399,7 @@ static unsigned int __init init_chipset_sl82c105(struct pci_dev *dev, const char
return dev->irq;
}
-static void __init init_dma_sl82c105(ide_hwif_t *hwif, unsigned long dma_base)
+static void __devinit init_dma_sl82c105(ide_hwif_t *hwif, unsigned long dma_base)
{
unsigned int rev;
u8 dma_state;
@@ -431,7 +431,7 @@ static void __init init_dma_sl82c105(ide_hwif_t *hwif, unsigned long dma_base)
* Initialise the chip
*/
-static void __init init_hwif_sl82c105(ide_hwif_t *hwif)
+static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
{
struct pci_dev *dev = hwif->pci_dev;
u32 val;
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 7fbf36342f7..5112c726633 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -196,7 +196,7 @@ fast_ata_pio:
}
#endif /* CONFIG_BLK_DEV_IDEDMA */
-static void __init init_hwif_slc90e66 (ide_hwif_t *hwif)
+static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
{
u8 reg47 = 0;
u8 mask = hwif->channel ? 0x01 : 0x02; /* bit0:Primary */
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
index a1df2bfe363..f96b56838f3 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/pci/triflex.c
@@ -130,7 +130,7 @@ static int triflex_config_drive_xfer_rate(ide_drive_t *drive)
return hwif->ide_dma_off_quietly(drive);
}
-static void __init init_hwif_triflex(ide_hwif_t *hwif)
+static void __devinit init_hwif_triflex(ide_hwif_t *hwif)
{
hwif->tuneproc = &triflex_tune_drive;
hwif->speedproc = &triflex_tune_chipset;
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 069dbffe211..a4d099c937f 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -415,7 +415,7 @@ static int via82cxxx_ide_dma_check (ide_drive_t *drive)
* and initialize its drive independent registers.
*/
-static unsigned int __init init_chipset_via82cxxx(struct pci_dev *dev, const char *name)
+static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const char *name)
{
struct pci_dev *isa = NULL;
u8 t, v;
@@ -576,7 +576,7 @@ static unsigned int __init init_chipset_via82cxxx(struct pci_dev *dev, const cha
return 0;
}
-static void __init init_hwif_via82cxxx(ide_hwif_t *hwif)
+static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
{
int i;
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 569f1676744..818380b5fd2 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1324,9 +1324,9 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
/* XXX FIXME: Media bay stuff need re-organizing */
if (np->parent && np->parent->name
&& strcasecmp(np->parent->name, "media-bay") == 0) {
-#ifdef CONFIG_PMAC_PBOOK
+#ifdef CONFIG_PMAC_MEDIABAY
media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq, hwif->index);
-#endif /* CONFIG_PMAC_PBOOK */
+#endif /* CONFIG_PMAC_MEDIABAY */
pmif->mediabay = 1;
if (!bidp)
pmif->aapl_bus_id = 1;
@@ -1382,10 +1382,10 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
pmif->mediabay ? " (mediabay)" : "", hwif->irq);
-#ifdef CONFIG_PMAC_PBOOK
+#ifdef CONFIG_PMAC_MEDIABAY
if (pmif->mediabay && check_media_bay_by_base(pmif->regbase, MB_CD) == 0)
hwif->noprobe = 0;
-#endif /* CONFIG_PMAC_PBOOK */
+#endif /* CONFIG_PMAC_MEDIABAY */
hwif->sg_max_nents = MAX_DCMDS;
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index e501675ad72..77da827b289 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -847,7 +847,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
d = list_entry(l, struct pci_driver, node);
if(d->id_table)
{
- const struct pci_device_id *id = pci_match_device(d->id_table, dev);
+ const struct pci_device_id *id = pci_match_id(d->id_table, dev);
if(id != NULL)
{
if(d->probe(dev, id) >= 0)
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 36e25ac823d..b3d3d22fde6 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -3538,8 +3538,8 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
-#ifdef CONFIG_PMAC_PBOOK
- {
+#ifdef CONFIG_PPC_PMAC
+ if (_machine == _MACH_Pmac) {
struct device_node *of_node;
/* Re-enable 1394 */
@@ -3547,7 +3547,7 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
if (of_node)
pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
}
-#endif
+#endif /* CONFIG_PPC_PMAC */
pci_enable_device(pdev);
@@ -3557,8 +3557,8 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
-#ifdef CONFIG_PMAC_PBOOK
- {
+#ifdef CONFIG_PPC_PMAC
+ if (_machine == _MACH_Pmac) {
struct device_node *of_node;
/* Disable 1394 */
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c
index 5f15feffeae..eb5ff54c10d 100644
--- a/drivers/infiniband/core/packer.c
+++ b/drivers/infiniband/core/packer.c
@@ -96,7 +96,7 @@ void ib_pack(const struct ib_field *desc,
else
val = 0;
- mask = cpu_to_be64(((1ull << desc[i].size_bits) - 1) << shift);
+ mask = cpu_to_be64((~0ull >> (64 - desc[i].size_bits)) << shift);
addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words);
*addr = (*addr & ~mask) | (cpu_to_be64(val) & mask);
} else {
@@ -176,7 +176,7 @@ void ib_unpack(const struct ib_field *desc,
__be64 *addr;
shift = 64 - desc[i].offset_bits - desc[i].size_bits;
- mask = ((1ull << desc[i].size_bits) - 1) << shift;
+ mask = (~0ull >> (64 - desc[i].size_bits)) << shift;
addr = (__be64 *) buf + desc[i].offset_words;
val = (be64_to_cpup(addr) & mask) >> shift;
value_write(desc[i].struct_offset_bytes,
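
The mask change above matters for 64-bit fields: shifting 1ull left by size_bits is undefined behaviour in C when size_bits is 64, while ~0ull >> (64 - size_bits) stays well defined for any field width from 1 to 64 bits. A standalone illustration of the width mask (plain userspace C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Width mask used by ib_pack()/ib_unpack() before it is shifted into place;
 * valid for 1 <= size_bits <= 64. */
static uint64_t field_mask(unsigned int size_bits)
{
        return ~0ull >> (64 - size_bits);
}

int main(void)
{
        printf("%016llx\n", (unsigned long long)field_mask(16)); /* 000000000000ffff */
        printf("%016llx\n", (unsigned long long)field_mask(64)); /* ffffffffffffffff */
        /* the old ((1ull << size_bits) - 1) form is undefined for size_bits == 64 */
        return 0;
}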
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 276e1a53010..5a08e81fa82 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -507,7 +507,13 @@ retry:
spin_unlock_irqrestore(&idr_lock, flags);
}
- return ret;
+ /*
+ * It's not safe to dereference query any more, because the
+ * send may already have completed and freed the query in
+ * another context. So use wr.wr_id, which has a copy of the
+ * query's id.
+ */
+ return ret ? ret : wr.wr_id;
}
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
@@ -598,14 +604,15 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
rec, query->sa_query.mad->data);
*sa_query = &query->sa_query;
+
ret = send_mad(&query->sa_query, timeout_ms);
- if (ret) {
+ if (ret < 0) {
*sa_query = NULL;
kfree(query->sa_query.mad);
kfree(query);
}
- return ret ? ret : query->sa_query.id;
+ return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
@@ -674,14 +681,15 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
rec, query->sa_query.mad->data);
*sa_query = &query->sa_query;
+
ret = send_mad(&query->sa_query, timeout_ms);
- if (ret) {
+ if (ret < 0) {
*sa_query = NULL;
kfree(query->sa_query.mad);
kfree(query);
}
- return ret ? ret : query->sa_query.id;
+ return ret;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
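
Condensed, the convention the hunks above establish: send_mad() now returns the query id (read back from wr.wr_id, a copy made before posting) on success and a negative errno on failure, so the callers return its result directly and must not touch the query after posting, since the completion path may free it on another CPU. A hedged sketch of that shape; the types and the post helper are illustrative stand-ins, not the real sa_query internals:

struct query_sketch {
        int id;
};

/* Stand-in for posting the MAD send; in reality the completion handler may
 * run -- and free 'query' -- before this even returns. */
static int post_send_sketch(struct query_sketch *query)
{
        return 0;
}

static int issue_query_sketch(struct query_sketch *query)
{
        int id = query->id;                     /* copy the id before posting */
        int ret = post_send_sketch(query);

        /* 'query' must not be dereferenced past this point */
        return ret ? ret : id;
}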
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 085baf393ca..d58dcbe6648 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index cd9ed958d92..1557a522d83 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -431,6 +431,36 @@ static int mthca_cmd_imm(struct mthca_dev *dev,
timeout, status);
}
+int mthca_cmd_init(struct mthca_dev *dev)
+{
+ sema_init(&dev->cmd.hcr_sem, 1);
+ sema_init(&dev->cmd.poll_sem, 1);
+ dev->cmd.use_events = 0;
+
+ dev->hcr = ioremap(pci_resource_start(dev->pdev, 0) + MTHCA_HCR_BASE,
+ MTHCA_HCR_SIZE);
+ if (!dev->hcr) {
+ mthca_err(dev, "Couldn't map command register.");
+ return -ENOMEM;
+ }
+
+ dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev,
+ MTHCA_MAILBOX_SIZE,
+ MTHCA_MAILBOX_SIZE, 0);
+ if (!dev->cmd.pool) {
+ iounmap(dev->hcr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void mthca_cmd_cleanup(struct mthca_dev *dev)
+{
+ pci_pool_destroy(dev->cmd.pool);
+ iounmap(dev->hcr);
+}
+
/*
* Switch to using events to issue FW commands (should be called after
* event queue to command events has been initialized).
@@ -489,6 +519,33 @@ void mthca_cmd_use_polling(struct mthca_dev *dev)
up(&dev->cmd.poll_sem);
}
+struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
+ unsigned int gfp_mask)
+{
+ struct mthca_mailbox *mailbox;
+
+ mailbox = kmalloc(sizeof *mailbox, gfp_mask);
+ if (!mailbox)
+ return ERR_PTR(-ENOMEM);
+
+ mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
+ if (!mailbox->buf) {
+ kfree(mailbox);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mailbox;
+}
+
+void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
+{
+ if (!mailbox)
+ return;
+
+ pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
+ kfree(mailbox);
+}
+
int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
{
u64 out;
@@ -513,20 +570,20 @@ int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
u64 virt, u8 *status)
{
- u32 *inbox;
- dma_addr_t indma;
+ struct mthca_mailbox *mailbox;
struct mthca_icm_iter iter;
+ __be64 *pages;
int lg;
int nent = 0;
int i;
int err = 0;
int ts = 0, tc = 0;
- inbox = pci_alloc_consistent(dev->pdev, PAGE_SIZE, &indma);
- if (!inbox)
- return -ENOMEM;
-
- memset(inbox, 0, PAGE_SIZE);
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE);
+ pages = mailbox->buf;
for (mthca_icm_first(icm, &iter);
!mthca_icm_last(&iter);
@@ -546,19 +603,17 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
}
for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) {
if (virt != -1) {
- *((__be64 *) (inbox + nent * 4)) =
- cpu_to_be64(virt);
+ pages[nent * 2] = cpu_to_be64(virt);
virt += 1 << lg;
}
- *((__be64 *) (inbox + nent * 4 + 2)) =
- cpu_to_be64((mthca_icm_addr(&iter) +
- (i << lg)) | (lg - 12));
+ pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) +
+ (i << lg)) | (lg - 12));
ts += 1 << (lg - 10);
++tc;
- if (nent == PAGE_SIZE / 16) {
- err = mthca_cmd(dev, indma, nent, 0, op,
+ if (nent == MTHCA_MAILBOX_SIZE / 16) {
+ err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
CMD_TIME_CLASS_B, status);
if (err || *status)
goto out;
@@ -568,7 +623,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
}
if (nent)
- err = mthca_cmd(dev, indma, nent, 0, op,
+ err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
CMD_TIME_CLASS_B, status);
switch (op) {
@@ -585,7 +640,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
}
out:
- pci_free_consistent(dev->pdev, PAGE_SIZE, inbox, indma);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -606,8 +661,8 @@ int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
{
+ struct mthca_mailbox *mailbox;
u32 *outbox;
- dma_addr_t outdma;
int err = 0;
u8 lg;
@@ -625,12 +680,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
#define QUERY_FW_EQ_ARM_BASE_OFFSET 0x40
#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48
- outbox = pci_alloc_consistent(dev->pdev, QUERY_FW_OUT_SIZE, &outdma);
- if (!outbox) {
- return -ENOMEM;
- }
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
- err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_FW,
+ err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
CMD_TIME_CLASS_A, status);
if (err)
@@ -681,15 +736,15 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
}
out:
- pci_free_consistent(dev->pdev, QUERY_FW_OUT_SIZE, outbox, outdma);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
{
+ struct mthca_mailbox *mailbox;
u8 info;
u32 *outbox;
- dma_addr_t outdma;
int err = 0;
#define ENABLE_LAM_OUT_SIZE 0x100
@@ -700,11 +755,12 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
#define ENABLE_LAM_INFO_ECC_MASK 0x3
- outbox = pci_alloc_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, &outdma);
- if (!outbox)
- return -ENOMEM;
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
- err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_ENABLE_LAM,
+ err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
CMD_TIME_CLASS_C, status);
if (err)
@@ -733,7 +789,7 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
(unsigned long long) dev->ddr_end);
out:
- pci_free_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, outbox, outdma);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -744,9 +800,9 @@ int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
{
+ struct mthca_mailbox *mailbox;
u8 info;
u32 *outbox;
- dma_addr_t outdma;
int err = 0;
#define QUERY_DDR_OUT_SIZE 0x100
@@ -757,11 +813,12 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
#define QUERY_DDR_INFO_ECC_MASK 0x3
- outbox = pci_alloc_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, &outdma);
- if (!outbox)
- return -ENOMEM;
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
- err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DDR,
+ err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
CMD_TIME_CLASS_A, status);
if (err)
@@ -787,15 +844,15 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
(unsigned long long) dev->ddr_end);
out:
- pci_free_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, outbox, outdma);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
struct mthca_dev_lim *dev_lim, u8 *status)
{
+ struct mthca_mailbox *mailbox;
u32 *outbox;
- dma_addr_t outdma;
u8 field;
u16 size;
int err;
@@ -860,11 +917,12 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
#define QUERY_DEV_LIM_LAMR_OFFSET 0x9f
#define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0
- outbox = pci_alloc_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, &outdma);
- if (!outbox)
- return -ENOMEM;
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
- err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DEV_LIM,
+ err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
CMD_TIME_CLASS_A, status);
if (err)
@@ -1020,15 +1078,15 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
}
out:
- pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
struct mthca_adapter *adapter, u8 *status)
{
+ struct mthca_mailbox *mailbox;
u32 *outbox;
- dma_addr_t outdma;
int err;
#define QUERY_ADAPTER_OUT_SIZE 0x100
@@ -1037,23 +1095,24 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
- outbox = pci_alloc_consistent(dev->pdev, QUERY_ADAPTER_OUT_SIZE, &outdma);
- if (!outbox)
- return -ENOMEM;
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
- err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_ADAPTER,
+ err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
CMD_TIME_CLASS_A, status);
if (err)
goto out;
- MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
- MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
+ MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
+ MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
- MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
+ MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
out:
- pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -1061,8 +1120,8 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
struct mthca_init_hca_param *param,
u8 *status)
{
+ struct mthca_mailbox *mailbox;
u32 *inbox;
- dma_addr_t indma;
int err;
#define INIT_HCA_IN_SIZE 0x200
@@ -1102,9 +1161,10 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
#define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10)
#define INIT_HCA_UAR_CTX_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x18)
- inbox = pci_alloc_consistent(dev->pdev, INIT_HCA_IN_SIZE, &indma);
- if (!inbox)
- return -ENOMEM;
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
memset(inbox, 0, INIT_HCA_IN_SIZE);
@@ -1167,10 +1227,9 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET);
}
- err = mthca_cmd(dev, indma, 0, 0, CMD_INIT_HCA,
- HZ, status);
+ err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status);
- pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -1178,8 +1237,8 @@ int mthca_INIT_IB(struct mthca_dev *dev,
struct mthca_init_ib_param *param,
int port, u8 *status)
{
+ struct mthca_mailbox *mailbox;
u32 *inbox;
- dma_addr_t indma;
int err;
u32 flags;
@@ -1199,9 +1258,10 @@ int mthca_INIT_IB(struct mthca_dev *dev,
#define INIT_IB_NODE_GUID_OFFSET 0x18
#define INIT_IB_SI_GUID_OFFSET 0x20
- inbox = pci_alloc_consistent(dev->pdev, INIT_IB_IN_SIZE, &indma);
- if (!inbox)
- return -ENOMEM;
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
memset(inbox, 0, INIT_IB_IN_SIZE);
@@ -1221,10 +1281,10 @@ int mthca_INIT_IB(struct mthca_dev *dev,
MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET);
- err = mthca_cmd(dev, indma, port, 0, CMD_INIT_IB,
+ err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
CMD_TIME_CLASS_A, status);
- pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -1241,8 +1301,8 @@ int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
int port, u8 *status)
{
+ struct mthca_mailbox *mailbox;
u32 *inbox;
- dma_addr_t indma;
int err;
u32 flags = 0;
@@ -1253,9 +1313,10 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
#define SET_IB_CAP_MASK_OFFSET 0x04
#define SET_IB_SI_GUID_OFFSET 0x08
- inbox = pci_alloc_consistent(dev->pdev, SET_IB_IN_SIZE, &indma);
- if (!inbox)
- return -ENOMEM;
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
memset(inbox, 0, SET_IB_IN_SIZE);
@@ -1266,10 +1327,10 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET);
- err = mthca_cmd(dev, indma, port, 0, CMD_SET_IB,
+ err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
CMD_TIME_CLASS_B, status);
- pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -1280,20 +1341,22 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st
int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
{
+ struct mthca_mailbox *mailbox;
u64 *inbox;
- dma_addr_t indma;
int err;
- inbox = pci_alloc_consistent(dev->pdev, 16, &indma);
- if (!inbox)
- return -ENOMEM;
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
inbox[0] = cpu_to_be64(virt);
inbox[1] = cpu_to_be64(dma_addr);
- err = mthca_cmd(dev, indma, 1, 0, CMD_MAP_ICM, CMD_TIME_CLASS_B, status);
+ err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
+ CMD_TIME_CLASS_B, status);
- pci_free_consistent(dev->pdev, 16, inbox, indma);
+ mthca_free_mailbox(dev, mailbox);
if (!err)
mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
@@ -1338,69 +1401,26 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
return 0;
}
-int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry,
+int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int mpt_index, u8 *status)
{
- dma_addr_t indma;
- int err;
-
- indma = pci_map_single(dev->pdev, mpt_entry,
- MTHCA_MPT_ENTRY_SIZE,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(indma))
- return -ENOMEM;
-
- err = mthca_cmd(dev, indma, mpt_index, 0, CMD_SW2HW_MPT,
- CMD_TIME_CLASS_B, status);
-
- pci_unmap_single(dev->pdev, indma,
- MTHCA_MPT_ENTRY_SIZE, PCI_DMA_TODEVICE);
- return err;
+ return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
+ CMD_TIME_CLASS_B, status);
}
-int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry,
+int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int mpt_index, u8 *status)
{
- dma_addr_t outdma = 0;
- int err;
-
- if (mpt_entry) {
- outdma = pci_map_single(dev->pdev, mpt_entry,
- MTHCA_MPT_ENTRY_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(outdma))
- return -ENOMEM;
- }
-
- err = mthca_cmd_box(dev, 0, outdma, mpt_index, !mpt_entry,
- CMD_HW2SW_MPT,
- CMD_TIME_CLASS_B, status);
-
- if (mpt_entry)
- pci_unmap_single(dev->pdev, outdma,
- MTHCA_MPT_ENTRY_SIZE,
- PCI_DMA_FROMDEVICE);
- return err;
+ return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
+ !mailbox, CMD_HW2SW_MPT,
+ CMD_TIME_CLASS_B, status);
}
-int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry,
+int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int num_mtt, u8 *status)
{
- dma_addr_t indma;
- int err;
-
- indma = pci_map_single(dev->pdev, mtt_entry,
- (num_mtt + 2) * 8,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(indma))
- return -ENOMEM;
-
- err = mthca_cmd(dev, indma, num_mtt, 0, CMD_WRITE_MTT,
- CMD_TIME_CLASS_B, status);
-
- pci_unmap_single(dev->pdev, indma,
- (num_mtt + 2) * 8, PCI_DMA_TODEVICE);
- return err;
+ return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
+ CMD_TIME_CLASS_B, status);
}
int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
@@ -1418,92 +1438,38 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
}
-int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context,
+int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int eq_num, u8 *status)
{
- dma_addr_t indma;
- int err;
-
- indma = pci_map_single(dev->pdev, eq_context,
- MTHCA_EQ_CONTEXT_SIZE,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(indma))
- return -ENOMEM;
-
- err = mthca_cmd(dev, indma, eq_num, 0, CMD_SW2HW_EQ,
- CMD_TIME_CLASS_A, status);
-
- pci_unmap_single(dev->pdev, indma,
- MTHCA_EQ_CONTEXT_SIZE, PCI_DMA_TODEVICE);
- return err;
+ return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
+ CMD_TIME_CLASS_A, status);
}
-int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context,
+int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int eq_num, u8 *status)
{
- dma_addr_t outdma = 0;
- int err;
-
- outdma = pci_map_single(dev->pdev, eq_context,
- MTHCA_EQ_CONTEXT_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(outdma))
- return -ENOMEM;
-
- err = mthca_cmd_box(dev, 0, outdma, eq_num, 0,
- CMD_HW2SW_EQ,
- CMD_TIME_CLASS_A, status);
-
- pci_unmap_single(dev->pdev, outdma,
- MTHCA_EQ_CONTEXT_SIZE,
- PCI_DMA_FROMDEVICE);
- return err;
+ return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
+ CMD_HW2SW_EQ,
+ CMD_TIME_CLASS_A, status);
}
-int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context,
+int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status)
{
- dma_addr_t indma;
- int err;
-
- indma = pci_map_single(dev->pdev, cq_context,
- MTHCA_CQ_CONTEXT_SIZE,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(indma))
- return -ENOMEM;
-
- err = mthca_cmd(dev, indma, cq_num, 0, CMD_SW2HW_CQ,
+ return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
CMD_TIME_CLASS_A, status);
-
- pci_unmap_single(dev->pdev, indma,
- MTHCA_CQ_CONTEXT_SIZE, PCI_DMA_TODEVICE);
- return err;
}
-int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context,
+int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status)
{
- dma_addr_t outdma = 0;
- int err;
-
- outdma = pci_map_single(dev->pdev, cq_context,
- MTHCA_CQ_CONTEXT_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(outdma))
- return -ENOMEM;
-
- err = mthca_cmd_box(dev, 0, outdma, cq_num, 0,
- CMD_HW2SW_CQ,
- CMD_TIME_CLASS_A, status);
-
- pci_unmap_single(dev->pdev, outdma,
- MTHCA_CQ_CONTEXT_SIZE,
- PCI_DMA_FROMDEVICE);
- return err;
+ return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
+ CMD_HW2SW_CQ,
+ CMD_TIME_CLASS_A, status);
}
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
- int is_ee, void *qp_context, u32 optmask,
+ int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
u8 *status)
{
static const u16 op[] = {
@@ -1520,36 +1486,34 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
[MTHCA_TRANS_ANY2RST] = CMD_ERR2RST_QPEE
};
u8 op_mod = 0;
-
- dma_addr_t indma;
+ int my_mailbox = 0;
int err;
if (trans < 0 || trans >= ARRAY_SIZE(op))
return -EINVAL;
if (trans == MTHCA_TRANS_ANY2RST) {
- indma = 0;
op_mod = 3; /* don't write outbox, any->reset */
/* For debugging */
- qp_context = pci_alloc_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE,
- &indma);
- op_mod = 2; /* write outbox, any->reset */
+ if (!mailbox) {
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (!IS_ERR(mailbox)) {
+ my_mailbox = 1;
+ op_mod = 2; /* write outbox, any->reset */
+ } else
+ mailbox = NULL;
+ }
} else {
- indma = pci_map_single(dev->pdev, qp_context,
- MTHCA_QP_CONTEXT_SIZE,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(indma))
- return -ENOMEM;
-
if (0) {
int i;
mthca_dbg(dev, "Dumping QP context:\n");
- printk(" opt param mask: %08x\n", be32_to_cpup(qp_context));
+ printk(" opt param mask: %08x\n", be32_to_cpup(mailbox->buf));
for (i = 0; i < 0x100 / 4; ++i) {
if (i % 8 == 0)
printk(" [%02x] ", i * 4);
- printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2]));
+ printk(" %08x",
+ be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
if ((i + 1) % 8 == 0)
printk("\n");
}
@@ -1557,55 +1521,39 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
}
if (trans == MTHCA_TRANS_ANY2RST) {
- err = mthca_cmd_box(dev, 0, indma, (!!is_ee << 24) | num,
- op_mod, op[trans], CMD_TIME_CLASS_C, status);
+ err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
+ (!!is_ee << 24) | num, op_mod,
+ op[trans], CMD_TIME_CLASS_C, status);
- if (0) {
+ if (0 && mailbox) {
int i;
mthca_dbg(dev, "Dumping QP context:\n");
- printk(" %08x\n", be32_to_cpup(qp_context));
+ printk(" %08x\n", be32_to_cpup(mailbox->buf));
for (i = 0; i < 0x100 / 4; ++i) {
if (i % 8 == 0)
printk("[%02x] ", i * 4);
- printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2]));
+ printk(" %08x",
+ be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
if ((i + 1) % 8 == 0)
printk("\n");
}
}
} else
- err = mthca_cmd(dev, indma, (!!is_ee << 24) | num,
+ err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num,
op_mod, op[trans], CMD_TIME_CLASS_C, status);
- if (trans != MTHCA_TRANS_ANY2RST)
- pci_unmap_single(dev->pdev, indma,
- MTHCA_QP_CONTEXT_SIZE, PCI_DMA_TODEVICE);
- else
- pci_free_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE,
- qp_context, indma);
+ if (my_mailbox)
+ mthca_free_mailbox(dev, mailbox);
+
return err;
}
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
- void *qp_context, u8 *status)
+ struct mthca_mailbox *mailbox, u8 *status)
{
- dma_addr_t outdma = 0;
- int err;
-
- outdma = pci_map_single(dev->pdev, qp_context,
- MTHCA_QP_CONTEXT_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(outdma))
- return -ENOMEM;
-
- err = mthca_cmd_box(dev, 0, outdma, (!!is_ee << 24) | num, 0,
- CMD_QUERY_QPEE,
- CMD_TIME_CLASS_A, status);
-
- pci_unmap_single(dev->pdev, outdma,
- MTHCA_QP_CONTEXT_SIZE,
- PCI_DMA_FROMDEVICE);
- return err;
+ return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
+ CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
}
int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
@@ -1635,11 +1583,11 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
}
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
- int port, struct ib_wc* in_wc, struct ib_grh* in_grh,
+ int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad, u8 *status)
{
- void *box;
- dma_addr_t dma;
+ struct mthca_mailbox *inmailbox, *outmailbox;
+ void *inbox;
int err;
u32 in_modifier = port;
u8 op_modifier = 0;
@@ -1653,11 +1601,18 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
#define MAD_IFC_PKEY_OFFSET 0x10e
#define MAD_IFC_GRH_OFFSET 0x140
- box = pci_alloc_consistent(dev->pdev, MAD_IFC_BOX_SIZE, &dma);
- if (!box)
- return -ENOMEM;
+ inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(inmailbox))
+ return PTR_ERR(inmailbox);
+ inbox = inmailbox->buf;
- memcpy(box, in_mad, 256);
+ outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(outmailbox)) {
+ mthca_free_mailbox(dev, inmailbox);
+ return PTR_ERR(outmailbox);
+ }
+
+ memcpy(inbox, in_mad, 256);
/*
* Key check traps can't be generated unless we have in_wc to
@@ -1671,97 +1626,65 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
if (in_wc) {
u8 val;
- memset(box + 256, 0, 256);
+ memset(inbox + 256, 0, 256);
- MTHCA_PUT(box, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET);
- MTHCA_PUT(box, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
+ MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET);
+ MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
val = in_wc->sl << 4;
- MTHCA_PUT(box, val, MAD_IFC_SL_OFFSET);
+ MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET);
val = in_wc->dlid_path_bits |
(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
- MTHCA_PUT(box, val, MAD_IFC_GRH_OFFSET);
+ MTHCA_PUT(inbox, val, MAD_IFC_GRH_OFFSET);
- MTHCA_PUT(box, in_wc->slid, MAD_IFC_RLID_OFFSET);
- MTHCA_PUT(box, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
+ MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET);
+ MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
if (in_grh)
- memcpy((u8 *) box + MAD_IFC_GRH_OFFSET, in_grh, 40);
+ memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
op_modifier |= 0x10;
in_modifier |= in_wc->slid << 16;
}
- err = mthca_cmd_box(dev, dma, dma + 512, in_modifier, op_modifier,
+ err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
+ in_modifier, op_modifier,
CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
if (!err && !*status)
- memcpy(response_mad, box + 512, 256);
+ memcpy(response_mad, outmailbox->buf, 256);
- pci_free_consistent(dev->pdev, MAD_IFC_BOX_SIZE, box, dma);
+ mthca_free_mailbox(dev, inmailbox);
+ mthca_free_mailbox(dev, outmailbox);
return err;
}
-int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm,
- u8 *status)
+int mthca_READ_MGM(struct mthca_dev *dev, int index,
+ struct mthca_mailbox *mailbox, u8 *status)
{
- dma_addr_t outdma = 0;
- int err;
-
- outdma = pci_map_single(dev->pdev, mgm,
- MTHCA_MGM_ENTRY_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(outdma))
- return -ENOMEM;
-
- err = mthca_cmd_box(dev, 0, outdma, index, 0,
- CMD_READ_MGM,
- CMD_TIME_CLASS_A, status);
-
- pci_unmap_single(dev->pdev, outdma,
- MTHCA_MGM_ENTRY_SIZE,
- PCI_DMA_FROMDEVICE);
- return err;
+ return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
+ CMD_READ_MGM, CMD_TIME_CLASS_A, status);
}
-int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm,
- u8 *status)
+int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
+ struct mthca_mailbox *mailbox, u8 *status)
{
- dma_addr_t indma;
- int err;
-
- indma = pci_map_single(dev->pdev, mgm,
- MTHCA_MGM_ENTRY_SIZE,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(indma))
- return -ENOMEM;
-
- err = mthca_cmd(dev, indma, index, 0, CMD_WRITE_MGM,
- CMD_TIME_CLASS_A, status);
-
- pci_unmap_single(dev->pdev, indma,
- MTHCA_MGM_ENTRY_SIZE, PCI_DMA_TODEVICE);
- return err;
+ return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
+ CMD_TIME_CLASS_A, status);
}
-int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash,
- u8 *status)
+int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ u16 *hash, u8 *status)
{
- dma_addr_t indma;
u64 imm;
int err;
- indma = pci_map_single(dev->pdev, gid, 16, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(indma))
- return -ENOMEM;
-
- err = mthca_cmd_imm(dev, indma, &imm, 0, 0, CMD_MGID_HASH,
+ err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
CMD_TIME_CLASS_A, status);
- *hash = imm;
- pci_unmap_single(dev->pdev, indma, 16, PCI_DMA_TODEVICE);
+ *hash = imm;
return err;
}
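
A condensed sketch of the calling convention the conversions above establish: commands that need a DMA buffer borrow a fixed-size mailbox from the pci_pool created in mthca_cmd_init() instead of doing a pci_alloc_consistent() or pci_map_single() per call. CMD_NOP is used only as a placeholder opcode here, and mthca_cmd() is internal to this file, so this is an in-file sketch rather than a public API example.

static int example_mailbox_cmd(struct mthca_dev *dev, u8 *status)
{
        struct mthca_mailbox *mailbox;
        int err;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE);
        /* ... fill mailbox->buf with the command's input ... */

        err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_NOP,
                        CMD_TIME_CLASS_A, status);

        mthca_free_mailbox(dev, mailbox);
        return err;
}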
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index adf039b3c54..ed517f175dd 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -37,8 +37,7 @@
#include <ib_verbs.h>
-#define MTHCA_CMD_MAILBOX_ALIGN 16UL
-#define MTHCA_CMD_MAILBOX_EXTRA (MTHCA_CMD_MAILBOX_ALIGN - 1)
+#define MTHCA_MAILBOX_SIZE 4096
enum {
/* command completed successfully: */
@@ -112,6 +111,11 @@ enum {
DEV_LIM_FLAG_UD_MULTI = 1 << 21,
};
+struct mthca_mailbox {
+ dma_addr_t dma;
+ void *buf;
+};
+
struct mthca_dev_lim {
int max_srq_sz;
int max_qp_sz;
@@ -235,11 +239,17 @@ struct mthca_set_ib_param {
u32 cap_mask;
};
+int mthca_cmd_init(struct mthca_dev *dev);
+void mthca_cmd_cleanup(struct mthca_dev *dev);
int mthca_cmd_use_events(struct mthca_dev *dev);
void mthca_cmd_use_polling(struct mthca_dev *dev);
void mthca_cmd_event(struct mthca_dev *dev, u16 token,
u8 status, u64 out_param);
+struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
+ unsigned int gfp_mask);
+void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
+
int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status);
int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
@@ -270,41 +280,39 @@ int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status);
int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
u8 *status);
-int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry,
+int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int mpt_index, u8 *status);
-int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry,
+int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int mpt_index, u8 *status);
-int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry,
+int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int num_mtt, u8 *status);
int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status);
int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
int eq_num, u8 *status);
-int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context,
+int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int eq_num, u8 *status);
-int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context,
+int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int eq_num, u8 *status);
-int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context,
+int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status);
-int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context,
+int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status);
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
- int is_ee, void *qp_context, u32 optmask,
+ int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
u8 *status);
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
- void *qp_context, u8 *status);
+ struct mthca_mailbox *mailbox, u8 *status);
int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
u8 *status);
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
- int port, struct ib_wc* in_wc, struct ib_grh* in_grh,
+ int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad, u8 *status);
-int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm,
- u8 *status);
-int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm,
- u8 *status);
-int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash,
- u8 *status);
+int mthca_READ_MGM(struct mthca_dev *dev, int index,
+ struct mthca_mailbox *mailbox, u8 *status);
+int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
+ struct mthca_mailbox *mailbox, u8 *status);
+int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ u16 *hash, u8 *status);
int mthca_NOP(struct mthca_dev *dev, u8 *status);
-#define MAILBOX_ALIGN(x) ((void *) ALIGN((unsigned long) (x), MTHCA_CMD_MAILBOX_ALIGN))
-
#endif /* MTHCA_CMD_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 2bf347b84c3..766e9031ec4 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -171,6 +172,17 @@ static inline void set_cqe_hw(struct mthca_cqe *cqe)
cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}
+static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
+{
+ __be32 *cqe = cqe_ptr;
+
+ (void) cqe; /* avoid warning if mthca_dbg compiled away... */
+ mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
+ be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
+ be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
+}
+
/*
* incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
* should be correct before calling update_cons_index().
@@ -280,16 +292,12 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
int dbd;
u32 new_wqe;
- if (1 && cqe->syndrome != SYNDROME_WR_FLUSH_ERR) {
- int j;
-
- mthca_dbg(dev, "%x/%d: error CQE -> QPN %06x, WQE @ %08x\n",
- cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
- be32_to_cpu(cqe->wqe));
-
- for (j = 0; j < 8; ++j)
- printk(KERN_DEBUG " [%2x] %08x\n",
- j * 4, be32_to_cpu(((u32 *) cqe)[j]));
+ if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
+ mthca_dbg(dev, "local QP operation err "
+ "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
+ be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
+ cq->cqn, cq->cons_index);
+ dump_cqe(dev, cqe);
}
/*
@@ -377,15 +385,6 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
return 0;
}
-static void dump_cqe(struct mthca_cqe *cqe)
-{
- int j;
-
- for (j = 0; j < 8; ++j)
- printk(KERN_DEBUG " [%2x] %08x\n",
- j * 4, be32_to_cpu(((u32 *) cqe)[j]));
-}
-
static inline int mthca_poll_one(struct mthca_dev *dev,
struct mthca_cq *cq,
struct mthca_qp **cur_qp,
@@ -414,8 +413,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
be32_to_cpu(cqe->wqe));
-
- dump_cqe(cqe);
+ dump_cqe(dev, cqe);
}
is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
@@ -638,19 +636,19 @@ static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
int size;
if (cq->is_direct)
- pci_free_consistent(dev->pdev,
- (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
- cq->queue.direct.buf,
- pci_unmap_addr(&cq->queue.direct,
- mapping));
+ dma_free_coherent(&dev->pdev->dev,
+ (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
+ cq->queue.direct.buf,
+ pci_unmap_addr(&cq->queue.direct,
+ mapping));
else {
size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
if (cq->queue.page_list[i].buf)
- pci_free_consistent(dev->pdev, PAGE_SIZE,
- cq->queue.page_list[i].buf,
- pci_unmap_addr(&cq->queue.page_list[i],
- mapping));
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ cq->queue.page_list[i].buf,
+ pci_unmap_addr(&cq->queue.page_list[i],
+ mapping));
kfree(cq->queue.page_list);
}
@@ -670,8 +668,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
npages = 1;
shift = get_order(size) + PAGE_SHIFT;
- cq->queue.direct.buf = pci_alloc_consistent(dev->pdev,
- size, &t);
+ cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+ size, &t, GFP_KERNEL);
if (!cq->queue.direct.buf)
return -ENOMEM;
@@ -709,7 +707,8 @@ static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
for (i = 0; i < npages; ++i) {
cq->queue.page_list[i].buf =
- pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
+ dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ &t, GFP_KERNEL);
if (!cq->queue.page_list[i].buf)
goto err_free;
@@ -746,7 +745,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
struct mthca_cq *cq)
{
int size = nent * MTHCA_CQ_ENTRY_SIZE;
- void *mailbox = NULL;
+ struct mthca_mailbox *mailbox;
struct mthca_cq_context *cq_context;
int err = -ENOMEM;
u8 status;
@@ -780,12 +779,11 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
goto err_out_ci;
}
- mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
- GFP_KERNEL);
- if (!mailbox)
- goto err_out_mailbox;
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ goto err_out_arm;
- cq_context = MAILBOX_ALIGN(mailbox);
+ cq_context = mailbox->buf;
err = mthca_alloc_cq_buf(dev, size, cq);
if (err)
@@ -816,7 +814,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq_context->state_db = cpu_to_be32(cq->arm_db_index);
}
- err = mthca_SW2HW_CQ(dev, cq_context, cq->cqn, &status);
+ err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
if (err) {
mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
goto err_out_free_mr;
@@ -840,7 +838,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq->cons_index = 0;
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
return 0;
@@ -849,8 +847,9 @@ err_out_free_mr:
mthca_free_cq_buf(dev, cq);
err_out_mailbox:
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
+err_out_arm:
if (mthca_is_memfree(dev))
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
@@ -870,28 +869,26 @@ err_out:
void mthca_free_cq(struct mthca_dev *dev,
struct mthca_cq *cq)
{
- void *mailbox;
+ struct mthca_mailbox *mailbox;
int err;
u8 status;
might_sleep();
- mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
- GFP_KERNEL);
- if (!mailbox) {
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox)) {
mthca_warn(dev, "No memory for mailbox to free CQ.\n");
return;
}
- err = mthca_HW2SW_CQ(dev, MAILBOX_ALIGN(mailbox), cq->cqn, &status);
+ err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
if (err)
mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
else if (status)
- mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n",
- status);
+ mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);
if (0) {
- u32 *ctx = MAILBOX_ALIGN(mailbox);
+ u32 *ctx = mailbox->buf;
int j;
printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
@@ -919,11 +916,11 @@ void mthca_free_cq(struct mthca_dev *dev,
if (mthca_is_memfree(dev)) {
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
- mthca_table_put(dev, dev->cq_table.table, cq->cqn);
}
+ mthca_table_put(dev, dev->cq_table.table, cq->cqn);
mthca_free(&dev->cq_table.alloc, cq->cqn);
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
}
int __devinit mthca_init_cq_table(struct mthca_dev *dev)
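
The allocator switch in mthca_alloc_cq_buf()/mthca_free_cq_buf() above is mechanical but worth spelling out: pci_alloc_consistent() historically allocated with GFP_ATOMIC, while dma_alloc_coherent() takes the generic struct device plus an explicit gfp argument, letting these sleepable paths use GFP_KERNEL. A minimal sketch of the replacement pair, assuming a valid struct pci_dev *pdev:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void *alloc_page_buf_sketch(struct pci_dev *pdev, dma_addr_t *dma)
{
        /* was: pci_alloc_consistent(pdev, PAGE_SIZE, dma); */
        return dma_alloc_coherent(&pdev->dev, PAGE_SIZE, dma, GFP_KERNEL);
}

static void free_page_buf_sketch(struct pci_dev *pdev, void *buf, dma_addr_t dma)
{
        /* was: pci_free_consistent(pdev, PAGE_SIZE, buf, dma); */
        dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, dma);
}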
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index e3d79e267dc..4127f09dc5e 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -46,8 +47,8 @@
#define DRV_NAME "ib_mthca"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "0.06-pre"
-#define DRV_RELDATE "November 8, 2004"
+#define DRV_VERSION "0.06"
+#define DRV_RELDATE "June 23, 2005"
enum {
MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
@@ -98,6 +99,7 @@ enum {
};
struct mthca_cmd {
+ struct pci_pool *pool;
int use_events;
struct semaphore hcr_sem;
struct semaphore poll_sem;
@@ -379,6 +381,12 @@ void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
int mthca_pd_alloc(struct mthca_dev *dev, struct mthca_pd *pd);
void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd);
+struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size);
+void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt);
+int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
+ int start_index, u64 *buffer_list, int list_len);
+int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+ u64 iova, u64 total_size, u32 access, struct mthca_mr *mr);
int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_mr *mr);
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
diff --git a/drivers/infiniband/hw/mthca/mthca_doorbell.h b/drivers/infiniband/hw/mthca/mthca_doorbell.h
index 821039a4904..535fad7710f 100644
--- a/drivers/infiniband/hw/mthca/mthca_doorbell.h
+++ b/drivers/infiniband/hw/mthca/mthca_doorbell.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index f46d615d396..cbcf2b4722e 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -469,7 +469,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
PAGE_SIZE;
u64 *dma_list = NULL;
dma_addr_t t;
- void *mailbox = NULL;
+ struct mthca_mailbox *mailbox;
struct mthca_eq_context *eq_context;
int err = -ENOMEM;
int i;
@@ -494,17 +494,16 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
if (!dma_list)
goto err_out_free;
- mailbox = kmalloc(sizeof *eq_context + MTHCA_CMD_MAILBOX_EXTRA,
- GFP_KERNEL);
- if (!mailbox)
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
goto err_out_free;
- eq_context = MAILBOX_ALIGN(mailbox);
+ eq_context = mailbox->buf;
for (i = 0; i < npages; ++i) {
- eq->page_list[i].buf = pci_alloc_consistent(dev->pdev,
- PAGE_SIZE, &t);
+ eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
+ PAGE_SIZE, &t, GFP_KERNEL);
if (!eq->page_list[i].buf)
- goto err_out_free;
+ goto err_out_free_pages;
dma_list[i] = t;
pci_unmap_addr_set(&eq->page_list[i], mapping, t);
@@ -517,7 +516,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
eq->eqn = mthca_alloc(&dev->eq_table.alloc);
if (eq->eqn == -1)
- goto err_out_free;
+ goto err_out_free_pages;
err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
dma_list, PAGE_SHIFT, npages,
@@ -548,7 +547,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
eq_context->intr = intr;
eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);
- err = mthca_SW2HW_EQ(dev, eq_context, eq->eqn, &status);
+ err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
if (err) {
mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
goto err_out_free_mr;
@@ -561,7 +560,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
}
kfree(dma_list);
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
eq->eqn_mask = swab32(1 << eq->eqn);
eq->cons_index = 0;
@@ -579,17 +578,19 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
err_out_free_eq:
mthca_free(&dev->eq_table.alloc, eq->eqn);
- err_out_free:
+ err_out_free_pages:
for (i = 0; i < npages; ++i)
if (eq->page_list[i].buf)
- pci_free_consistent(dev->pdev, PAGE_SIZE,
- eq->page_list[i].buf,
- pci_unmap_addr(&eq->page_list[i],
- mapping));
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ eq->page_list[i].buf,
+ pci_unmap_addr(&eq->page_list[i],
+ mapping));
+
+ mthca_free_mailbox(dev, mailbox);
+ err_out_free:
kfree(eq->page_list);
kfree(dma_list);
- kfree(mailbox);
err_out:
return err;
@@ -598,25 +599,22 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
static void mthca_free_eq(struct mthca_dev *dev,
struct mthca_eq *eq)
{
- void *mailbox = NULL;
+ struct mthca_mailbox *mailbox;
int err;
u8 status;
int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
PAGE_SIZE;
int i;
- mailbox = kmalloc(sizeof (struct mthca_eq_context) + MTHCA_CMD_MAILBOX_EXTRA,
- GFP_KERNEL);
- if (!mailbox)
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
return;
- err = mthca_HW2SW_EQ(dev, MAILBOX_ALIGN(mailbox),
- eq->eqn, &status);
+ err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
if (err)
mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
if (status)
- mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n",
- status);
+ mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);
dev->eq_table.arm_mask &= ~eq->eqn_mask;
@@ -625,7 +623,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
if (i % 4 == 0)
printk("[%02x] ", i * 4);
- printk(" %08x", be32_to_cpup(MAILBOX_ALIGN(mailbox) + i * 4));
+ printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
if ((i + 1) % 4 == 0)
printk("\n");
}
@@ -638,7 +636,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
pci_unmap_addr(&eq->page_list[i], mapping));
kfree(eq->page_list);
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
}
static void mthca_free_irqs(struct mthca_dev *dev)
@@ -709,8 +707,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
dev->fw.arbel.eq_arm_base) + 4, 4,
&dev->eq_regs.arbel.eq_arm)) {
- mthca_err(dev, "Couldn't map interrupt clear register, "
- "aborting.\n");
+ mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
dev->clr_base);
@@ -721,8 +718,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
dev->fw.arbel.eq_set_ci_base,
MTHCA_EQ_SET_CI_SIZE,
&dev->eq_regs.arbel.eq_set_ci_base)) {
- mthca_err(dev, "Couldn't map interrupt clear register, "
- "aborting.\n");
+ mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
dev->fw.arbel.eq_arm_base) + 4, 4,
dev->eq_regs.arbel.eq_arm);
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index d40590356df..09519b604c0 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -69,7 +70,7 @@ MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");
#endif /* CONFIG_PCI_MSI */
static const char mthca_version[] __devinitdata =
- "ib_mthca: Mellanox InfiniBand HCA driver v"
+ DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mthca_profile default_profile = {
@@ -927,13 +928,13 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
*/
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
pci_resource_len(pdev, 0) != 1 << 20) {
- dev_err(&pdev->dev, "Missing DCS, aborting.");
+ dev_err(&pdev->dev, "Missing DCS, aborting.\n");
err = -ENODEV;
goto err_disable_pdev;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM) ||
pci_resource_len(pdev, 2) != 1 << 23) {
- dev_err(&pdev->dev, "Missing UAR, aborting.");
+ dev_err(&pdev->dev, "Missing UAR, aborting.\n");
err = -ENODEV;
goto err_disable_pdev;
}
@@ -1004,25 +1005,18 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
!pci_enable_msi(pdev))
mdev->mthca_flags |= MTHCA_FLAG_MSI;
- sema_init(&mdev->cmd.hcr_sem, 1);
- sema_init(&mdev->cmd.poll_sem, 1);
- mdev->cmd.use_events = 0;
-
- mdev->hcr = ioremap(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE, MTHCA_HCR_SIZE);
- if (!mdev->hcr) {
- mthca_err(mdev, "Couldn't map command register, "
- "aborting.\n");
- err = -ENOMEM;
+ if (mthca_cmd_init(mdev)) {
+ mthca_err(mdev, "Failed to init command interface, aborting.\n");
goto err_free_dev;
}
err = mthca_tune_pci(mdev);
if (err)
- goto err_iounmap;
+ goto err_cmd;
err = mthca_init_hca(mdev);
if (err)
- goto err_iounmap;
+ goto err_cmd;
if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) {
mthca_warn(mdev, "HCA FW version %x.%x.%x is old (%x.%x.%x is current).\n",
@@ -1070,8 +1064,8 @@ err_cleanup:
err_close:
mthca_close_hca(mdev);
-err_iounmap:
- iounmap(mdev->hcr);
+err_cmd:
+ mthca_cmd_cleanup(mdev);
err_free_dev:
if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
@@ -1118,10 +1112,8 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev)
iounmap(mdev->kar);
mthca_uar_free(mdev, &mdev->driver_uar);
mthca_cleanup_uar_table(mdev);
-
mthca_close_hca(mdev);
-
- iounmap(mdev->hcr);
+ mthca_cmd_cleanup(mdev);
if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
pci_disable_msix(pdev);
@@ -1163,7 +1155,7 @@ static struct pci_device_id mthca_pci_table[] = {
MODULE_DEVICE_TABLE(pci, mthca_pci_table);
static struct pci_driver mthca_driver = {
- .name = "ib_mthca",
+ .name = DRV_NAME,
.id_table = mthca_pci_table,
.probe = mthca_init_one,
.remove = __devexit_p(mthca_remove_one)
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 70a6553a588..5be7d949dbf 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -66,22 +66,23 @@ static const u8 zero_gid[16]; /* automatically initialized to 0 */
* entry in hash chain and *mgm holds end of hash chain.
*/
static int find_mgm(struct mthca_dev *dev,
- u8 *gid, struct mthca_mgm *mgm,
+ u8 *gid, struct mthca_mailbox *mgm_mailbox,
u16 *hash, int *prev, int *index)
{
- void *mailbox;
+ struct mthca_mailbox *mailbox;
+ struct mthca_mgm *mgm = mgm_mailbox->buf;
u8 *mgid;
int err;
u8 status;
- mailbox = kmalloc(16 + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
- if (!mailbox)
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
return -ENOMEM;
- mgid = MAILBOX_ALIGN(mailbox);
+ mgid = mailbox->buf;
memcpy(mgid, gid, 16);
- err = mthca_MGID_HASH(dev, mgid, hash, &status);
+ err = mthca_MGID_HASH(dev, mailbox, hash, &status);
if (err)
goto out;
if (status) {
@@ -103,7 +104,7 @@ static int find_mgm(struct mthca_dev *dev,
*prev = -1;
do {
- err = mthca_READ_MGM(dev, *index, mgm, &status);
+ err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status);
if (err)
goto out;
if (status) {
@@ -129,14 +130,14 @@ static int find_mgm(struct mthca_dev *dev,
*index = -1;
out:
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
- void *mailbox;
+ struct mthca_mailbox *mailbox;
struct mthca_mgm *mgm;
u16 hash;
int index, prev;
@@ -145,15 +146,15 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int err;
u8 status;
- mailbox = kmalloc(sizeof *mgm + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
- if (!mailbox)
- return -ENOMEM;
- mgm = MAILBOX_ALIGN(mailbox);
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ mgm = mailbox->buf;
if (down_interruptible(&dev->mcg_table.sem))
return -EINTR;
- err = find_mgm(dev, gid->raw, mgm, &hash, &prev, &index);
+ err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
if (err)
goto out;
@@ -170,7 +171,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
goto out;
}
- err = mthca_READ_MGM(dev, index, mgm, &status);
+ err = mthca_READ_MGM(dev, index, mailbox, &status);
if (err)
goto out;
if (status) {
@@ -195,7 +196,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
goto out;
}
- err = mthca_WRITE_MGM(dev, index, mgm, &status);
+ err = mthca_WRITE_MGM(dev, index, mailbox, &status);
if (err)
goto out;
if (status) {
@@ -206,7 +207,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (!link)
goto out;
- err = mthca_READ_MGM(dev, prev, mgm, &status);
+ err = mthca_READ_MGM(dev, prev, mailbox, &status);
if (err)
goto out;
if (status) {
@@ -217,7 +218,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
mgm->next_gid_index = cpu_to_be32(index << 5);
- err = mthca_WRITE_MGM(dev, prev, mgm, &status);
+ err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
if (err)
goto out;
if (status) {
@@ -227,14 +228,14 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
out:
up(&dev->mcg_table.sem);
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
- void *mailbox;
+ struct mthca_mailbox *mailbox;
struct mthca_mgm *mgm;
u16 hash;
int prev, index;
@@ -242,15 +243,15 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int err;
u8 status;
- mailbox = kmalloc(sizeof *mgm + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
- if (!mailbox)
- return -ENOMEM;
- mgm = MAILBOX_ALIGN(mailbox);
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ mgm = mailbox->buf;
if (down_interruptible(&dev->mcg_table.sem))
return -EINTR;
- err = find_mgm(dev, gid->raw, mgm, &hash, &prev, &index);
+ err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
if (err)
goto out;
@@ -285,7 +286,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
mgm->qp[loc] = mgm->qp[i - 1];
mgm->qp[i - 1] = 0;
- err = mthca_WRITE_MGM(dev, index, mgm, &status);
+ err = mthca_WRITE_MGM(dev, index, mailbox, &status);
if (err)
goto out;
if (status) {
@@ -304,7 +305,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (be32_to_cpu(mgm->next_gid_index) >> 5) {
err = mthca_READ_MGM(dev,
be32_to_cpu(mgm->next_gid_index) >> 5,
- mgm, &status);
+ mailbox, &status);
if (err)
goto out;
if (status) {
@@ -316,7 +317,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
} else
memset(mgm->gid, 0, 16);
- err = mthca_WRITE_MGM(dev, index, mgm, &status);
+ err = mthca_WRITE_MGM(dev, index, mailbox, &status);
if (err)
goto out;
if (status) {
@@ -327,7 +328,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
} else {
/* Remove entry from AMGM */
index = be32_to_cpu(mgm->next_gid_index) >> 5;
- err = mthca_READ_MGM(dev, prev, mgm, &status);
+ err = mthca_READ_MGM(dev, prev, mailbox, &status);
if (err)
goto out;
if (status) {
@@ -338,7 +339,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
mgm->next_gid_index = cpu_to_be32(index << 5);
- err = mthca_WRITE_MGM(dev, prev, mgm, &status);
+ err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
if (err)
goto out;
if (status) {
@@ -350,7 +351,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
out:
up(&dev->mcg_table.sem);
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
return err;
}
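Every converted caller in mthca_mcg.c, and throughout this series, follows the same shape: the old kmalloc()/MAILBOX_ALIGN() pair becomes a struct mthca_mailbox whose command buffer is reached through ->buf, allocation failure is reported via the ERR_PTR convention, and the firmware commands now take the mailbox itself rather than an aligned pointer. A minimal caller sketch built only from helpers visible in this patch (mthca_alloc_mailbox, mthca_free_mailbox, mthca_READ_MGM); it assumes linux/err.h, linux/string.h and the driver's internal headers (e.g. "mthca_cmd.h", shown elsewhere in the patch) are in scope:

/*
 * Read one MGM entry through the new mailbox interface.  "index" is
 * whatever hash-chain slot the caller wants; the copy size assumes the
 * MGM entry fits in a mailbox, as the driver's own callers do.
 */
static int read_one_mgm(struct mthca_dev *dev, int index, struct mthca_mgm *out)
{
        struct mthca_mailbox *mailbox;
        u8 status;
        int err;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);        /* typically -ENOMEM */

        err = mthca_READ_MGM(dev, index, mailbox, &status);
        if (!err && status)
                err = -EINVAL;                  /* firmware refused the command */
        if (!err)
                memcpy(out, mailbox->buf, sizeof *out);

        mthca_free_mailbox(dev, mailbox);
        return err;
}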
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 637b30e3559..6d3b05dd9e3 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -179,9 +179,14 @@ out:
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
- int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
+ int i;
u8 status;
+ if (!mthca_is_memfree(dev))
+ return;
+
+ i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
+
down(&table->mutex);
if (--table->icm[i]->refcount == 0) {
@@ -256,6 +261,9 @@ void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
{
int i;
+ if (!mthca_is_memfree(dev))
+ return;
+
for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
mthca_table_put(dev, table, i);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 8960fc2306b..cbe50feaf68 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -40,6 +40,12 @@
#include "mthca_cmd.h"
#include "mthca_memfree.h"
+struct mthca_mtt {
+ struct mthca_buddy *buddy;
+ int order;
+ u32 first_seg;
+};
+
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
@@ -173,8 +179,8 @@ static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy)
kfree(buddy->bits);
}
-static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order,
- struct mthca_buddy *buddy)
+static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
+ struct mthca_buddy *buddy)
{
u32 seg = mthca_buddy_alloc(buddy, order);
@@ -191,14 +197,102 @@ static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order,
return seg;
}
-static void mthca_free_mtt(struct mthca_dev *dev, u32 seg, int order,
- struct mthca_buddy* buddy)
+static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
+ struct mthca_buddy *buddy)
{
- mthca_buddy_free(buddy, seg, order);
+ struct mthca_mtt *mtt;
+ int i;
- if (mthca_is_memfree(dev))
- mthca_table_put_range(dev, dev->mr_table.mtt_table, seg,
- seg + (1 << order) - 1);
+ if (size <= 0)
+ return ERR_PTR(-EINVAL);
+
+ mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
+ if (!mtt)
+ return ERR_PTR(-ENOMEM);
+
+ mtt->buddy = buddy;
+ mtt->order = 0;
+ for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
+ ++mtt->order;
+
+ mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
+ if (mtt->first_seg == -1) {
+ kfree(mtt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mtt;
+}
+
+struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
+{
+ return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
+}
+
+void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
+{
+ if (!mtt)
+ return;
+
+ mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);
+
+ mthca_table_put_range(dev, dev->mr_table.mtt_table,
+ mtt->first_seg,
+ mtt->first_seg + (1 << mtt->order) - 1);
+
+ kfree(mtt);
+}
+
+int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
+ int start_index, u64 *buffer_list, int list_len)
+{
+ struct mthca_mailbox *mailbox;
+ u64 *mtt_entry;
+ int err = 0;
+ u8 status;
+ int i;
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ mtt_entry = mailbox->buf;
+
+ while (list_len > 0) {
+ mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
+ mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+ start_index * 8);
+ mtt_entry[1] = 0;
+ for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
+ mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
+ MTHCA_MTT_FLAG_PRESENT);
+
+ /*
+ * If we have an odd number of entries to write, add
+ * one more dummy entry for firmware efficiency.
+ */
+ if (i & 1)
+ mtt_entry[i + 2] = 0;
+
+ err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
+ if (err) {
+ mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
+ goto out;
+ }
+ if (status) {
+ mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
+ status);
+ err = -EINVAL;
+ goto out;
+ }
+
+ list_len -= i;
+ start_index += i;
+ buffer_list += i;
+ }
+
+out:
+ mthca_free_mailbox(dev, mailbox);
+ return err;
}
static inline u32 tavor_hw_index_to_key(u32 ind)
@@ -237,91 +331,18 @@ static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
return tavor_key_to_hw_index(key);
}
-int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
- u32 access, struct mthca_mr *mr)
+int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+ u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
- void *mailbox = NULL;
+ struct mthca_mailbox *mailbox;
struct mthca_mpt_entry *mpt_entry;
u32 key;
+ int i;
int err;
u8 status;
might_sleep();
- mr->order = -1;
- key = mthca_alloc(&dev->mr_table.mpt_alloc);
- if (key == -1)
- return -ENOMEM;
- mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
-
- if (mthca_is_memfree(dev)) {
- err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
- if (err)
- goto err_out_mpt_free;
- }
-
- mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA,
- GFP_KERNEL);
- if (!mailbox) {
- err = -ENOMEM;
- goto err_out_table;
- }
- mpt_entry = MAILBOX_ALIGN(mailbox);
-
- mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
- MTHCA_MPT_FLAG_MIO |
- MTHCA_MPT_FLAG_PHYSICAL |
- MTHCA_MPT_FLAG_REGION |
- access);
- mpt_entry->page_size = 0;
- mpt_entry->key = cpu_to_be32(key);
- mpt_entry->pd = cpu_to_be32(pd);
- mpt_entry->start = 0;
- mpt_entry->length = ~0ULL;
-
- memset(&mpt_entry->lkey, 0,
- sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
-
- err = mthca_SW2HW_MPT(dev, mpt_entry,
- key & (dev->limits.num_mpts - 1),
- &status);
- if (err) {
- mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
- goto err_out_table;
- } else if (status) {
- mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto err_out_table;
- }
-
- kfree(mailbox);
- return err;
-
-err_out_table:
- if (mthca_is_memfree(dev))
- mthca_table_put(dev, dev->mr_table.mpt_table, key);
-
-err_out_mpt_free:
- mthca_free(&dev->mr_table.mpt_alloc, key);
- kfree(mailbox);
- return err;
-}
-
-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
- u64 *buffer_list, int buffer_size_shift,
- int list_len, u64 iova, u64 total_size,
- u32 access, struct mthca_mr *mr)
-{
- void *mailbox;
- u64 *mtt_entry;
- struct mthca_mpt_entry *mpt_entry;
- u32 key;
- int err = -ENOMEM;
- u8 status;
- int i;
-
- might_sleep();
WARN_ON(buffer_size_shift >= 32);
key = mthca_alloc(&dev->mr_table.mpt_alloc);
@@ -335,75 +356,33 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
goto err_out_mpt_free;
}
- for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0;
- i < list_len;
- i <<= 1, ++mr->order)
- ; /* nothing */
-
- mr->first_seg = mthca_alloc_mtt(dev, mr->order,
- &dev->mr_table.mtt_buddy);
- if (mr->first_seg == -1)
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
goto err_out_table;
-
- /*
- * If list_len is odd, we add one more dummy entry for
- * firmware efficiency.
- */
- mailbox = kmalloc(max(sizeof *mpt_entry,
- (size_t) 8 * (list_len + (list_len & 1) + 2)) +
- MTHCA_CMD_MAILBOX_EXTRA,
- GFP_KERNEL);
- if (!mailbox)
- goto err_out_free_mtt;
-
- mtt_entry = MAILBOX_ALIGN(mailbox);
-
- mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
- mr->first_seg * MTHCA_MTT_SEG_SIZE);
- mtt_entry[1] = 0;
- for (i = 0; i < list_len; ++i)
- mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
- MTHCA_MTT_FLAG_PRESENT);
- if (list_len & 1) {
- mtt_entry[i + 2] = 0;
- ++list_len;
- }
-
- if (0) {
- mthca_dbg(dev, "Dumping MPT entry\n");
- for (i = 0; i < list_len + 2; ++i)
- printk(KERN_ERR "[%2d] %016llx\n",
- i, (unsigned long long) be64_to_cpu(mtt_entry[i]));
- }
-
- err = mthca_WRITE_MTT(dev, mtt_entry, list_len, &status);
- if (err) {
- mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
- goto err_out_mailbox_free;
- }
- if (status) {
- mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto err_out_mailbox_free;
}
-
- mpt_entry = MAILBOX_ALIGN(mailbox);
+ mpt_entry = mailbox->buf;
mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
MTHCA_MPT_FLAG_MIO |
MTHCA_MPT_FLAG_REGION |
access);
+ if (!mr->mtt)
+ mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);
mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
mpt_entry->key = cpu_to_be32(key);
mpt_entry->pd = cpu_to_be32(pd);
mpt_entry->start = cpu_to_be64(iova);
mpt_entry->length = cpu_to_be64(total_size);
+
memset(&mpt_entry->lkey, 0,
sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
- mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base +
- mr->first_seg * MTHCA_MTT_SEG_SIZE);
+
+ if (mr->mtt)
+ mpt_entry->mtt_seg =
+ cpu_to_be64(dev->mr_table.mtt_base +
+ mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);
if (0) {
mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
@@ -416,45 +395,70 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
}
}
- err = mthca_SW2HW_MPT(dev, mpt_entry,
+ err = mthca_SW2HW_MPT(dev, mailbox,
key & (dev->limits.num_mpts - 1),
&status);
- if (err)
+ if (err) {
mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
- else if (status) {
+ goto err_out_mailbox;
+ } else if (status) {
mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
status);
err = -EINVAL;
+ goto err_out_mailbox;
}
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
return err;
-err_out_mailbox_free:
- kfree(mailbox);
-
-err_out_free_mtt:
- mthca_free_mtt(dev, mr->first_seg, mr->order, &dev->mr_table.mtt_buddy);
+err_out_mailbox:
+ mthca_free_mailbox(dev, mailbox);
err_out_table:
- if (mthca_is_memfree(dev))
- mthca_table_put(dev, dev->mr_table.mpt_table, key);
+ mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
mthca_free(&dev->mr_table.mpt_alloc, key);
return err;
}
-/* Free mr or fmr */
-static void mthca_free_region(struct mthca_dev *dev, u32 lkey, int order,
- u32 first_seg, struct mthca_buddy *buddy)
+int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
+ u32 access, struct mthca_mr *mr)
{
- if (order >= 0)
- mthca_free_mtt(dev, first_seg, order, buddy);
+ mr->mtt = NULL;
+ return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
+}
- if (mthca_is_memfree(dev))
- mthca_table_put(dev, dev->mr_table.mpt_table,
- arbel_key_to_hw_index(lkey));
+int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
+ u64 *buffer_list, int buffer_size_shift,
+ int list_len, u64 iova, u64 total_size,
+ u32 access, struct mthca_mr *mr)
+{
+ int err;
+
+ mr->mtt = mthca_alloc_mtt(dev, list_len);
+ if (IS_ERR(mr->mtt))
+ return PTR_ERR(mr->mtt);
+
+ err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
+ if (err) {
+ mthca_free_mtt(dev, mr->mtt);
+ return err;
+ }
+
+ err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
+ total_size, access, mr);
+ if (err)
+ mthca_free_mtt(dev, mr->mtt);
+
+ return err;
+}
+
+/* Free mr or fmr */
+static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
+{
+ mthca_table_put(dev, dev->mr_table.mpt_table,
+ arbel_key_to_hw_index(lkey));
mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}
@@ -476,15 +480,15 @@ void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
status);
- mthca_free_region(dev, mr->ibmr.lkey, mr->order, mr->first_seg,
- &dev->mr_table.mtt_buddy);
+ mthca_free_region(dev, mr->ibmr.lkey);
+ mthca_free_mtt(dev, mr->mtt);
}
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_fmr *mr)
{
struct mthca_mpt_entry *mpt_entry;
- void *mailbox;
+ struct mthca_mailbox *mailbox;
u64 mtt_seg;
u32 key, idx;
u8 status;
@@ -522,31 +526,24 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
sizeof *(mr->mem.tavor.mpt) * idx;
- for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0;
- i < list_len;
- i <<= 1, ++mr->order)
- ; /* nothing */
-
- mr->first_seg = mthca_alloc_mtt(dev, mr->order,
- dev->mr_table.fmr_mtt_buddy);
- if (mr->first_seg == -1)
+ mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
+ if (IS_ERR(mr->mtt))
goto err_out_table;
- mtt_seg = mr->first_seg * MTHCA_MTT_SEG_SIZE;
+ mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
if (mthca_is_memfree(dev)) {
mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
- mr->first_seg);
+ mr->mtt->first_seg);
BUG_ON(!mr->mem.arbel.mtts);
} else
mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
- mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA,
- GFP_KERNEL);
- if (!mailbox)
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
goto err_out_free_mtt;
- mpt_entry = MAILBOX_ALIGN(mailbox);
+ mpt_entry = mailbox->buf;
mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
MTHCA_MPT_FLAG_MIO |
@@ -571,7 +568,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
}
}
- err = mthca_SW2HW_MPT(dev, mpt_entry,
+ err = mthca_SW2HW_MPT(dev, mailbox,
key & (dev->limits.num_mpts - 1),
&status);
if (err) {
@@ -585,19 +582,17 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
goto err_out_mailbox_free;
}
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
return 0;
err_out_mailbox_free:
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
err_out_free_mtt:
- mthca_free_mtt(dev, mr->first_seg, mr->order,
- dev->mr_table.fmr_mtt_buddy);
+ mthca_free_mtt(dev, mr->mtt);
err_out_table:
- if (mthca_is_memfree(dev))
- mthca_table_put(dev, dev->mr_table.mpt_table, key);
+ mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey);
@@ -609,8 +604,9 @@ int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
if (fmr->maps)
return -EBUSY;
- mthca_free_region(dev, fmr->ibmr.lkey, fmr->order, fmr->first_seg,
- dev->mr_table.fmr_mtt_buddy);
+ mthca_free_region(dev, fmr->ibmr.lkey);
+ mthca_free_mtt(dev, fmr->mtt);
+
return 0;
}
@@ -826,7 +822,8 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
if (dev->limits.reserved_mtts) {
i = fls(dev->limits.reserved_mtts - 1);
- if (mthca_alloc_mtt(dev, i, dev->mr_table.fmr_mtt_buddy) == -1) {
+ if (mthca_alloc_mtt_range(dev, i,
+ dev->mr_table.fmr_mtt_buddy) == -1) {
mthca_warn(dev, "MTT table of order %d is too small.\n",
dev->mr_table.fmr_mtt_buddy->max_order);
err = -ENOMEM;
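The mthca_mr.c changes replace the (order, first_seg) pair that used to live in each MR with an opaque struct mthca_mtt handle: __mthca_alloc_mtt() rounds the requested entry count up to a buddy order (if a segment held eight entries, a 20-entry list would round up to order 2, i.e. four segments), mthca_write_mtt() streams the page list into the translation table in mailbox-sized chunks, and mthca_free_mtt() returns both the buddy range and the ICM reference. A caller sketch using only the three exported entry points, mirroring what mthca_mr_alloc_phys() above now does; PAGE_SHIFT as the buffer size shift is an assumption for page-sized chunks, and the mthca types come from the driver's internal headers:

/*
 * Map list_len page addresses into an MR via the new MTT interface.
 */
static int map_pages(struct mthca_dev *dev, u32 pd, u64 *buffer_list,
                     int list_len, u64 iova, u64 total_size, u32 access,
                     struct mthca_mr *mr)
{
        int err;

        mr->mtt = mthca_alloc_mtt(dev, list_len);       /* ERR_PTR on failure */
        if (IS_ERR(mr->mtt))
                return PTR_ERR(mr->mtt);

        err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
        if (!err)
                err = mthca_mr_alloc(dev, pd, PAGE_SHIFT, iova,
                                     total_size, access, mr);
        if (err)
                mthca_free_mtt(dev, mr->mtt);

        return err;
}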
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 159f4e6c312..0b5adfd9159 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -52,7 +53,7 @@ static int mthca_query_device(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
- memset(props, 0, sizeof props);
+ memset(props, 0, sizeof *props);
props->fw_ver = mdev->fw_ver;
@@ -558,6 +559,7 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
convert_access(acc), mr);
if (err) {
+ kfree(page_list);
kfree(mr);
return ERR_PTR(err);
}
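Two small but real fixes land in mthca_provider.c: the error path of mthca_reg_phys_mr() now frees page_list instead of leaking it, and mthca_query_device() clears the whole attribute structure rather than just a pointer's worth of it. The latter is the classic sizeof pitfall, shown here in plain C with a throwaway struct:

#include <string.h>

struct props { int a, b, c; };

static void clear_wrong(struct props *p)
{
        memset(p, 0, sizeof p);         /* wrong: size of the pointer, 4 or 8 bytes */
}

static void clear_right(struct props *p)
{
        memset(p, 0, sizeof *p);        /* right: size of the pointed-to structure */
}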
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 619710f95a8..4d976cccb1a 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -54,18 +54,18 @@ struct mthca_uar {
int index;
};
+struct mthca_mtt;
+
struct mthca_mr {
- struct ib_mr ibmr;
- int order;
- u32 first_seg;
+ struct ib_mr ibmr;
+ struct mthca_mtt *mtt;
};
struct mthca_fmr {
- struct ib_fmr ibmr;
+ struct ib_fmr ibmr;
struct ib_fmr_attr attr;
- int order;
- u32 first_seg;
- int maps;
+ struct mthca_mtt *mtt;
+ int maps;
union {
struct {
struct mthca_mpt_entry __iomem *mpt;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index ca73bab11a0..163a8ef4186 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -357,6 +357,9 @@ static const struct {
[UD] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_QKEY),
+ [UC] = (IB_QP_PKEY_INDEX |
+ IB_QP_PORT |
+ IB_QP_ACCESS_FLAGS),
[RC] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_ACCESS_FLAGS),
@@ -378,6 +381,9 @@ static const struct {
[UD] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_QKEY),
+ [UC] = (IB_QP_PKEY_INDEX |
+ IB_QP_PORT |
+ IB_QP_ACCESS_FLAGS),
[RC] = (IB_QP_PKEY_INDEX |
IB_QP_PORT |
IB_QP_ACCESS_FLAGS),
@@ -388,6 +394,11 @@ static const struct {
[IB_QPS_RTR] = {
.trans = MTHCA_TRANS_INIT2RTR,
.req_param = {
+ [UC] = (IB_QP_AV |
+ IB_QP_PATH_MTU |
+ IB_QP_DEST_QPN |
+ IB_QP_RQ_PSN |
+ IB_QP_MAX_DEST_RD_ATOMIC),
[RC] = (IB_QP_AV |
IB_QP_PATH_MTU |
IB_QP_DEST_QPN |
@@ -398,6 +409,9 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
+ [UC] = (IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PKEY_INDEX),
[RC] = (IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX),
@@ -413,6 +427,8 @@ static const struct {
.trans = MTHCA_TRANS_RTR2RTS,
.req_param = {
[UD] = IB_QP_SQ_PSN,
+ [UC] = (IB_QP_SQ_PSN |
+ IB_QP_MAX_QP_RD_ATOMIC),
[RC] = (IB_QP_TIMEOUT |
IB_QP_RETRY_CNT |
IB_QP_RNR_RETRY |
@@ -423,6 +439,11 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
+ [UC] = (IB_QP_CUR_STATE |
+ IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PKEY_INDEX |
+ IB_QP_PATH_MIG_STATE),
[RC] = (IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
@@ -442,6 +463,9 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
+ [UC] = (IB_QP_ACCESS_FLAGS |
+ IB_QP_ALT_PATH |
+ IB_QP_PATH_MIG_STATE),
[RC] = (IB_QP_ACCESS_FLAGS |
IB_QP_ALT_PATH |
IB_QP_PATH_MIG_STATE |
@@ -462,6 +486,10 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
+ [UC] = (IB_QP_CUR_STATE |
+ IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PATH_MIG_STATE),
[RC] = (IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
@@ -476,6 +504,14 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_PKEY_INDEX |
IB_QP_QKEY),
+ [UC] = (IB_QP_AV |
+ IB_QP_MAX_QP_RD_ATOMIC |
+ IB_QP_MAX_DEST_RD_ATOMIC |
+ IB_QP_CUR_STATE |
+ IB_QP_ALT_PATH |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PKEY_INDEX |
+ IB_QP_PATH_MIG_STATE),
[RC] = (IB_QP_AV |
IB_QP_TIMEOUT |
IB_QP_RETRY_CNT |
@@ -501,6 +537,7 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
+ [UC] = (IB_QP_CUR_STATE),
[RC] = (IB_QP_CUR_STATE |
IB_QP_MIN_RNR_TIMER),
[MLX] = (IB_QP_CUR_STATE |
@@ -552,7 +589,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
- void *mailbox = NULL;
+ struct mthca_mailbox *mailbox;
struct mthca_qp_param *qp_param;
struct mthca_qp_context *qp_context;
u32 req_param, opt_param;
@@ -609,10 +646,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
return -EINVAL;
}
- mailbox = kmalloc(sizeof (*qp_param) + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
- if (!mailbox)
- return -ENOMEM;
- qp_param = MAILBOX_ALIGN(mailbox);
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ qp_param = mailbox->buf;
qp_context = &qp_param->context;
memset(qp_param, 0, sizeof *qp_param);
@@ -683,7 +720,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
if (attr_mask & IB_QP_AV) {
qp_context->pri_path.g_mylmc = attr->ah_attr.src_path_bits & 0x7f;
qp_context->pri_path.rlid = cpu_to_be16(attr->ah_attr.dlid);
- qp_context->pri_path.static_rate = (!!attr->ah_attr.static_rate) << 3;
+ qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
if (attr->ah_attr.ah_flags & IB_AH_GRH) {
qp_context->pri_path.g_mylmc |= 1 << 7;
qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
@@ -724,9 +761,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
}
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- qp_context->params1 |= cpu_to_be32(min(attr->max_dest_rd_atomic ?
- ffs(attr->max_dest_rd_atomic) - 1 : 0,
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
+ ffs(attr->max_rd_atomic) - 1 : 0,
7) << 21);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
}
@@ -764,10 +801,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp->atomic_rd_en = attr->qp_access_flags;
}
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
u8 rra_max;
- if (qp->resp_depth && !attr->max_rd_atomic) {
+ if (qp->resp_depth && !attr->max_dest_rd_atomic) {
/*
* Lowering our responder resources to zero.
* Turn off RDMA/atomics as responder.
@@ -778,7 +815,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
MTHCA_QP_OPTPAR_RAE);
}
- if (!qp->resp_depth && attr->max_rd_atomic) {
+ if (!qp->resp_depth && attr->max_dest_rd_atomic) {
/*
* Increasing our responder resources from
* zero. Turn on RDMA/atomics as appropriate.
@@ -799,7 +836,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
}
for (rra_max = 0;
- 1 << rra_max < attr->max_rd_atomic &&
+ 1 << rra_max < attr->max_dest_rd_atomic &&
rra_max < dev->qp_table.rdb_shift;
++rra_max)
; /* nothing */
@@ -807,7 +844,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_context->params2 |= cpu_to_be32(rra_max << 21);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
- qp->resp_depth = attr->max_rd_atomic;
+ qp->resp_depth = attr->max_dest_rd_atomic;
}
qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
@@ -835,7 +872,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
}
err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
- qp->qpn, 0, qp_param, 0, &status);
+ qp->qpn, 0, mailbox, 0, &status);
if (status) {
mthca_warn(dev, "modify QP %d returned status %02x.\n",
state_table[cur_state][new_state].trans, status);
@@ -845,7 +882,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
if (!err)
qp->state = new_state;
- kfree(mailbox);
+ mthca_free_mailbox(dev, mailbox);
if (is_sqp(dev, qp))
store_attrs(to_msqp(qp), attr, attr_mask);
@@ -934,7 +971,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
size, shift);
- qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
+ qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
+ &t, GFP_KERNEL);
if (!qp->queue.direct.buf)
goto err_out;
@@ -973,7 +1011,8 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
for (i = 0; i < npages; ++i) {
qp->queue.page_list[i].buf =
- pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
+ dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ &t, GFP_KERNEL);
if (!qp->queue.page_list[i].buf)
goto err_out_free;
@@ -996,16 +1035,15 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
err_out_free:
if (qp->is_direct) {
- pci_free_consistent(dev->pdev, size,
- qp->queue.direct.buf,
- pci_unmap_addr(&qp->queue.direct, mapping));
+ dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
+ pci_unmap_addr(&qp->queue.direct, mapping));
} else
for (i = 0; i < npages; ++i) {
if (qp->queue.page_list[i].buf)
- pci_free_consistent(dev->pdev, PAGE_SIZE,
- qp->queue.page_list[i].buf,
- pci_unmap_addr(&qp->queue.page_list[i],
- mapping));
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ qp->queue.page_list[i].buf,
+ pci_unmap_addr(&qp->queue.page_list[i],
+ mapping));
}
@@ -1073,11 +1111,12 @@ static void mthca_free_memfree(struct mthca_dev *dev,
if (mthca_is_memfree(dev)) {
mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
- mthca_table_put(dev, dev->qp_table.rdb_table,
- qp->qpn << dev->qp_table.rdb_shift);
- mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
- mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
+
+ mthca_table_put(dev, dev->qp_table.rdb_table,
+ qp->qpn << dev->qp_table.rdb_shift);
+ mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
+ mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static void mthca_wq_init(struct mthca_wq* wq)
@@ -1529,6 +1568,26 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
+ case UC:
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ((struct mthca_raddr_seg *) wqe)->raddr =
+ cpu_to_be64(wr->wr.rdma.remote_addr);
+ ((struct mthca_raddr_seg *) wqe)->rkey =
+ cpu_to_be32(wr->wr.rdma.rkey);
+ ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+ wqe += sizeof (struct mthca_raddr_seg);
+ size += sizeof (struct mthca_raddr_seg) / 16;
+ break;
+
+ default:
+ /* No extra segments required for sends */
+ break;
+ }
+
+ break;
+
case UD:
((struct mthca_tavor_ud_seg *) wqe)->lkey =
cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
@@ -1814,9 +1873,29 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
sizeof (struct mthca_atomic_seg);
break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ((struct mthca_raddr_seg *) wqe)->raddr =
+ cpu_to_be64(wr->wr.rdma.remote_addr);
+ ((struct mthca_raddr_seg *) wqe)->rkey =
+ cpu_to_be32(wr->wr.rdma.rkey);
+ ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+ wqe += sizeof (struct mthca_raddr_seg);
+ size += sizeof (struct mthca_raddr_seg) / 16;
+ break;
+
+ default:
+ /* No extra segments required for sends */
+ break;
+ }
+
+ break;
+
+ case UC:
+ switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
- case IB_WR_RDMA_READ:
((struct mthca_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.rdma.remote_addr);
((struct mthca_raddr_seg *) wqe)->rkey =
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 556264b4342..374f404e81d 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -21,6 +21,7 @@
#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/devfs_fs_kernel.h>
+#include <linux/compat.h>
struct evdev {
int exist;
@@ -145,6 +146,41 @@ static int evdev_open(struct inode * inode, struct file * file)
return 0;
}
+#ifdef CONFIG_COMPAT
+struct input_event_compat {
+ struct compat_timeval time;
+ __u16 type;
+ __u16 code;
+ __s32 value;
+};
+
+#ifdef CONFIG_X86_64
+# define COMPAT_TEST test_thread_flag(TIF_IA32)
+#elif defined(CONFIG_IA64)
+# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current))
+#elif defined(CONFIG_ARCH_S390)
+# define COMPAT_TEST test_thread_flag(TIF_31BIT)
+#else
+# define COMPAT_TEST test_thread_flag(TIF_32BIT)
+#endif
+
+static ssize_t evdev_write_compat(struct file * file, const char __user * buffer, size_t count, loff_t *ppos)
+{
+ struct evdev_list *list = file->private_data;
+ struct input_event_compat event;
+ int retval = 0;
+
+ while (retval < count) {
+ if (copy_from_user(&event, buffer + retval, sizeof(struct input_event_compat)))
+ return -EFAULT;
+ input_event(list->evdev->handle.dev, event.type, event.code, event.value);
+ retval += sizeof(struct input_event_compat);
+ }
+
+ return retval;
+}
+#endif
+
static ssize_t evdev_write(struct file * file, const char __user * buffer, size_t count, loff_t *ppos)
{
struct evdev_list *list = file->private_data;
@@ -153,6 +189,11 @@ static ssize_t evdev_write(struct file * file, const char __user * buffer, size_
if (!list->evdev->exist) return -ENODEV;
+#ifdef CONFIG_COMPAT
+ if (COMPAT_TEST)
+ return evdev_write_compat(file, buffer, count, ppos);
+#endif
+
while (retval < count) {
if (copy_from_user(&event, buffer + retval, sizeof(struct input_event)))
@@ -164,11 +205,56 @@ static ssize_t evdev_write(struct file * file, const char __user * buffer, size_
return retval;
}
+#ifdef CONFIG_COMPAT
+static ssize_t evdev_read_compat(struct file * file, char __user * buffer, size_t count, loff_t *ppos)
+{
+ struct evdev_list *list = file->private_data;
+ int retval;
+
+ if (count < sizeof(struct input_event_compat))
+ return -EINVAL;
+
+ if (list->head == list->tail && list->evdev->exist && (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ retval = wait_event_interruptible(list->evdev->wait,
+ list->head != list->tail || (!list->evdev->exist));
+
+ if (retval)
+ return retval;
+
+ if (!list->evdev->exist)
+ return -ENODEV;
+
+ while (list->head != list->tail && retval + sizeof(struct input_event_compat) <= count) {
+ struct input_event *event = (struct input_event *) list->buffer + list->tail;
+ struct input_event_compat event_compat;
+ event_compat.time.tv_sec = event->time.tv_sec;
+ event_compat.time.tv_usec = event->time.tv_usec;
+ event_compat.type = event->type;
+ event_compat.code = event->code;
+ event_compat.value = event->value;
+
+ if (copy_to_user(buffer + retval, &event_compat,
+ sizeof(struct input_event_compat))) return -EFAULT;
+ list->tail = (list->tail + 1) & (EVDEV_BUFFER_SIZE - 1);
+ retval += sizeof(struct input_event_compat);
+ }
+
+ return retval;
+}
+#endif
+
static ssize_t evdev_read(struct file * file, char __user * buffer, size_t count, loff_t *ppos)
{
struct evdev_list *list = file->private_data;
int retval;
+#ifdef CONFIG_COMPAT
+ if (COMPAT_TEST)
+ return evdev_read_compat(file, buffer, count, ppos);
+#endif
+
if (count < sizeof(struct input_event))
return -EINVAL;
@@ -186,7 +272,7 @@ static ssize_t evdev_read(struct file * file, char __user * buffer, size_t count
while (list->head != list->tail && retval + sizeof(struct input_event) <= count) {
if (copy_to_user(buffer + retval, list->buffer + list->tail,
- sizeof(struct input_event))) return -EFAULT;
+ sizeof(struct input_event))) return -EFAULT;
list->tail = (list->tail + 1) & (EVDEV_BUFFER_SIZE - 1);
retval += sizeof(struct input_event);
}
@@ -203,7 +289,7 @@ static unsigned int evdev_poll(struct file *file, poll_table *wait)
(list->evdev->exist ? 0 : (POLLHUP | POLLERR));
}
-static int evdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct evdev_list *list = file->private_data;
struct evdev *evdev = list->evdev;
@@ -285,109 +371,267 @@ static int evdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
default:
- if (_IOC_TYPE(cmd) != 'E' || _IOC_DIR(cmd) != _IOC_READ)
+ if (_IOC_TYPE(cmd) != 'E')
return -EINVAL;
- if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0,0))) {
-
- long *bits;
- int len;
-
- switch (_IOC_NR(cmd) & EV_MAX) {
- case 0: bits = dev->evbit; len = EV_MAX; break;
- case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
- case EV_REL: bits = dev->relbit; len = REL_MAX; break;
- case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
- case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
- case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
- case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
- case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
- default: return -EINVAL;
+ if (_IOC_DIR(cmd) == _IOC_READ) {
+
+ if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0,0))) {
+
+ long *bits;
+ int len;
+
+ switch (_IOC_NR(cmd) & EV_MAX) {
+ case 0: bits = dev->evbit; len = EV_MAX; break;
+ case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
+ case EV_REL: bits = dev->relbit; len = REL_MAX; break;
+ case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
+ case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
+ case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
+ case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
+ case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
+ default: return -EINVAL;
+ }
+ len = NBITS(len) * sizeof(long);
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
+ return copy_to_user(p, bits, len) ? -EFAULT : len;
}
- len = NBITS(len) * sizeof(long);
- if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
- return copy_to_user(p, bits, len) ? -EFAULT : len;
- }
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0))) {
- int len;
- len = NBITS(KEY_MAX) * sizeof(long);
- if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
- return copy_to_user(p, dev->key, len) ? -EFAULT : len;
- }
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0))) {
+ int len;
+ len = NBITS(KEY_MAX) * sizeof(long);
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
+ return copy_to_user(p, dev->key, len) ? -EFAULT : len;
+ }
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0))) {
- int len;
- len = NBITS(LED_MAX) * sizeof(long);
- if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
- return copy_to_user(p, dev->led, len) ? -EFAULT : len;
- }
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0))) {
+ int len;
+ len = NBITS(LED_MAX) * sizeof(long);
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
+ return copy_to_user(p, dev->led, len) ? -EFAULT : len;
+ }
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0))) {
- int len;
- len = NBITS(SND_MAX) * sizeof(long);
- if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
- return copy_to_user(p, dev->snd, len) ? -EFAULT : len;
- }
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0))) {
+ int len;
+ len = NBITS(SND_MAX) * sizeof(long);
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
+ return copy_to_user(p, dev->snd, len) ? -EFAULT : len;
+ }
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) {
- int len;
- if (!dev->name) return -ENOENT;
- len = strlen(dev->name) + 1;
- if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
- return copy_to_user(p, dev->name, len) ? -EFAULT : len;
- }
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) {
+ int len;
+ if (!dev->name) return -ENOENT;
+ len = strlen(dev->name) + 1;
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
+ return copy_to_user(p, dev->name, len) ? -EFAULT : len;
+ }
+
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) {
+ int len;
+ if (!dev->phys) return -ENOENT;
+ len = strlen(dev->phys) + 1;
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
+ return copy_to_user(p, dev->phys, len) ? -EFAULT : len;
+ }
+
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0))) {
+ int len;
+ if (!dev->uniq) return -ENOENT;
+ len = strlen(dev->uniq) + 1;
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
+ return copy_to_user(p, dev->uniq, len) ? -EFAULT : len;
+ }
+
+ if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
+
+ int t = _IOC_NR(cmd) & ABS_MAX;
+
+ abs.value = dev->abs[t];
+ abs.minimum = dev->absmin[t];
+ abs.maximum = dev->absmax[t];
+ abs.fuzz = dev->absfuzz[t];
+ abs.flat = dev->absflat[t];
+
+ if (copy_to_user(p, &abs, sizeof(struct input_absinfo)))
+ return -EFAULT;
+
+ return 0;
+ }
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) {
- int len;
- if (!dev->phys) return -ENOENT;
- len = strlen(dev->phys) + 1;
- if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
- return copy_to_user(p, dev->phys, len) ? -EFAULT : len;
}
- if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0))) {
- int len;
- if (!dev->uniq) return -ENOENT;
- len = strlen(dev->uniq) + 1;
- if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
- return copy_to_user(p, dev->uniq, len) ? -EFAULT : len;
+ if (_IOC_DIR(cmd) == _IOC_WRITE) {
+
+ if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
+
+ int t = _IOC_NR(cmd) & ABS_MAX;
+
+ if (copy_from_user(&abs, p, sizeof(struct input_absinfo)))
+ return -EFAULT;
+
+ dev->abs[t] = abs.value;
+ dev->absmin[t] = abs.minimum;
+ dev->absmax[t] = abs.maximum;
+ dev->absfuzz[t] = abs.fuzz;
+ dev->absflat[t] = abs.flat;
+
+ return 0;
+ }
}
+ }
+ return -EINVAL;
+}
- if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
+#ifdef CONFIG_COMPAT
+
+#define BITS_PER_LONG_COMPAT (sizeof(compat_long_t) * 8)
+#define NBITS_COMPAT(x) ((((x)-1)/BITS_PER_LONG_COMPAT)+1)
+#define OFF_COMPAT(x) ((x)%BITS_PER_LONG_COMPAT)
+#define BIT_COMPAT(x) (1UL<<OFF_COMPAT(x))
+#define LONG_COMPAT(x) ((x)/BITS_PER_LONG_COMPAT)
+#define test_bit_compat(bit, array) ((array[LONG_COMPAT(bit)] >> OFF_COMPAT(bit)) & 1)
+
+#ifdef __BIG_ENDIAN
+#define bit_to_user(bit, max) \
+do { \
+ int i; \
+ int len = NBITS_COMPAT((max)) * sizeof(compat_long_t); \
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); \
+ for (i = 0; i < len / sizeof(compat_long_t); i++) \
+ if (copy_to_user((compat_long_t*) p + i, \
+ (compat_long_t*) (bit) + i + 1 - ((i % 2) << 1), \
+ sizeof(compat_long_t))) \
+ return -EFAULT; \
+ return len; \
+} while (0)
+#else
+#define bit_to_user(bit, max) \
+do { \
+ int len = NBITS_COMPAT((max)) * sizeof(compat_long_t); \
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); \
+ return copy_to_user(p, (bit), len) ? -EFAULT : len; \
+} while (0)
+#endif
+
+static long evdev_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct evdev_list *list = file->private_data;
+ struct evdev *evdev = list->evdev;
+ struct input_dev *dev = evdev->handle.dev;
+ struct input_absinfo abs;
+ void __user *p = compat_ptr(arg);
- int t = _IOC_NR(cmd) & ABS_MAX;
+ if (!evdev->exist) return -ENODEV;
- abs.value = dev->abs[t];
- abs.minimum = dev->absmin[t];
- abs.maximum = dev->absmax[t];
- abs.fuzz = dev->absfuzz[t];
- abs.flat = dev->absflat[t];
+ switch (cmd) {
- if (copy_to_user(p, &abs, sizeof(struct input_absinfo)))
- return -EFAULT;
+ case EVIOCGVERSION:
+ case EVIOCGID:
+ case EVIOCGKEYCODE:
+ case EVIOCSKEYCODE:
+ case EVIOCSFF:
+ case EVIOCRMFF:
+ case EVIOCGEFFECTS:
+ case EVIOCGRAB:
+ return evdev_ioctl(file, cmd, (unsigned long) p);
- return 0;
+ default:
+
+ if (_IOC_TYPE(cmd) != 'E')
+ return -EINVAL;
+
+ if (_IOC_DIR(cmd) == _IOC_READ) {
+
+ if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0,0))) {
+ long *bits;
+ int max;
+
+ switch (_IOC_NR(cmd) & EV_MAX) {
+ case 0: bits = dev->evbit; max = EV_MAX; break;
+ case EV_KEY: bits = dev->keybit; max = KEY_MAX; break;
+ case EV_REL: bits = dev->relbit; max = REL_MAX; break;
+ case EV_ABS: bits = dev->absbit; max = ABS_MAX; break;
+ case EV_MSC: bits = dev->mscbit; max = MSC_MAX; break;
+ case EV_LED: bits = dev->ledbit; max = LED_MAX; break;
+ case EV_SND: bits = dev->sndbit; max = SND_MAX; break;
+ case EV_FF: bits = dev->ffbit; max = FF_MAX; break;
+ default: return -EINVAL;
+ }
+ bit_to_user(bits, max);
+ }
+
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0)))
+ bit_to_user(dev->key, KEY_MAX);
+
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0)))
+ bit_to_user(dev->led, LED_MAX);
+
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0)))
+ bit_to_user(dev->snd, SND_MAX);
+
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) {
+ int len;
+ if (!dev->name) return -ENOENT;
+ len = strlen(dev->name) + 1;
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
+ return copy_to_user(p, dev->name, len) ? -EFAULT : len;
+ }
+
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) {
+ int len;
+ if (!dev->phys) return -ENOENT;
+ len = strlen(dev->phys) + 1;
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
+ return copy_to_user(p, dev->phys, len) ? -EFAULT : len;
+ }
+
+ if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0))) {
+ int len;
+ if (!dev->uniq) return -ENOENT;
+ len = strlen(dev->uniq) + 1;
+ if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
+ return copy_to_user(p, dev->uniq, len) ? -EFAULT : len;
+ }
+
+ if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
+
+ int t = _IOC_NR(cmd) & ABS_MAX;
+
+ abs.value = dev->abs[t];
+ abs.minimum = dev->absmin[t];
+ abs.maximum = dev->absmax[t];
+ abs.fuzz = dev->absfuzz[t];
+ abs.flat = dev->absflat[t];
+
+ if (copy_to_user(p, &abs, sizeof(struct input_absinfo)))
+ return -EFAULT;
+
+ return 0;
+ }
}
- if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
+ if (_IOC_DIR(cmd) == _IOC_WRITE) {
- int t = _IOC_NR(cmd) & ABS_MAX;
+ if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
- if (copy_from_user(&abs, p, sizeof(struct input_absinfo)))
- return -EFAULT;
+ int t = _IOC_NR(cmd) & ABS_MAX;
- dev->abs[t] = abs.value;
- dev->absmin[t] = abs.minimum;
- dev->absmax[t] = abs.maximum;
- dev->absfuzz[t] = abs.fuzz;
- dev->absflat[t] = abs.flat;
+ if (copy_from_user(&abs, p, sizeof(struct input_absinfo)))
+ return -EFAULT;
- return 0;
+ dev->abs[t] = abs.value;
+ dev->absmin[t] = abs.minimum;
+ dev->absmax[t] = abs.maximum;
+ dev->absfuzz[t] = abs.fuzz;
+ dev->absflat[t] = abs.flat;
+
+ return 0;
+ }
}
}
return -EINVAL;
}
+#endif
static struct file_operations evdev_fops = {
.owner = THIS_MODULE,
@@ -396,7 +640,10 @@ static struct file_operations evdev_fops = {
.poll = evdev_poll,
.open = evdev_open,
.release = evdev_release,
- .ioctl = evdev_ioctl,
+ .unlocked_ioctl = evdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = evdev_ioctl_compat,
+#endif
.fasync = evdev_fasync,
.flush = evdev_flush
};
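The evdev changes add a CONFIG_COMPAT path for 32-bit processes on 64-bit kernels: struct input_event embeds a struct timeval, so the native and compat layouts differ in size (24 vs. 16 bytes on x86-64/ia32, for instance), and read/write must translate records instead of copying them raw. The ioctl side switches to unlocked_ioctl/compat_ioctl, and on big-endian 64-bit kernels the EVIOCGBIT-family copies have to swap the two 32-bit halves of each native long, which is what the i + 1 - ((i % 2) << 1) index in bit_to_user() does. A hypothetical helper making that index mapping explicit:

/*
 * Equivalent to the index arithmetic in the __BIG_ENDIAN bit_to_user()
 * macro: within each native 64-bit long the compat halves come out
 * pairwise swapped.
 */
static inline int compat_half_index(int i)
{
        return i + 1 - ((i % 2) << 1);  /* 0->1, 1->0, 2->3, 3->2, ... */
}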
diff --git a/drivers/input/gameport/Kconfig b/drivers/input/gameport/Kconfig
index 1d93f509290..7524bd7d8b8 100644
--- a/drivers/input/gameport/Kconfig
+++ b/drivers/input/gameport/Kconfig
@@ -49,22 +49,8 @@ config GAMEPORT_EMU10K1
To compile this driver as a module, choose M here: the
module will be called emu10k1-gp.
-config GAMEPORT_VORTEX
- tristate "Aureal Vortex, Vortex 2 gameport support"
- depends on PCI
- help
- Say Y here if you have an Aureal Vortex 1 or 2 card and want
- to use its gameport.
-
- To compile this driver as a module, choose M here: the
- module will be called vortex.
-
config GAMEPORT_FM801
tristate "ForteMedia FM801 gameport support"
depends on PCI
-config GAMEPORT_CS461X
- tristate "Crystal SoundFusion gameport support"
- depends on PCI
-
endif
diff --git a/drivers/input/gameport/Makefile b/drivers/input/gameport/Makefile
index 5367b4267ad..b6f6097bd8c 100644
--- a/drivers/input/gameport/Makefile
+++ b/drivers/input/gameport/Makefile
@@ -5,9 +5,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_GAMEPORT) += gameport.o
-obj-$(CONFIG_GAMEPORT_CS461X) += cs461x.o
obj-$(CONFIG_GAMEPORT_EMU10K1) += emu10k1-gp.o
obj-$(CONFIG_GAMEPORT_FM801) += fm801-gp.o
obj-$(CONFIG_GAMEPORT_L4) += lightning.o
obj-$(CONFIG_GAMEPORT_NS558) += ns558.o
-obj-$(CONFIG_GAMEPORT_VORTEX) += vortex.o
diff --git a/drivers/input/gameport/cs461x.c b/drivers/input/gameport/cs461x.c
deleted file mode 100644
index d4013ff9862..00000000000
--- a/drivers/input/gameport/cs461x.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- The all defines and part of code (such as cs461x_*) are
- contributed from ALSA 0.5.8 sources.
- See http://www.alsa-project.org/ for sources
-
- Tested on Linux 686 2.4.0-test9, ALSA 0.5.8a and CS4610
-*/
-
-#include <asm/io.h>
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/gameport.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-
-MODULE_AUTHOR("Victor Krapivin");
-MODULE_LICENSE("GPL");
-
-/*
- These options are experimental
-
-#define CS461X_FULL_MAP
-*/
-
-
-#ifndef PCI_VENDOR_ID_CIRRUS
-#define PCI_VENDOR_ID_CIRRUS 0x1013
-#endif
-#ifndef PCI_DEVICE_ID_CIRRUS_4610
-#define PCI_DEVICE_ID_CIRRUS_4610 0x6001
-#endif
-#ifndef PCI_DEVICE_ID_CIRRUS_4612
-#define PCI_DEVICE_ID_CIRRUS_4612 0x6003
-#endif
-#ifndef PCI_DEVICE_ID_CIRRUS_4615
-#define PCI_DEVICE_ID_CIRRUS_4615 0x6004
-#endif
-
-/* Registers */
-
-#define BA0_JSPT 0x00000480
-#define BA0_JSCTL 0x00000484
-#define BA0_JSC1 0x00000488
-#define BA0_JSC2 0x0000048C
-#define BA0_JSIO 0x000004A0
-
-/* Bits for JSPT */
-
-#define JSPT_CAX 0x00000001
-#define JSPT_CAY 0x00000002
-#define JSPT_CBX 0x00000004
-#define JSPT_CBY 0x00000008
-#define JSPT_BA1 0x00000010
-#define JSPT_BA2 0x00000020
-#define JSPT_BB1 0x00000040
-#define JSPT_BB2 0x00000080
-
-/* Bits for JSCTL */
-
-#define JSCTL_SP_MASK 0x00000003
-#define JSCTL_SP_SLOW 0x00000000
-#define JSCTL_SP_MEDIUM_SLOW 0x00000001
-#define JSCTL_SP_MEDIUM_FAST 0x00000002
-#define JSCTL_SP_FAST 0x00000003
-#define JSCTL_ARE 0x00000004
-
-/* Data register pairs masks */
-
-#define JSC1_Y1V_MASK 0x0000FFFF
-#define JSC1_X1V_MASK 0xFFFF0000
-#define JSC1_Y1V_SHIFT 0
-#define JSC1_X1V_SHIFT 16
-#define JSC2_Y2V_MASK 0x0000FFFF
-#define JSC2_X2V_MASK 0xFFFF0000
-#define JSC2_Y2V_SHIFT 0
-#define JSC2_X2V_SHIFT 16
-
-/* JS GPIO */
-
-#define JSIO_DAX 0x00000001
-#define JSIO_DAY 0x00000002
-#define JSIO_DBX 0x00000004
-#define JSIO_DBY 0x00000008
-#define JSIO_AXOE 0x00000010
-#define JSIO_AYOE 0x00000020
-#define JSIO_BXOE 0x00000040
-#define JSIO_BYOE 0x00000080
-
-/*
- The card initialization code is obfuscated; the module cs461x
- need to be loaded after ALSA modules initialized and something
- played on the CS 4610 chip (see sources for details of CS4610
- initialization code from ALSA)
-*/
-
-/* Card specific definitions */
-
-#define CS461X_BA0_SIZE 0x2000
-#define CS461X_BA1_DATA0_SIZE 0x3000
-#define CS461X_BA1_DATA1_SIZE 0x3800
-#define CS461X_BA1_PRG_SIZE 0x7000
-#define CS461X_BA1_REG_SIZE 0x0100
-
-#define BA1_SP_DMEM0 0x00000000
-#define BA1_SP_DMEM1 0x00010000
-#define BA1_SP_PMEM 0x00020000
-#define BA1_SP_REG 0x00030000
-
-#define BA1_DWORD_SIZE (13 * 1024 + 512)
-#define BA1_MEMORY_COUNT 3
-
-/*
- Only one CS461x card is still suppoted; the code requires
- redesign to avoid this limitatuion.
-*/
-
-static unsigned long ba0_addr;
-static unsigned int __iomem *ba0;
-
-#ifdef CS461X_FULL_MAP
-static unsigned long ba1_addr;
-static union ba1_t {
- struct {
- unsigned int __iomem *data0;
- unsigned int __iomem *data1;
- unsigned int __iomem *pmem;
- unsigned int __iomem *reg;
- } name;
- unsigned int __iomem *idx[4];
-} ba1;
-
-static void cs461x_poke(unsigned long reg, unsigned int val)
-{
- writel(val, &ba1.idx[(reg >> 16) & 3][(reg >> 2) & 0x3fff]);
-}
-
-static unsigned int cs461x_peek(unsigned long reg)
-{
- return readl(&ba1.idx[(reg >> 16) & 3][(reg >> 2) & 0x3fff]);
-}
-
-#endif
-
-static void cs461x_pokeBA0(unsigned long reg, unsigned int val)
-{
- writel(val, &ba0[reg >> 2]);
-}
-
-static unsigned int cs461x_peekBA0(unsigned long reg)
-{
- return readl(&ba0[reg >> 2]);
-}
-
-static int cs461x_free(struct pci_dev *pdev)
-{
- struct gameport *port = pci_get_drvdata(pdev);
-
- if (port)
- gameport_unregister_port(port);
-
- if (ba0) iounmap(ba0);
-#ifdef CS461X_FULL_MAP
- if (ba1.name.data0) iounmap(ba1.name.data0);
- if (ba1.name.data1) iounmap(ba1.name.data1);
- if (ba1.name.pmem) iounmap(ba1.name.pmem);
- if (ba1.name.reg) iounmap(ba1.name.reg);
-#endif
- return 0;
-}
-
-static void cs461x_gameport_trigger(struct gameport *gameport)
-{
- cs461x_pokeBA0(BA0_JSPT, 0xFF); //outb(gameport->io, 0xFF);
-}
-
-static unsigned char cs461x_gameport_read(struct gameport *gameport)
-{
- return cs461x_peekBA0(BA0_JSPT); //inb(gameport->io);
-}
-
-static int cs461x_gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
-{
- unsigned js1, js2, jst;
-
- js1 = cs461x_peekBA0(BA0_JSC1);
- js2 = cs461x_peekBA0(BA0_JSC2);
- jst = cs461x_peekBA0(BA0_JSPT);
-
- *buttons = (~jst >> 4) & 0x0F;
-
- axes[0] = ((js1 & JSC1_Y1V_MASK) >> JSC1_Y1V_SHIFT) & 0xFFFF;
- axes[1] = ((js1 & JSC1_X1V_MASK) >> JSC1_X1V_SHIFT) & 0xFFFF;
- axes[2] = ((js2 & JSC2_Y2V_MASK) >> JSC2_Y2V_SHIFT) & 0xFFFF;
- axes[3] = ((js2 & JSC2_X2V_MASK) >> JSC2_X2V_SHIFT) & 0xFFFF;
-
- for(jst=0;jst<4;++jst)
- if(axes[jst]==0xFFFF) axes[jst] = -1;
- return 0;
-}
-
-static int cs461x_gameport_open(struct gameport *gameport, int mode)
-{
- switch (mode) {
- case GAMEPORT_MODE_COOKED:
- case GAMEPORT_MODE_RAW:
- return 0;
- default:
- return -1;
- }
- return 0;
-}
-
-static struct pci_device_id cs461x_pci_tbl[] = {
- { PCI_VENDOR_ID_CIRRUS, 0x6001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Cirrus CS4610 */
- { PCI_VENDOR_ID_CIRRUS, 0x6003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Cirrus CS4612 */
- { PCI_VENDOR_ID_CIRRUS, 0x6005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Cirrus CS4615 */
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, cs461x_pci_tbl);
-
-static int __devinit cs461x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int rc;
- struct gameport* port;
-
- rc = pci_enable_device(pdev);
- if (rc) {
- printk(KERN_ERR "cs461x: Cannot enable PCI gameport (bus %d, devfn %d) error=%d\n",
- pdev->bus->number, pdev->devfn, rc);
- return rc;
- }
-
- ba0_addr = pci_resource_start(pdev, 0);
-#ifdef CS461X_FULL_MAP
- ba1_addr = pci_resource_start(pdev, 1);
-#endif
- if (ba0_addr == 0 || ba0_addr == ~0
-#ifdef CS461X_FULL_MAP
- || ba1_addr == 0 || ba1_addr == ~0
-#endif
- ) {
- printk(KERN_ERR "cs461x: wrong address - ba0 = 0x%lx\n", ba0_addr);
-#ifdef CS461X_FULL_MAP
- printk(KERN_ERR "cs461x: wrong address - ba1 = 0x%lx\n", ba1_addr);
-#endif
- cs461x_free(pdev);
- return -ENOMEM;
- }
-
- ba0 = ioremap(ba0_addr, CS461X_BA0_SIZE);
-#ifdef CS461X_FULL_MAP
- ba1.name.data0 = ioremap(ba1_addr + BA1_SP_DMEM0, CS461X_BA1_DATA0_SIZE);
- ba1.name.data1 = ioremap(ba1_addr + BA1_SP_DMEM1, CS461X_BA1_DATA1_SIZE);
- ba1.name.pmem = ioremap(ba1_addr + BA1_SP_PMEM, CS461X_BA1_PRG_SIZE);
- ba1.name.reg = ioremap(ba1_addr + BA1_SP_REG, CS461X_BA1_REG_SIZE);
-
- if (ba0 == NULL || ba1.name.data0 == NULL ||
- ba1.name.data1 == NULL || ba1.name.pmem == NULL ||
- ba1.name.reg == NULL) {
- cs461x_free(pdev);
- return -ENOMEM;
- }
-#else
- if (ba0 == NULL) {
- cs461x_free(pdev);
- return -ENOMEM;
- }
-#endif
-
- if (!(port = gameport_allocate_port())) {
- printk(KERN_ERR "cs461x: Memory allocation failed\n");
- cs461x_free(pdev);
- return -ENOMEM;
- }
-
- pci_set_drvdata(pdev, port);
-
- port->open = cs461x_gameport_open;
- port->trigger = cs461x_gameport_trigger;
- port->read = cs461x_gameport_read;
- port->cooked_read = cs461x_gameport_cooked_read;
-
- gameport_set_name(port, "CS416x");
- gameport_set_phys(port, "pci%s/gameport0", pci_name(pdev));
- port->dev.parent = &pdev->dev;
-
- cs461x_pokeBA0(BA0_JSIO, 0xFF); // ?
- cs461x_pokeBA0(BA0_JSCTL, JSCTL_SP_MEDIUM_SLOW);
-
- gameport_register_port(port);
-
- return 0;
-}
-
-static void __devexit cs461x_pci_remove(struct pci_dev *pdev)
-{
- cs461x_free(pdev);
-}
-
-static struct pci_driver cs461x_pci_driver = {
- .name = "CS461x_gameport",
- .id_table = cs461x_pci_tbl,
- .probe = cs461x_pci_probe,
- .remove = __devexit_p(cs461x_pci_remove),
-};
-
-static int __init cs461x_init(void)
-{
- return pci_register_driver(&cs461x_pci_driver);
-}
-
-static void __exit cs461x_exit(void)
-{
- pci_unregister_driver(&cs461x_pci_driver);
-}
-
-module_init(cs461x_init);
-module_exit(cs461x_exit);
-
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index c77a82e4605..ab09cf4093e 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -17,11 +17,10 @@
#include <linux/init.h>
#include <linux/gameport.h>
#include <linux/wait.h>
-#include <linux/completion.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/kthread.h>
/*#include <asm/io.h>*/
@@ -61,12 +60,13 @@ static void gameport_disconnect_port(struct gameport *gameport);
#if defined(__i386__)
+#include <asm/i8253.h>
+
#define DELTA(x,y) ((y)-(x)+((y)<(x)?1193182/HZ:0))
#define GET_TIME(x) do { x = get_time_pit(); } while (0)
static unsigned int get_time_pit(void)
{
- extern spinlock_t i8253_lock;
unsigned long flags;
unsigned int count;
@@ -238,8 +238,7 @@ struct gameport_event {
static DEFINE_SPINLOCK(gameport_event_lock); /* protects gameport_event_list */
static LIST_HEAD(gameport_event_list);
static DECLARE_WAIT_QUEUE_HEAD(gameport_wait);
-static DECLARE_COMPLETION(gameport_exited);
-static int gameport_pid;
+static struct task_struct *gameport_task;
static void gameport_queue_event(void *object, struct module *owner,
enum gameport_event_type event_type)
@@ -250,12 +249,12 @@ static void gameport_queue_event(void *object, struct module *owner,
spin_lock_irqsave(&gameport_event_lock, flags);
/*
- * Scan event list for the other events for the same gameport port,
+ * Scan event list for the other events for the same gameport port,
* starting with the most recent one. If event is the same we
* do not need to add a new one. If event is of different type we
* need to add this event and should not look further because
* we need to preserve sequence of distinct events.
- */
+ */
list_for_each_entry_reverse(event, &gameport_event_list, node) {
if (event->object == object) {
if (event->type == event_type)
@@ -432,20 +431,15 @@ static struct gameport *gameport_get_pending_child(struct gameport *parent)
static int gameport_thread(void *nothing)
{
- lock_kernel();
- daemonize("kgameportd");
- allow_signal(SIGTERM);
-
do {
gameport_handle_events();
- wait_event_interruptible(gameport_wait, !list_empty(&gameport_event_list));
+ wait_event_interruptible(gameport_wait,
+ kthread_should_stop() || !list_empty(&gameport_event_list));
try_to_freeze();
- } while (!signal_pending(current));
+ } while (!kthread_should_stop());
printk(KERN_DEBUG "gameport: kgameportd exiting\n");
-
- unlock_kernel();
- complete_and_exit(&gameport_exited, 0);
+ return 0;
}
@@ -773,9 +767,10 @@ void gameport_close(struct gameport *gameport)
static int __init gameport_init(void)
{
- if (!(gameport_pid = kernel_thread(gameport_thread, NULL, CLONE_KERNEL))) {
+ gameport_task = kthread_run(gameport_thread, NULL, "kgameportd");
+ if (IS_ERR(gameport_task)) {
printk(KERN_ERR "gameport: Failed to start kgameportd\n");
- return -1;
+ return PTR_ERR(gameport_task);
}
gameport_bus.dev_attrs = gameport_device_attrs;
@@ -789,8 +784,7 @@ static int __init gameport_init(void)
static void __exit gameport_exit(void)
{
bus_unregister(&gameport_bus);
- kill_proc(gameport_pid, SIGTERM, 1);
- wait_for_completion(&gameport_exited);
+ kthread_stop(gameport_task);
}
module_init(gameport_init);
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index 7c5c6318eeb..1ab5f2dc8a2 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -258,18 +258,18 @@ static int __init ns558_init(void)
{
int i = 0;
+ if (pnp_register_driver(&ns558_pnp_driver) >= 0)
+ pnp_registered = 1;
+
/*
- * Probe ISA ports first so that PnP gets to choose free port addresses
- * not occupied by the ISA ports.
+ * Probe ISA ports after PnP, so that PnP ports that are already
+ * enabled get detected as PnP. This may be suboptimal in multi-device
+ * configurations, but saves hassle with simple setups.
*/
while (ns558_isa_portlist[i])
ns558_isa_probe(ns558_isa_portlist[i++]);
- if (pnp_register_driver(&ns558_pnp_driver) >= 0)
- pnp_registered = 1;
-
-
return (list_empty(&ns558_list) && !pnp_registered) ? -ENODEV : 0;
}
diff --git a/drivers/input/gameport/vortex.c b/drivers/input/gameport/vortex.c
deleted file mode 100644
index 36b0309c8bf..00000000000
--- a/drivers/input/gameport/vortex.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * $Id: vortex.c,v 1.5 2002/07/01 15:39:30 vojtech Exp $
- *
- * Copyright (c) 2000-2001 Vojtech Pavlik
- *
- * Based on the work of:
- * Raymond Ingles
- */
-
-/*
- * Trident 4DWave and Aureal Vortex gameport driver for Linux
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
- * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
- */
-
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/gameport.h>
-
-MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
-MODULE_DESCRIPTION("Aureal Vortex and Vortex2 gameport driver");
-MODULE_LICENSE("GPL");
-
-#define VORTEX_GCR 0x0c /* Gameport control register */
-#define VORTEX_LEG 0x08 /* Legacy port location */
-#define VORTEX_AXD 0x10 /* Axes start */
-#define VORTEX_DATA_WAIT 20 /* 20 ms */
-
-struct vortex {
- struct gameport *gameport;
- struct pci_dev *dev;
- unsigned char __iomem *base;
- unsigned char __iomem *io;
-};
-
-static unsigned char vortex_read(struct gameport *gameport)
-{
- struct vortex *vortex = gameport->port_data;
- return readb(vortex->io + VORTEX_LEG);
-}
-
-static void vortex_trigger(struct gameport *gameport)
-{
- struct vortex *vortex = gameport->port_data;
- writeb(0xff, vortex->io + VORTEX_LEG);
-}
-
-static int vortex_cooked_read(struct gameport *gameport, int *axes, int *buttons)
-{
- struct vortex *vortex = gameport->port_data;
- int i;
-
- *buttons = (~readb(vortex->base + VORTEX_LEG) >> 4) & 0xf;
-
- for (i = 0; i < 4; i++) {
- axes[i] = readw(vortex->io + VORTEX_AXD + i * sizeof(u32));
- if (axes[i] == 0x1fff) axes[i] = -1;
- }
-
- return 0;
-}
-
-static int vortex_open(struct gameport *gameport, int mode)
-{
- struct vortex *vortex = gameport->port_data;
-
- switch (mode) {
- case GAMEPORT_MODE_COOKED:
- writeb(0x40, vortex->io + VORTEX_GCR);
- msleep(VORTEX_DATA_WAIT);
- return 0;
- case GAMEPORT_MODE_RAW:
- writeb(0x00, vortex->io + VORTEX_GCR);
- return 0;
- default:
- return -1;
- }
-
- return 0;
-}
-
-static int __devinit vortex_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct vortex *vortex;
- struct gameport *port;
- int i;
-
- vortex = kcalloc(1, sizeof(struct vortex), GFP_KERNEL);
- port = gameport_allocate_port();
- if (!vortex || !port) {
- printk(KERN_ERR "vortex: Memory allocation failed.\n");
- kfree(vortex);
- gameport_free_port(port);
- return -ENOMEM;
- }
-
- for (i = 0; i < 6; i++)
- if (~pci_resource_flags(dev, i) & IORESOURCE_IO)
- break;
-
- pci_enable_device(dev);
-
- vortex->dev = dev;
- vortex->gameport = port;
- vortex->base = ioremap(pci_resource_start(vortex->dev, i),
- pci_resource_len(vortex->dev, i));
- vortex->io = vortex->base + id->driver_data;
-
- pci_set_drvdata(dev, vortex);
-
- port->port_data = vortex;
- port->fuzz = 64;
-
- gameport_set_name(port, "AU88x0");
- gameport_set_phys(port, "pci%s/gameport0", pci_name(dev));
- port->dev.parent = &dev->dev;
- port->read = vortex_read;
- port->trigger = vortex_trigger;
- port->cooked_read = vortex_cooked_read;
- port->open = vortex_open;
-
- gameport_register_port(port);
-
- return 0;
-}
-
-static void __devexit vortex_remove(struct pci_dev *dev)
-{
- struct vortex *vortex = pci_get_drvdata(dev);
-
- gameport_unregister_port(vortex->gameport);
- iounmap(vortex->base);
- kfree(vortex);
-}
-
-static struct pci_device_id vortex_id_table[] = {
- { 0x12eb, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x11000 },
- { 0x12eb, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x28800 },
- { 0 }
-};
-
-static struct pci_driver vortex_driver = {
- .name = "vortex_gameport",
- .id_table = vortex_id_table,
- .probe = vortex_probe,
- .remove = __devexit_p(vortex_remove),
-};
-
-static int __init vortex_init(void)
-{
- return pci_register_driver(&vortex_driver);
-}
-
-static void __exit vortex_exit(void)
-{
- pci_unregister_driver(&vortex_driver);
-}
-
-module_init(vortex_init);
-module_exit(vortex_exit);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 83c77c990dd..7c4b4d37b3e 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -219,10 +219,24 @@ void input_release_device(struct input_handle *handle)
int input_open_device(struct input_handle *handle)
{
+ struct input_dev *dev = handle->dev;
+ int err;
+
+ err = down_interruptible(&dev->sem);
+ if (err)
+ return err;
+
handle->open++;
- if (handle->dev->open)
- return handle->dev->open(handle->dev);
- return 0;
+
+ if (!dev->users++ && dev->open)
+ err = dev->open(dev);
+
+ if (err)
+ handle->open--;
+
+ up(&dev->sem);
+
+ return err;
}
int input_flush_device(struct input_handle* handle, struct file* file)
@@ -235,10 +249,17 @@ int input_flush_device(struct input_handle* handle, struct file* file)
void input_close_device(struct input_handle *handle)
{
+ struct input_dev *dev = handle->dev;
+
input_release_device(handle);
- if (handle->dev->close)
- handle->dev->close(handle->dev);
+
+ down(&dev->sem);
+
+ if (!--dev->users && dev->close)
+ dev->close(dev);
handle->open--;
+
+ up(&dev->sem);
}
static void input_link_handle(struct input_handle *handle)
@@ -415,6 +436,8 @@ void input_register_device(struct input_dev *dev)
set_bit(EV_SYN, dev->evbit);
+ init_MUTEX(&dev->sem);
+
/*
* If delay and period are pre-set by the driver, then autorepeating
* is handled by the driver itself and we don't do it in input.c.
@@ -674,6 +697,8 @@ static int input_handlers_read(char *buf, char **start, off_t pos, int count, in
return (count > cnt) ? cnt : count;
}
+static struct file_operations input_fileops;
+
static int __init input_proc_init(void)
{
struct proc_dir_entry *entry;
@@ -688,6 +713,8 @@ static int __init input_proc_init(void)
return -ENOMEM;
}
entry->owner = THIS_MODULE;
+ input_fileops = *entry->proc_fops;
+ entry->proc_fops = &input_fileops;
entry->proc_fops->poll = input_devices_poll;
entry = create_proc_read_entry("handlers", 0, proc_bus_input_dir, input_handlers_read, NULL);
if (entry == NULL) {
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 39775fc380c..ff8e1bbd0e1 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -285,48 +285,33 @@ static unsigned int joydev_poll(struct file *file, poll_table *wait)
(POLLIN | POLLRDNORM) : 0) | (list->joydev->exist ? 0 : (POLLHUP | POLLERR));
}
-static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static int joydev_ioctl_common(struct joydev *joydev, unsigned int cmd, void __user *argp)
{
- struct joydev_list *list = file->private_data;
- struct joydev *joydev = list->joydev;
struct input_dev *dev = joydev->handle.dev;
- void __user *argp = (void __user *)arg;
int i, j;
- if (!joydev->exist) return -ENODEV;
-
switch (cmd) {
case JS_SET_CAL:
return copy_from_user(&joydev->glue.JS_CORR, argp,
- sizeof(struct JS_DATA_TYPE)) ? -EFAULT : 0;
+ sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;
case JS_GET_CAL:
return copy_to_user(argp, &joydev->glue.JS_CORR,
- sizeof(struct JS_DATA_TYPE)) ? -EFAULT : 0;
+ sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;
case JS_SET_TIMEOUT:
- return get_user(joydev->glue.JS_TIMEOUT, (int __user *) arg);
+ return get_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
case JS_GET_TIMEOUT:
- return put_user(joydev->glue.JS_TIMEOUT, (int __user *) arg);
- case JS_SET_TIMELIMIT:
- return get_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg);
- case JS_GET_TIMELIMIT:
- return put_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg);
- case JS_SET_ALL:
- return copy_from_user(&joydev->glue, argp,
- sizeof(struct JS_DATA_SAVE_TYPE)) ? -EFAULT : 0;
- case JS_GET_ALL:
- return copy_to_user(argp, &joydev->glue,
- sizeof(struct JS_DATA_SAVE_TYPE)) ? -EFAULT : 0;
+ return put_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
case JSIOCGVERSION:
- return put_user(JS_VERSION, (__u32 __user *) arg);
+ return put_user(JS_VERSION, (__u32 __user *) argp);
case JSIOCGAXES:
- return put_user(joydev->nabs, (__u8 __user *) arg);
+ return put_user(joydev->nabs, (__u8 __user *) argp);
case JSIOCGBUTTONS:
- return put_user(joydev->nkey, (__u8 __user *) arg);
+ return put_user(joydev->nkey, (__u8 __user *) argp);
case JSIOCSCORR:
if (copy_from_user(joydev->corr, argp,
- sizeof(struct js_corr) * joydev->nabs))
+ sizeof(joydev->corr[0]) * joydev->nabs))
return -EFAULT;
for (i = 0; i < joydev->nabs; i++) {
j = joydev->abspam[i];
@@ -335,7 +320,7 @@ static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
return 0;
case JSIOCGCORR:
return copy_to_user(argp, joydev->corr,
- sizeof(struct js_corr) * joydev->nabs) ? -EFAULT : 0;
+ sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0;
case JSIOCSAXMAP:
if (copy_from_user(joydev->abspam, argp, sizeof(__u8) * (ABS_MAX + 1)))
return -EFAULT;
@@ -371,6 +356,84 @@ static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
return -EINVAL;
}
+#ifdef CONFIG_COMPAT
+static long joydev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct joydev_list *list = file->private_data;
+ struct joydev *joydev = list->joydev;
+ void __user *argp = (void __user *)arg;
+ s32 tmp32;
+ struct JS_DATA_SAVE_TYPE_32 ds32;
+ int err;
+
+ if (!joydev->exist) return -ENODEV;
+ switch(cmd) {
+ case JS_SET_TIMELIMIT:
+ err = get_user(tmp32, (s32 __user *) arg);
+ if (err == 0)
+ joydev->glue.JS_TIMELIMIT = tmp32;
+ break;
+ case JS_GET_TIMELIMIT:
+ tmp32 = joydev->glue.JS_TIMELIMIT;
+ err = put_user(tmp32, (s32 __user *) arg);
+ break;
+
+ case JS_SET_ALL:
+ err = copy_from_user(&ds32, argp,
+ sizeof(ds32)) ? -EFAULT : 0;
+ if (err == 0) {
+ joydev->glue.JS_TIMEOUT = ds32.JS_TIMEOUT;
+ joydev->glue.BUSY = ds32.BUSY;
+ joydev->glue.JS_EXPIRETIME = ds32.JS_EXPIRETIME;
+ joydev->glue.JS_TIMELIMIT = ds32.JS_TIMELIMIT;
+ joydev->glue.JS_SAVE = ds32.JS_SAVE;
+ joydev->glue.JS_CORR = ds32.JS_CORR;
+ }
+ break;
+
+ case JS_GET_ALL:
+ ds32.JS_TIMEOUT = joydev->glue.JS_TIMEOUT;
+ ds32.BUSY = joydev->glue.BUSY;
+ ds32.JS_EXPIRETIME = joydev->glue.JS_EXPIRETIME;
+ ds32.JS_TIMELIMIT = joydev->glue.JS_TIMELIMIT;
+ ds32.JS_SAVE = joydev->glue.JS_SAVE;
+ ds32.JS_CORR = joydev->glue.JS_CORR;
+
+ err = copy_to_user(argp, &ds32,
+ sizeof(ds32)) ? -EFAULT : 0;
+ break;
+
+ default:
+ err = joydev_ioctl_common(joydev, cmd, argp);
+ }
+ return err;
+}
+#endif /* CONFIG_COMPAT */
+
+static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct joydev_list *list = file->private_data;
+ struct joydev *joydev = list->joydev;
+ void __user *argp = (void __user *)arg;
+
+ if (!joydev->exist) return -ENODEV;
+
+ switch(cmd) {
+ case JS_SET_TIMELIMIT:
+ return get_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg);
+ case JS_GET_TIMELIMIT:
+ return put_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg);
+ case JS_SET_ALL:
+ return copy_from_user(&joydev->glue, argp,
+ sizeof(joydev->glue)) ? -EFAULT : 0;
+ case JS_GET_ALL:
+ return copy_to_user(argp, &joydev->glue,
+ sizeof(joydev->glue)) ? -EFAULT : 0;
+ default:
+ return joydev_ioctl_common(joydev, cmd, argp);
+ }
+}
+
static struct file_operations joydev_fops = {
.owner = THIS_MODULE,
.read = joydev_read,
@@ -379,6 +442,9 @@ static struct file_operations joydev_fops = {
.open = joydev_open,
.release = joydev_release,
.ioctl = joydev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = joydev_compat_ioctl,
+#endif
.fasync = joydev_fasync,
};
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index ad39fe4bf35..bf34f75b946 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -185,7 +185,7 @@ static void a3d_poll(struct gameport *gameport)
a3d->reads++;
if (a3d_read_packet(a3d->gameport, a3d->length, data) != a3d->length ||
data[0] != a3d->mode || a3d_csum(data, a3d->length))
- a3d->bads++;
+ a3d->bads++;
else
a3d_read(a3d, data);
}
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 83f6dafc171..265962956c6 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -82,7 +82,7 @@ static char adi_cm2_abs[] = { ABS_X, ABS_Y, ABS_Z, ABS_RX, ABS_RY, ABS_RZ };
static char adi_wmf_abs[] = { ABS_WHEEL, ABS_GAS, ABS_BRAKE, ABS_HAT0X, ABS_HAT0Y, ABS_HAT1X, ABS_HAT1Y, ABS_HAT2X, ABS_HAT2Y };
static short adi_wmgpe_key[] = { BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, BTN_TL, BTN_TR, BTN_START, BTN_MODE, BTN_SELECT };
-static short adi_wmi_key[] = { BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE, BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_EXTRA };
+static short adi_wmi_key[] = { BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE, BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_EXTRA };
static short adi_wmed3d_key[] = { BTN_TRIGGER, BTN_THUMB, BTN_THUMB2, BTN_TOP, BTN_TOP2, BTN_BASE, BTN_BASE2 };
static short adi_cm2_key[] = { BTN_1, BTN_2, BTN_3, BTN_4, BTN_5, BTN_6, BTN_7, BTN_8 };
@@ -183,7 +183,7 @@ static void adi_move_bits(struct adi_port *port, int length)
int i;
struct adi *adi = port->adi;
- adi[0].idx = adi[1].idx = 0;
+ adi[0].idx = adi[1].idx = 0;
if (adi[0].ret <= 0 || adi[1].ret <= 0) return;
if (adi[0].data[0] & 0x20 || ~adi[1].data[0] & 0x20) return;
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index cf36ca9b92f..033456bb9fe 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -51,7 +51,8 @@ MODULE_PARM_DESC(map, "Map of attached joysticks in form of <a>,<b> (default is
__obsolete_setup("amijoy=");
-static int amijoy_used[2] = { 0, 0 };
+static int amijoy_used;
+static DECLARE_MUTEX(amijoy_sem);
static struct input_dev amijoy_dev[2];
static char *amijoy_phys[2] = { "amijoy/input0", "amijoy/input1" };
@@ -84,26 +85,30 @@ static irqreturn_t amijoy_interrupt(int irq, void *dummy, struct pt_regs *fp)
static int amijoy_open(struct input_dev *dev)
{
- int *used = dev->private;
+ int err;
- if ((*used)++)
- return 0;
+ err = down_interruptible(&amijoy_sem);
+ if (err)
+ return err;
- if (request_irq(IRQ_AMIGA_VERTB, amijoy_interrupt, 0, "amijoy", amijoy_interrupt)) {
- (*used)--;
+ if (!amijoy_used && request_irq(IRQ_AMIGA_VERTB, amijoy_interrupt, 0, "amijoy", amijoy_interrupt)) {
printk(KERN_ERR "amijoy.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
- return -EBUSY;
+ err = -EBUSY;
+ goto out;
}
- return 0;
+ amijoy_used++;
+out:
+ up(&amijoy_sem);
+ return err;
}
static void amijoy_close(struct input_dev *dev)
{
- int *used = dev->private;
-
- if (!--(*used))
+ down(&amijoy_sem);
+ if (!--amijoy_used)
free_irq(IRQ_AMIGA_VERTB, amijoy_interrupt);
+ up(&amijoy_sem);
}
static int __init amijoy_init(void)
@@ -138,8 +143,6 @@ static int __init amijoy_init(void)
amijoy_dev[i].id.product = 0x0003;
amijoy_dev[i].id.version = 0x0100;
- amijoy_dev[i].private = amijoy_used + i;
-
input_register_device(amijoy_dev + i);
printk(KERN_INFO "input: %s at joy%ddat\n", amijoy_name, i);
}
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 504b7d55056..c3a5739030c 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -140,12 +140,14 @@ struct analog_port {
*/
#ifdef __i386__
+
+#include <asm/i8253.h>
+
#define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0)
#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? CLOCK_TICK_RATE / HZ : 0)))
#define TIME_NAME (cpu_has_tsc?"TSC":"PIT")
static unsigned int get_time_pit(void)
{
- extern spinlock_t i8253_lock;
unsigned long flags;
unsigned int count;
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index cfdd3acf06a..fbd3eed07f9 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -87,7 +87,7 @@ __obsolete_setup("db9_3=");
#define DB9_NORMAL 0x0a
#define DB9_NOSELECT 0x08
-#define DB9_MAX_DEVICES 2
+#define DB9_MAX_DEVICES 2
#define DB9_GENESIS6_DELAY 14
#define DB9_REFRESH_TIME HZ/100
@@ -98,6 +98,7 @@ struct db9 {
struct pardevice *pd;
int mode;
int used;
+ struct semaphore sem;
char phys[2][32];
};
@@ -503,6 +504,11 @@ static int db9_open(struct input_dev *dev)
{
struct db9 *db9 = dev->private;
struct parport *port = db9->pd->port;
+ int err;
+
+ err = down_interruptible(&db9->sem);
+ if (err)
+ return err;
if (!db9->used++) {
parport_claim(db9->pd);
@@ -514,6 +520,7 @@ static int db9_open(struct input_dev *dev)
mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME);
}
+ up(&db9->sem);
return 0;
}
@@ -522,12 +529,14 @@ static void db9_close(struct input_dev *dev)
struct db9 *db9 = dev->private;
struct parport *port = db9->pd->port;
+ down(&db9->sem);
if (!--db9->used) {
- del_timer(&db9->timer);
+ del_timer_sync(&db9->timer);
parport_write_control(port, 0x00);
parport_data_forward(port);
parport_release(db9->pd);
}
+ up(&db9->sem);
}
static struct db9 __init *db9_probe(int *config, int nargs)
@@ -563,12 +572,12 @@ static struct db9 __init *db9_probe(int *config, int nargs)
}
}
- if (!(db9 = kmalloc(sizeof(struct db9), GFP_KERNEL))) {
+ if (!(db9 = kcalloc(1, sizeof(struct db9), GFP_KERNEL))) {
parport_put_port(pp);
return NULL;
}
- memset(db9, 0, sizeof(struct db9));
+ init_MUTEX(&db9->sem);
db9->mode = config[1];
init_timer(&db9->timer);
db9->timer.data = (long) db9;
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 8732f52bdd0..95bbdd302aa 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -1,12 +1,12 @@
/*
* NES, SNES, N64, MultiSystem, PSX gamepad driver for Linux
*
- * Copyright (c) 1999-2004 Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (c) 2004 Peter Nelson <rufus-kernel@hackish.org>
+ * Copyright (c) 1999-2004 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2004 Peter Nelson <rufus-kernel@hackish.org>
*
* Based on the work of:
- * Andree Borrmann John Dahlstrom
- * David Kuder Nathan Hand
+ * Andree Borrmann John Dahlstrom
+ * David Kuder Nathan Hand
*/
/*
@@ -81,6 +81,7 @@ struct gc {
struct timer_list timer;
unsigned char pads[GC_MAX + 1];
int used;
+ struct semaphore sem;
char phys[5][32];
};
@@ -433,7 +434,7 @@ static void gc_timer(unsigned long private)
gc_psx_read_packet(gc, data_psx, data);
for (i = 0; i < 5; i++) {
- switch (data[i]) {
+ switch (data[i]) {
case GC_PSX_RUMBLE:
@@ -503,22 +504,33 @@ static void gc_timer(unsigned long private)
static int gc_open(struct input_dev *dev)
{
struct gc *gc = dev->private;
+ int err;
+
+ err = down_interruptible(&gc->sem);
+ if (err)
+ return err;
+
if (!gc->used++) {
parport_claim(gc->pd);
parport_write_control(gc->pd->port, 0x04);
mod_timer(&gc->timer, jiffies + GC_REFRESH_TIME);
}
+
+ up(&gc->sem);
return 0;
}
static void gc_close(struct input_dev *dev)
{
struct gc *gc = dev->private;
+
+ down(&gc->sem);
if (!--gc->used) {
- del_timer(&gc->timer);
+ del_timer_sync(&gc->timer);
parport_write_control(gc->pd->port, 0x00);
parport_release(gc->pd);
}
+ up(&gc->sem);
}
static struct gc __init *gc_probe(int *config, int nargs)
@@ -542,11 +554,12 @@ static struct gc __init *gc_probe(int *config, int nargs)
return NULL;
}
- if (!(gc = kmalloc(sizeof(struct gc), GFP_KERNEL))) {
+ if (!(gc = kcalloc(1, sizeof(struct gc), GFP_KERNEL))) {
parport_put_port(pp);
return NULL;
}
- memset(gc, 0, sizeof(struct gc));
+
+ init_MUTEX(&gc->sem);
gc->pd = parport_register_device(pp, "gamecon", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index ad13f09a4e7..7d969420066 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -329,7 +329,7 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
for (i = 0; i < gf2k_axes[gf2k->id]; i++) {
gf2k->dev.absmax[gf2k_abs[i]] = (i < 2) ? gf2k->dev.abs[gf2k_abs[i]] * 2 - 32 :
- gf2k->dev.abs[gf2k_abs[0]] + gf2k->dev.abs[gf2k_abs[1]] - 32;
+ gf2k->dev.abs[gf2k_abs[0]] + gf2k->dev.abs[gf2k_abs[1]] - 32;
gf2k->dev.absmin[gf2k_abs[i]] = 32;
gf2k->dev.absfuzz[gf2k_abs[i]] = 8;
gf2k->dev.absflat[gf2k_abs[i]] = (i < 2) ? 24 : 0;
diff --git a/drivers/input/joystick/grip_mp.c b/drivers/input/joystick/grip_mp.c
index 42e5005d621..0da7bd133cc 100644
--- a/drivers/input/joystick/grip_mp.c
+++ b/drivers/input/joystick/grip_mp.c
@@ -171,7 +171,7 @@ static int mp_io(struct gameport* gameport, int sendflags, int sendcode, u32 *pa
*packet = 0;
raw_data = gameport_read(gameport);
if (raw_data & 1)
- return IO_RETRY;
+ return IO_RETRY;
for (i = 0; i < 64; i++) {
raw_data = gameport_read(gameport);
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index 028f3513629..e31b7b93fde 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -78,6 +78,7 @@ static struct iforce_device iforce_device[] = {
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
+ { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //?
{ 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce }
};
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 617c0b0e5a3..6369a24684f 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -229,6 +229,7 @@ static struct usb_device_id iforce_usb_ids [] = {
{ USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */
{ USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */
{ USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */
+ { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */
{ } /* Terminating entry */
};
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index ec0a2a64d49..a436f222085 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -4,8 +4,8 @@
* Copyright (c) 1999-2001 Vojtech Pavlik
*
* Based on the work of:
- * David Thompson
- * Joseph Krahn
+ * David Thompson
+ * Joseph Krahn
*/
/*
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index 874367bfab0..01fd2e4791a 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -4,7 +4,7 @@
* Copyright (c) 1999-2001 Vojtech Pavlik
*
* Based on the work of:
- * David Thompson
+ * David Thompson
*/
/*
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index aaee52ceb92..9eb9954cac6 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -79,7 +79,7 @@ static short tmdc_btn_pad[TMDC_BTN] =
{ BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, BTN_START, BTN_SELECT, BTN_TL, BTN_TR };
static short tmdc_btn_joy[TMDC_BTN] =
{ BTN_TRIGGER, BTN_THUMB, BTN_TOP, BTN_TOP2, BTN_BASE, BTN_BASE2, BTN_THUMB2, BTN_PINKIE,
- BTN_BASE3, BTN_BASE4, BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z };
+ BTN_BASE3, BTN_BASE4, BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z };
static short tmdc_btn_fm[TMDC_BTN] =
{ BTN_TRIGGER, BTN_C, BTN_B, BTN_A, BTN_THUMB, BTN_X, BTN_Y, BTN_Z, BTN_TOP, BTN_TOP2 };
static short tmdc_btn_at[TMDC_BTN] =
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index dd88b9cb49f..28100d461cb 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -84,6 +84,7 @@ static struct tgfx {
char phys[7][32];
int sticks;
int used;
+ struct semaphore sem;
} *tgfx_base[3];
/*
@@ -99,7 +100,7 @@ static void tgfx_timer(unsigned long private)
for (i = 0; i < 7; i++)
if (tgfx->sticks & (1 << i)) {
- dev = tgfx->dev + i;
+ dev = tgfx->dev + i;
parport_write_data(tgfx->pd->port, ~(1 << i));
data1 = parport_read_status(tgfx->pd->port) ^ 0x7f;
@@ -122,23 +123,34 @@ static void tgfx_timer(unsigned long private)
static int tgfx_open(struct input_dev *dev)
{
- struct tgfx *tgfx = dev->private;
- if (!tgfx->used++) {
+ struct tgfx *tgfx = dev->private;
+ int err;
+
+ err = down_interruptible(&tgfx->sem);
+ if (err)
+ return err;
+
+ if (!tgfx->used++) {
parport_claim(tgfx->pd);
parport_write_control(tgfx->pd->port, 0x04);
- mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME);
+ mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME);
}
- return 0;
+
+ up(&tgfx->sem);
+ return 0;
}
static void tgfx_close(struct input_dev *dev)
{
- struct tgfx *tgfx = dev->private;
- if (!--tgfx->used) {
- del_timer(&tgfx->timer);
+ struct tgfx *tgfx = dev->private;
+
+ down(&tgfx->sem);
+ if (!--tgfx->used) {
+ del_timer_sync(&tgfx->timer);
parport_write_control(tgfx->pd->port, 0x00);
- parport_release(tgfx->pd);
+ parport_release(tgfx->pd);
}
+ up(&tgfx->sem);
}
/*
@@ -166,11 +178,12 @@ static struct tgfx __init *tgfx_probe(int *config, int nargs)
return NULL;
}
- if (!(tgfx = kmalloc(sizeof(struct tgfx), GFP_KERNEL))) {
+ if (!(tgfx = kcalloc(1, sizeof(struct tgfx), GFP_KERNEL))) {
parport_put_port(pp);
return NULL;
}
- memset(tgfx, 0, sizeof(struct tgfx));
+
+ init_MUTEX(&tgfx->sem);
tgfx->pd = parport_register_device(pp, "turbografx", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 82fad9a23ac..4d4985b59ab 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -227,7 +227,7 @@ static ssize_t atkbd_do_set_##_name(struct device *d, struct device_attribute *a
{ \
return atkbd_attr_set_helper(d, b, s, atkbd_set_##_name); \
} \
-static struct device_attribute atkbd_attr_##_name = \
+static struct device_attribute atkbd_attr_##_name = \
__ATTR(_name, S_IWUSR | S_IRUGO, atkbd_do_show_##_name, atkbd_do_set_##_name);
ATKBD_DEFINE_ATTR(extra);
@@ -388,7 +388,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
value = atkbd->release ? 0 :
(1 + (!atkbd->softrepeat && test_bit(atkbd->keycode[code], atkbd->dev.key)));
- switch (value) { /* Workaround Toshiba laptop multiple keypress */
+ switch (value) { /* Workaround Toshiba laptop multiple keypress */
case 0:
atkbd->last = 0;
break;
@@ -894,7 +894,7 @@ static int atkbd_reconnect(struct serio *serio)
if (atkbd->write) {
param[0] = (test_bit(LED_SCROLLL, atkbd->dev.led) ? 1 : 0)
| (test_bit(LED_NUML, atkbd->dev.led) ? 2 : 0)
- | (test_bit(LED_CAPSL, atkbd->dev.led) ? 4 : 0);
+ | (test_bit(LED_CAPSL, atkbd->dev.led) ? 4 : 0);
if (atkbd_probe(atkbd))
return -1;
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
index 0f1220a0ceb..a8551711e8d 100644
--- a/drivers/input/keyboard/corgikbd.c
+++ b/drivers/input/keyboard/corgikbd.c
@@ -39,6 +39,7 @@
#define CORGI_KEY_CALENDER KEY_F1
#define CORGI_KEY_ADDRESS KEY_F2
#define CORGI_KEY_FN KEY_F3
+#define CORGI_KEY_CANCEL KEY_F4
#define CORGI_KEY_OFF KEY_SUSPEND
#define CORGI_KEY_EXOK KEY_F5
#define CORGI_KEY_EXCANCEL KEY_F6
@@ -46,6 +47,7 @@
#define CORGI_KEY_EXJOGUP KEY_F8
#define CORGI_KEY_JAP1 KEY_LEFTCTRL
#define CORGI_KEY_JAP2 KEY_LEFTALT
+#define CORGI_KEY_MAIL KEY_F10
#define CORGI_KEY_OK KEY_F11
#define CORGI_KEY_MENU KEY_F12
#define CORGI_HINGE_0 KEY_KP0
@@ -59,8 +61,8 @@ static unsigned char corgikbd_keycode[NR_SCANCODES] = {
KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
CORGI_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
CORGI_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, 0, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, /* 65-80 */
- KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
- KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
+ CORGI_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
+ KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, CORGI_KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
CORGI_KEY_OFF, CORGI_KEY_EXOK, CORGI_KEY_EXCANCEL, CORGI_KEY_EXJOGDOWN, CORGI_KEY_EXJOGUP, 0, 0, 0, 0, 0, 0, 0, /* 113-124 */
CORGI_HINGE_0, CORGI_HINGE_1, CORGI_HINGE_2 /* 125-127 */
};
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 2694ff2b5be..098963c7cdd 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -15,10 +15,10 @@
* information given below, I will _not_ be liable!
*
* RJ10 pinout: To DE9: Or DB25:
- * 1 - RxD <----> Pin 3 (TxD) <-> Pin 2 (TxD)
- * 2 - GND <----> Pin 5 (GND) <-> Pin 7 (GND)
- * 4 - TxD <----> Pin 2 (RxD) <-> Pin 3 (RxD)
- * 3 - +12V (from HDD drive connector), DON'T connect to DE9 or DB25!!!
+ * 1 - RxD <----> Pin 3 (TxD) <-> Pin 2 (TxD)
+ * 2 - GND <----> Pin 5 (GND) <-> Pin 7 (GND)
+ * 4 - TxD <----> Pin 2 (RxD) <-> Pin 3 (RxD)
+ * 3 - +12V (from HDD drive connector), DON'T connect to DE9 or DB25!!!
*
* Pin numbers for DE9 and DB25 are noted on the plug (quite small:). For
* RJ10, it's like this:
diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c
index d3e9dd6a13c..8935290256b 100644
--- a/drivers/input/keyboard/locomokbd.c
+++ b/drivers/input/keyboard/locomokbd.c
@@ -42,7 +42,7 @@ MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
MODULE_DESCRIPTION("LoCoMo keyboard driver");
MODULE_LICENSE("GPL");
-#define LOCOMOKBD_NUMKEYS 128
+#define LOCOMOKBD_NUMKEYS 128
#define KEY_ACTIVITY KEY_F16
#define KEY_CONTACT KEY_F18
@@ -61,7 +61,7 @@ static unsigned char locomokbd_keycode[LOCOMOKBD_NUMKEYS] = {
KEY_G, KEY_F, KEY_X, KEY_S, 0, 0, 0, 0, 0, 0, /* 90 - 99 */
0, 0, KEY_DOT, 0, KEY_COMMA, KEY_N, KEY_B, KEY_C, KEY_Z, KEY_A, /* 100 - 109 */
KEY_LEFTSHIFT, KEY_TAB, KEY_LEFTCTRL, 0, 0, 0, 0, 0, 0, 0, /* 110 - 119 */
- KEY_M, KEY_SPACE, KEY_V, KEY_APOSTROPHE, KEY_SLASH, 0, 0, 0 /* 120 - 128 */
+ KEY_M, KEY_SPACE, KEY_V, KEY_APOSTROPHE, KEY_SLASH, 0, 0, 0 /* 120 - 128 */
};
#define KB_ROWS 16
@@ -82,7 +82,7 @@ struct locomokbd {
struct locomo_dev *ldev;
unsigned long base;
spinlock_t lock;
-
+
struct timer_list timer;
};
@@ -95,7 +95,7 @@ static inline void locomokbd_charge_all(unsigned long membase)
static inline void locomokbd_activate_all(unsigned long membase)
{
unsigned long r;
-
+
locomo_writel(0, membase + LOCOMO_KSC);
r = locomo_readl(membase + LOCOMO_KIC);
r &= 0xFEFF;
@@ -127,7 +127,7 @@ static inline void locomokbd_reset_col(unsigned long membase, int col)
*/
/* Scan the hardware keyboard and push any changes up through the input layer */
-static void locomokbd_scankeyboard(struct locomokbd *locomokbd, struct pt_regs *regs)
+static void locomokbd_scankeyboard(struct locomokbd *locomokbd, struct pt_regs *regs)
{
unsigned int row, col, rowd, scancode;
unsigned long flags;
@@ -138,7 +138,7 @@ static void locomokbd_scankeyboard(struct locomokbd *locomokbd, struct pt_regs *
if (regs)
input_regs(&locomokbd->input, regs);
-
+
locomokbd_charge_all(membase);
num_pressed = 0;
@@ -146,9 +146,9 @@ static void locomokbd_scankeyboard(struct locomokbd *locomokbd, struct pt_regs *
locomokbd_activate_col(membase, col);
udelay(KB_DELAY);
-
+
rowd = ~locomo_readl(membase + LOCOMO_KIB);
- for (row = 0; row < KB_ROWS; row++ ) {
+ for (row = 0; row < KB_ROWS; row++) {
scancode = SCANCODE(col, row);
if (rowd & KB_ROWMASK(row)) {
num_pressed += 1;
@@ -170,7 +170,7 @@ static void locomokbd_scankeyboard(struct locomokbd *locomokbd, struct pt_regs *
spin_unlock_irqrestore(&locomokbd->lock, flags);
}
-/*
+/*
* LoCoMo keyboard interrupt handler.
*/
static irqreturn_t locomokbd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
@@ -205,8 +205,8 @@ static int locomokbd_probe(struct locomo_dev *dev)
memset(locomokbd, 0, sizeof(struct locomokbd));
/* try and claim memory region */
- if (!request_mem_region((unsigned long) dev->mapbase,
- dev->length,
+ if (!request_mem_region((unsigned long) dev->mapbase,
+ dev->length,
LOCOMO_DRIVER_NAME(dev))) {
ret = -EBUSY;
printk(KERN_ERR "locomokbd: Can't acquire access to io memory for keyboard\n");
@@ -225,7 +225,7 @@ static int locomokbd_probe(struct locomo_dev *dev)
locomokbd->timer.data = (unsigned long) locomokbd;
locomokbd->input.evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
-
+
init_input_dev(&locomokbd->input);
locomokbd->input.keycode = locomokbd->keycode;
locomokbd->input.keycodesize = sizeof(unsigned char);
@@ -271,11 +271,11 @@ free:
static int locomokbd_remove(struct locomo_dev *dev)
{
struct locomokbd *locomokbd = locomo_get_drvdata(dev);
-
+
free_irq(dev->irq[0], locomokbd);
del_timer_sync(&locomokbd->timer);
-
+
input_unregister_device(&locomokbd->input);
locomo_set_drvdata(dev, NULL);
diff --git a/drivers/input/keyboard/maple_keyb.c b/drivers/input/keyboard/maple_keyb.c
index 859ed771ee0..eecbde294f1 100644
--- a/drivers/input/keyboard/maple_keyb.c
+++ b/drivers/input/keyboard/maple_keyb.c
@@ -1,6 +1,6 @@
/*
* $Id: maple_keyb.c,v 1.4 2004/03/22 01:18:15 lethal Exp $
- * SEGA Dreamcast keyboard driver
+ * SEGA Dreamcast keyboard driver
* Based on drivers/usb/usbkbd.c
*/
@@ -40,7 +40,6 @@ struct dc_kbd {
struct input_dev dev;
unsigned char new[8];
unsigned char old[8];
- int open;
};
@@ -95,22 +94,6 @@ static void dc_kbd_callback(struct mapleq *mq)
}
}
-
-static int dc_kbd_open(struct input_dev *dev)
-{
- struct dc_kbd *kbd = dev->private;
- kbd->open++;
- return 0;
-}
-
-
-static void dc_kbd_close(struct input_dev *dev)
-{
- struct dc_kbd *kbd = dev->private;
- kbd->open--;
-}
-
-
static int dc_kbd_connect(struct maple_device *dev)
{
int i;
@@ -133,9 +116,6 @@ static int dc_kbd_connect(struct maple_device *dev)
clear_bit(0, kbd->dev.keybit);
kbd->dev.private = kbd;
- kbd->dev.open = dc_kbd_open;
- kbd->dev.close = dc_kbd_close;
- kbd->dev.event = NULL;
kbd->dev.name = dev->product_name;
kbd->dev.id.bustype = BUS_MAPLE;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 158c8e845ff..98710997aaa 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -298,9 +298,11 @@ static int uinput_alloc_device(struct file *file, const char __user *buffer, siz
/* check if absmin/absmax/absfuzz/absflat are filled as
* told in Documentation/input/input-programming.txt */
if (test_bit(EV_ABS, dev->evbit)) {
- retval = uinput_validate_absbits(dev);
- if (retval < 0)
+ int err = uinput_validate_absbits(dev);
+ if (err < 0) {
+ retval = err;
kfree(dev->name);
+ }
}
exit:
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index a7864195806..c4909b49337 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -15,4 +15,4 @@ obj-$(CONFIG_MOUSE_SERIAL) += sermouse.o
obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o
obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o
-psmouse-objs := psmouse-base.o alps.o logips2pp.o synaptics.o
+psmouse-objs := psmouse-base.o alps.o logips2pp.o synaptics.o lifebook.o
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7bf4be733e9..a12e98158a7 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -30,10 +30,11 @@
#define ALPS_DUALPOINT 0x01
#define ALPS_WHEEL 0x02
-#define ALPS_FW_BK 0x04
+#define ALPS_FW_BK_1 0x04
#define ALPS_4BTN 0x08
#define ALPS_OLDPROTO 0x10
#define ALPS_PASS 0x20
+#define ALPS_FW_BK_2 0x40
static struct alps_model_info alps_model_data[] = {
{ { 0x33, 0x02, 0x0a }, 0x88, 0xf8, ALPS_OLDPROTO }, /* UMAX-530T */
@@ -43,11 +44,11 @@ static struct alps_model_info alps_model_data[] = {
{ { 0x63, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
{ { 0x63, 0x02, 0x28 }, 0xf8, 0xf8, 0 },
{ { 0x63, 0x02, 0x3c }, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
- { { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK }, /* NEC Versa L320 */
+ { { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
{ { 0x63, 0x02, 0x64 }, 0xf8, 0xf8, 0 },
{ { 0x63, 0x03, 0xc8 }, 0xf8, 0xf8, ALPS_PASS }, /* Dell Latitude D800 */
{ { 0x73, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
- { { 0x73, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
+ { { 0x73, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */
{ { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
{ { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
{ { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
@@ -61,11 +62,11 @@ static struct alps_model_info alps_model_data[] = {
/*
* ALPS abolute Mode - new format
- *
- * byte 0: 1 ? ? ? 1 ? ? ?
+ *
+ * byte 0: 1 ? ? ? 1 ? ? ?
* byte 1: 0 x6 x5 x4 x3 x2 x1 x0
* byte 2: 0 x10 x9 x8 x7 ? fin ges
- * byte 3: 0 y9 y8 y7 1 M R L
+ * byte 3: 0 y9 y8 y7 1 M R L
* byte 4: 0 y6 y5 y4 y3 y2 y1 y0
* byte 5: 0 z6 z5 z4 z3 z2 z1 z0
*
@@ -81,11 +82,12 @@ static void alps_process_packet(struct psmouse *psmouse, struct pt_regs *regs)
struct input_dev *dev = &psmouse->dev;
struct input_dev *dev2 = &priv->dev2;
int x, y, z, ges, fin, left, right, middle;
+ int back = 0, forward = 0;
input_regs(dev, regs);
if ((packet[0] & 0xc8) == 0x08) { /* 3-byte PS/2 packet */
- input_report_key(dev2, BTN_LEFT, packet[0] & 1);
+ input_report_key(dev2, BTN_LEFT, packet[0] & 1);
input_report_key(dev2, BTN_RIGHT, packet[0] & 2);
input_report_key(dev2, BTN_MIDDLE, packet[0] & 4);
input_report_rel(dev2, REL_X,
@@ -112,6 +114,18 @@ static void alps_process_packet(struct psmouse *psmouse, struct pt_regs *regs)
z = packet[5];
}
+ if (priv->i->flags & ALPS_FW_BK_1) {
+ back = packet[2] & 4;
+ forward = packet[0] & 0x10;
+ }
+
+ if (priv->i->flags & ALPS_FW_BK_2) {
+ back = packet[3] & 4;
+ forward = packet[2] & 4;
+ if ((middle = forward && back))
+ forward = back = 0;
+ }
+
ges = packet[2] & 1;
fin = packet[2] & 2;
@@ -155,13 +169,12 @@ static void alps_process_packet(struct psmouse *psmouse, struct pt_regs *regs)
input_report_abs(dev, ABS_PRESSURE, z);
input_report_key(dev, BTN_TOOL_FINGER, z > 0);
-
if (priv->i->flags & ALPS_WHEEL)
input_report_rel(dev, REL_WHEEL, ((packet[0] >> 4) & 0x07) | ((packet[2] >> 2) & 0x08));
- if (priv->i->flags & ALPS_FW_BK) {
- input_report_key(dev, BTN_FORWARD, packet[0] & 0x10);
- input_report_key(dev, BTN_BACK, packet[2] & 0x04);
+ if (priv->i->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
+ input_report_key(dev, BTN_FORWARD, forward);
+ input_report_key(dev, BTN_BACK, back);
}
input_sync(dev);
@@ -257,7 +270,6 @@ static struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *vers
static int alps_passthrough_mode(struct psmouse *psmouse, int enable)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
- unsigned char param[3];
int cmd = enable ? PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11;
if (ps2_command(ps2dev, NULL, cmd) ||
@@ -267,7 +279,7 @@ static int alps_passthrough_mode(struct psmouse *psmouse, int enable)
return -1;
/* we may get 3 more bytes, just ignore them */
- ps2_command(ps2dev, param, 0x0300);
+ ps2_drain(ps2dev, 3, 100);
return 0;
}
@@ -425,7 +437,7 @@ int alps_init(struct psmouse *psmouse)
psmouse->dev.relbit[LONG(REL_WHEEL)] |= BIT(REL_WHEEL);
}
- if (priv->i->flags & ALPS_FW_BK) {
+ if (priv->i->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
psmouse->dev.keybit[LONG(BTN_FORWARD)] |= BIT(BTN_FORWARD);
psmouse->dev.keybit[LONG(BTN_BACK)] |= BIT(BTN_BACK);
}
@@ -436,8 +448,8 @@ int alps_init(struct psmouse *psmouse)
priv->dev2.id.bustype = BUS_I8042;
priv->dev2.id.vendor = 0x0002;
priv->dev2.id.product = PSMOUSE_ALPS;
- priv->dev2.id.version = 0x0000;
-
+ priv->dev2.id.version = 0x0000;
+
priv->dev2.evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
priv->dev2.relbit[LONG(REL_X)] |= BIT(REL_X) | BIT(REL_Y);
priv->dev2.keybit[LONG(BTN_LEFT)] |= BIT(BTN_LEFT) | BIT(BTN_MIDDLE) | BIT(BTN_RIGHT);
@@ -461,17 +473,15 @@ init_fail:
int alps_detect(struct psmouse *psmouse, int set_properties)
{
int version;
- struct alps_model_info *model;
+ struct alps_model_info *model;
if (!(model = alps_get_model(psmouse, &version)))
return -1;
if (set_properties) {
psmouse->vendor = "ALPS";
- if (model->flags & ALPS_DUALPOINT)
- psmouse->name = "DualPoint TouchPad";
- else
- psmouse->name = "GlidePoint";
+ psmouse->name = model->flags & ALPS_DUALPOINT ?
+ "DualPoint TouchPad" : "GlidePoint";
psmouse->model = version;
}
return 0;
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index 7baa09cca7c..e994849efb8 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -33,7 +33,6 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga mouse driver");
MODULE_LICENSE("GPL");
-static int amimouse_used = 0;
static int amimouse_lastx, amimouse_lasty;
static struct input_dev amimouse_dev;
@@ -81,16 +80,12 @@ static int amimouse_open(struct input_dev *dev)
{
unsigned short joy0dat;
- if (amimouse_used++)
- return 0;
-
joy0dat = custom.joy0dat;
amimouse_lastx = joy0dat & 0xff;
amimouse_lasty = joy0dat >> 8;
if (request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", amimouse_interrupt)) {
- amimouse_used--;
printk(KERN_ERR "amimouse.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
return -EBUSY;
}
@@ -100,8 +95,7 @@ static int amimouse_open(struct input_dev *dev)
static void amimouse_close(struct input_dev *dev)
{
- if (!--amimouse_used)
- free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt);
+ free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt);
}
static int __init amimouse_init(void)
diff --git a/drivers/input/mouse/inport.c b/drivers/input/mouse/inport.c
index ca4e9688662..1f62c013401 100644
--- a/drivers/input/mouse/inport.c
+++ b/drivers/input/mouse/inport.c
@@ -17,18 +17,18 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
+ *
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
@@ -87,29 +87,23 @@ MODULE_PARM_DESC(irq, "IRQ number (5=default)");
__obsolete_setup("inport_irq=");
-static int inport_used;
-
static irqreturn_t inport_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int inport_open(struct input_dev *dev)
{
- if (!inport_used++) {
- if (request_irq(inport_irq, inport_interrupt, 0, "inport", NULL))
- return -EBUSY;
- outb(INPORT_REG_MODE, INPORT_CONTROL_PORT);
- outb(INPORT_MODE_IRQ | INPORT_MODE_BASE, INPORT_DATA_PORT);
- }
+ if (request_irq(inport_irq, inport_interrupt, 0, "inport", NULL))
+ return -EBUSY;
+ outb(INPORT_REG_MODE, INPORT_CONTROL_PORT);
+ outb(INPORT_MODE_IRQ | INPORT_MODE_BASE, INPORT_DATA_PORT);
return 0;
}
static void inport_close(struct input_dev *dev)
{
- if (!--inport_used) {
- outb(INPORT_REG_MODE, INPORT_CONTROL_PORT);
- outb(INPORT_MODE_BASE, INPORT_DATA_PORT);
- free_irq(inport_irq, NULL);
- }
+ outb(INPORT_REG_MODE, INPORT_CONTROL_PORT);
+ outb(INPORT_MODE_BASE, INPORT_DATA_PORT);
+ free_irq(inport_irq, NULL);
}
static struct input_dev inport_dev = {
@@ -120,11 +114,11 @@ static struct input_dev inport_dev = {
.close = inport_close,
.name = INPORT_NAME,
.phys = "isa023c/input0",
- .id = {
- .bustype = BUS_ISA,
- .vendor = INPORT_VENDOR,
- .product = 0x0001,
- .version = 0x0100,
+ .id = {
+ .bustype = BUS_ISA,
+ .vendor = INPORT_VENDOR,
+ .product = 0x0001,
+ .version = 0x0100,
},
};
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
new file mode 100644
index 00000000000..bd9df9b2832
--- /dev/null
+++ b/drivers/input/mouse/lifebook.c
@@ -0,0 +1,134 @@
+/*
+ * Fujitsu B-series Lifebook PS/2 TouchScreen driver
+ *
+ * Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Kenan Esau <kenan.esau@conan.de>
+ *
+ * TouchScreen detection, absolute mode setting and packet layout are taken from
+ * Harald Hoyer's description of the device.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/libps2.h>
+#include <linux/dmi.h>
+
+#include "psmouse.h"
+#include "lifebook.h"
+
+static struct dmi_system_id lifebook_dmi_table[] = {
+ {
+ .ident = "Lifebook B",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK B Series"),
+ },
+ },
+ { }
+};
+
+
+static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse, struct pt_regs *regs)
+{
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = &psmouse->dev;
+
+ if (psmouse->pktcnt != 3)
+ return PSMOUSE_GOOD_DATA;
+
+ input_regs(dev, regs);
+
+ /* calculate X and Y */
+ if ((packet[0] & 0x08) == 0x00) {
+ input_report_abs(dev, ABS_X,
+ (packet[1] | ((packet[0] & 0x30) << 4)));
+ input_report_abs(dev, ABS_Y,
+ 1024 - (packet[2] | ((packet[0] & 0xC0) << 2)));
+ } else {
+ input_report_rel(dev, REL_X,
+ ((packet[0] & 0x10) ? packet[1] - 256 : packet[1]));
+ input_report_rel(dev, REL_Y,
+ -(int)((packet[0] & 0x20) ? packet[2] - 256 : packet[2]));
+ }
+
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ input_report_key(dev, BTN_TOUCH, packet[0] & 0x04);
+
+ input_sync(dev);
+
+ return PSMOUSE_FULL_PACKET;
+}
+
+static int lifebook_absolute_mode(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param;
+
+ if (psmouse_reset(psmouse))
+ return -1;
+
+ /*
+ Enable absolute output -- ps2_command fails always but if
+ you leave this call out the touchscreen will never send
+ absolute coordinates
+ */
+ param = 0x07;
+ ps2_command(ps2dev, &param, PSMOUSE_CMD_SETRES);
+
+ return 0;
+}
+
+static void lifebook_set_resolution(struct psmouse *psmouse, unsigned int resolution)
+{
+ unsigned char params[] = { 0, 1, 2, 2, 3 };
+
+ if (resolution == 0 || resolution > 400)
+ resolution = 400;
+
+ ps2_command(&psmouse->ps2dev, &params[resolution / 100], PSMOUSE_CMD_SETRES);
+ psmouse->resolution = 50 << params[resolution / 100];
+}
+
+static void lifebook_disconnect(struct psmouse *psmouse)
+{
+ psmouse_reset(psmouse);
+}
+
+int lifebook_detect(struct psmouse *psmouse, int set_properties)
+{
+ if (!dmi_check_system(lifebook_dmi_table))
+ return -1;
+
+ if (set_properties) {
+ psmouse->vendor = "Fujitsu";
+ psmouse->name = "Lifebook TouchScreen";
+ }
+
+ return 0;
+}
+
+int lifebook_init(struct psmouse *psmouse)
+{
+ if (lifebook_absolute_mode(psmouse))
+ return -1;
+
+ psmouse->dev.evbit[0] = BIT(EV_ABS) | BIT(EV_KEY) | BIT(EV_REL);
+ psmouse->dev.keybit[LONG(BTN_LEFT)] = BIT(BTN_LEFT) | BIT(BTN_MIDDLE) | BIT(BTN_RIGHT);
+ psmouse->dev.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
+ psmouse->dev.relbit[0] = BIT(REL_X) | BIT(REL_Y);
+ input_set_abs_params(&psmouse->dev, ABS_X, 0, 1024, 0, 0);
+ input_set_abs_params(&psmouse->dev, ABS_Y, 0, 1024, 0, 0);
+
+ psmouse->protocol_handler = lifebook_process_byte;
+ psmouse->set_resolution = lifebook_set_resolution;
+ psmouse->disconnect = lifebook_disconnect;
+ psmouse->reconnect = lifebook_absolute_mode;
+ psmouse->pktsize = 3;
+
+ return 0;
+}
+
diff --git a/drivers/input/mouse/lifebook.h b/drivers/input/mouse/lifebook.h
new file mode 100644
index 00000000000..be1c0943825
--- /dev/null
+++ b/drivers/input/mouse/lifebook.h
@@ -0,0 +1,17 @@
+/*
+ * Fujitsu B-series Lifebook PS/2 TouchScreen driver
+ *
+ * Copyright (c) 2005 Vojtech Pavlik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _LIFEBOOK_H
+#define _LIFEBOOK_H
+
+int lifebook_detect(struct psmouse *psmouse, int set_properties);
+int lifebook_init(struct psmouse *psmouse);
+
+#endif
diff --git a/drivers/input/mouse/logibm.c b/drivers/input/mouse/logibm.c
index 77eb83e87f6..8b524316722 100644
--- a/drivers/input/mouse/logibm.c
+++ b/drivers/input/mouse/logibm.c
@@ -18,18 +18,18 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
+ *
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
@@ -77,16 +77,11 @@ MODULE_PARM_DESC(irq, "IRQ number (5=default)");
__obsolete_setup("logibm_irq=");
-static int logibm_used = 0;
-
static irqreturn_t logibm_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int logibm_open(struct input_dev *dev)
{
- if (logibm_used++)
- return 0;
if (request_irq(logibm_irq, logibm_interrupt, 0, "logibm", NULL)) {
- logibm_used--;
printk(KERN_ERR "logibm.c: Can't allocate irq %d\n", logibm_irq);
return -EBUSY;
}
@@ -96,8 +91,6 @@ static int logibm_open(struct input_dev *dev)
static void logibm_close(struct input_dev *dev)
{
- if (--logibm_used)
- return;
outb(LOGIBM_DISABLE_IRQ, LOGIBM_CONTROL_PORT);
free_irq(logibm_irq, NULL);
}
@@ -167,7 +160,7 @@ static int __init logibm_init(void)
outb(LOGIBM_DISABLE_IRQ, LOGIBM_CONTROL_PORT);
input_register_device(&logibm_dev);
-
+
printk(KERN_INFO "input: Logitech bus mouse at %#x irq %d\n", LOGIBM_BASE, logibm_irq);
return 0;
diff --git a/drivers/input/mouse/maplemouse.c b/drivers/input/mouse/maplemouse.c
index 12dc0ef5020..e90c60cbbf0 100644
--- a/drivers/input/mouse/maplemouse.c
+++ b/drivers/input/mouse/maplemouse.c
@@ -1,6 +1,6 @@
/*
* $Id: maplemouse.c,v 1.2 2004/03/22 01:18:15 lethal Exp $
- * SEGA Dreamcast mouse driver
+ * SEGA Dreamcast mouse driver
* Based on drivers/usb/usbmouse.c
*/
@@ -15,80 +15,51 @@
MODULE_AUTHOR("YAEGASHI Takeshi <t@keshi.org>");
MODULE_DESCRIPTION("SEGA Dreamcast mouse driver");
-struct dc_mouse {
- struct input_dev dev;
- int open;
-};
-
-
static void dc_mouse_callback(struct mapleq *mq)
{
int buttons, relx, rely, relz;